answer
stringlengths
15
1.25M
(function (global, factory) { typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() : typeof define === 'function' && define.amd ? define(factory) : (global.Inferno = factory()); }(this, function () { 'use strict'; var babelHelpers = {}; babelHelpers.typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol ? "symbol" : typeof obj; }; babelHelpers.classCallCheck = function (instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }; babelHelpers.createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); babelHelpers.extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; babelHelpers; function isNullOrUndefined(obj) { return obj === void 0 || obj === null; } function isAttrAnEvent(attr) { return attr[0] === 'o' && attr[1] === 'n' && attr.length > 3; } function VNode(blueprint) { this.bp = blueprint; this.dom = null; this.instance = null; this.tag = null; this.children = null; this.style = null; this.className = null; this.attrs = null; this.events = null; this.hooks = null; this.key = null; this.clipData = null; } 
VNode.prototype = { setAttrs: function setAttrs(attrs) { this.attrs = attrs; return this; }, setTag: function setTag(tag) { this.tag = tag; return this; }, setStyle: function setStyle(style) { this.style = style; return this; }, setClassName: function setClassName(className) { this.className = className; return this; }, setChildren: function setChildren(children) { this.children = children; return this; }, setHooks: function setHooks(hooks) { this.hooks = hooks; return this; }, setEvents: function setEvents(events) { this.events = events; return this; }, setKey: function setKey(key) { this.key = key; return this; } }; function createVNode(bp) { return new VNode(bp); } function createBlueprint(shape, childrenType) { var tag = shape.tag || null; var tagIsDynamic = tag && tag.arg !== void 0 ? true : false; var children = !isNullOrUndefined(shape.children) ? shape.children : null; var childrenIsDynamic = children && children.arg !== void 0 ? true : false; var attrs = shape.attrs || null; var attrsIsDynamic = attrs && attrs.arg !== void 0 ? true : false; var hooks = shape.hooks || null; var hooksIsDynamic = hooks && hooks.arg !== void 0 ? true : false; var events = shape.events || null; var eventsIsDynamic = events && events.arg !== void 0 ? true : false; var key = shape.key !== void 0 ? shape.key : null; var keyIsDynamic = !isNullOrUndefined(key) && !isNullOrUndefined(key.arg); var style = shape.style || null; var styleIsDynamic = style && style.arg !== void 0 ? true : false; var className = shape.className !== void 0 ? shape.className : null; var classNameIsDynamic = className && className.arg !== void 0 ? true : false; var blueprint = { lazy: shape.lazy || false, dom: null, pools: { keyed: {}, nonKeyed: [] }, tag: !tagIsDynamic ? tag : null, className: className !== '' && className ? className : null, style: style !== '' && style ? style : null, isComponent: tagIsDynamic, hasAttrs: attrsIsDynamic || (attrs ? 
true : false), hasHooks: hooksIsDynamic, hasEvents: eventsIsDynamic, hasStyle: styleIsDynamic || (style !== '' && style ? true : false), hasClassName: classNameIsDynamic || (className !== '' && className ? true : false), childrenType: childrenType === void 0 ? children ? 5 : 0 : childrenType, attrKeys: null, eventKeys: null, isSVG: shape.isSVG || false }; return function () { var vNode = new VNode(blueprint); if (tagIsDynamic === true) { vNode.tag = arguments[tag.arg]; } if (childrenIsDynamic === true) { vNode.children = arguments[children.arg]; } if (attrsIsDynamic === true) { vNode.attrs = arguments[attrs.arg]; } else { vNode.attrs = attrs; } if (hooksIsDynamic === true) { vNode.hooks = arguments[hooks.arg]; } if (eventsIsDynamic === true) { vNode.events = arguments[events.arg]; } if (keyIsDynamic === true) { vNode.key = arguments[key.arg]; } if (styleIsDynamic === true) { vNode.style = arguments[style.arg]; } else { vNode.style = blueprint.style; } if (classNameIsDynamic === true) { vNode.className = arguments[className.arg]; } else { vNode.className = blueprint.className; } return vNode; }; } // Runs only once in applications lifetime var isBrowser = typeof window !== 'undefined' && window.document; // Copy of the util from dom/util, otherwise it makes massive bundles function <API key>(tag, isSVG) { var dom = void 0; if (isSVG === true) { dom = document.createElementNS('http: } else { dom = document.createElement(tag); } return dom; } function <API key>(tag, attrs, isSVG) { if (isBrowser) { var dom = <API key>(tag, isSVG); if (attrs) { <API key>(attrs, dom); } return dom; } return null; } function <API key>(attrs, dom) { var attrKeys = Object.keys(attrs); for (var i = 0; i < attrKeys.length; i++) { var attr = attrKeys[i]; var value = attrs[attr]; if (attr === 'className') { dom.className = value; } else { if (value === true) { dom.setAttribute(attr, attr); } else if (!isNullOrUndefined(value) && value !== false && !isAttrAnEvent(attr)) { dom.setAttribute(attr, 
value); } } } } var index = { createBlueprint: createBlueprint, createVNode: createVNode, universal: { createElement: <API key> } }; return index; }));
/** * OpenLayers 3 Layer Switcher Control. * See [the examples](./examples) for usage. * @constructor * @extends {ol.control.Control} * @param {Object} opt_options Control options, extends olx.control.ControlOptions adding: * **`tipLabel`** `String` - the button tooltip. */ ol.control.LayerSwitcher = function(opt_options) { var options = opt_options || {}; var tipLabel = options.tipLabel ? options.tipLabel : 'Legend'; this.mapListeners = []; this.hiddenClassName = 'ol-unselectable ol-control layer-switcher'; this.shownClassName = this.hiddenClassName + ' shown'; var element = document.createElement('div'); element.className = this.hiddenClassName; var button = document.createElement('button'); button.setAttribute('title', tipLabel); element.appendChild(button); this.panel = document.createElement('div'); this.panel.className = 'panel'; element.appendChild(this.panel); var this_ = this; element.onmouseover = function(e) { this_.showPanel(); }; button.onclick = function(e) { this_.showPanel(); }; element.onmouseout = function(e) { e = e || window.event; if (!element.contains(e.toElement)) { this_.hidePanel(); } }; ol.control.Control.call(this, { element: element, target: options.target }); }; ol.inherits(ol.control.LayerSwitcher, ol.control.Control); /** * Show the layer panel. */ ol.control.LayerSwitcher.prototype.showPanel = function() { if (this.element.className != this.shownClassName) { this.element.className = this.shownClassName; this.renderPanel(); } }; /** * Hide the layer panel. */ ol.control.LayerSwitcher.prototype.hidePanel = function() { if (this.element.className != this.hiddenClassName) { this.element.className = this.hiddenClassName; } }; /** * Re-draw the layer panel to represent the current state of the layers. 
*/ ol.control.LayerSwitcher.prototype.renderPanel = function() { this.<API key>(); while(this.panel.firstChild) { this.panel.removeChild(this.panel.firstChild); } var ul = document.createElement('ul'); this.panel.appendChild(ul); this.renderLayers_(this.getMap(), ul); }; /** * Set the map instance the control is associated with. * @param {ol.Map} map The map instance. */ ol.control.LayerSwitcher.prototype.setMap = function(map) { // Clean up listeners associated with the previous map for (var i = 0, key; i < this.mapListeners.length; i++) { this.getMap().unByKey(this.mapListeners[i]); } this.mapListeners.length = 0; // Wire up listeners etc. and store reference to new map ol.control.Control.prototype.setMap.call(this, map); if (map) { var this_ = this; this.mapListeners.push(map.on('pointerdown', function() { this_.hidePanel(); })); this.renderPanel(); } }; /** * Ensure only the top-most base layer is visible if more than one is visible. * @private */ ol.control.LayerSwitcher.prototype.<API key> = function() { var lastVisibleBaseLyr; ol.control.LayerSwitcher.forEachRecursive(this.getMap(), function(l, idx, a) { if (l.get('type') === 'base' && l.getVisible()) { lastVisibleBaseLyr = l; } }); if (lastVisibleBaseLyr) this.setVisible_(lastVisibleBaseLyr, true); }; /** * Toggle the visible state of a layer. * Takes care of hiding other layers in the same exclusive group if the layer * is toggle to visible. * @private * @param {ol.layer.Base} The layer whos visibility will be toggled. */ ol.control.LayerSwitcher.prototype.setVisible_ = function(lyr, visible) { var map = this.getMap(); lyr.setVisible(visible); if (visible && lyr.get('type') === 'base') { // Hide all other base layers regardless of grouping ol.control.LayerSwitcher.forEachRecursive(map, function(l, idx, a) { if (l != lyr && l.get('type') === 'base') { l.setVisible(false); } }); } }; /** * Render all layers that are children of a group. 
* @private * @param {ol.layer.Base} lyr Layer to be rendered (should have a title property). * @param {Number} idx Position in parent group list. */ ol.control.LayerSwitcher.prototype.renderLayer_ = function(lyr, idx) { var this_ = this; var li = document.createElement('li'); var lyrTitle = lyr.get('title'); var lyrId = lyr.get('title').replace(' ', '-') + '_' + idx; var label = document.createElement('label'); if (lyr.getLayers) { li.className = 'group'; label.innerHTML = lyrTitle; li.appendChild(label); var ul = document.createElement('ul'); li.appendChild(ul); this.renderLayers_(lyr, ul); } else { var input = document.createElement('input'); if (lyr.get('type') === 'base') { input.type = 'radio'; input.name = 'base'; } else { input.type = 'checkbox'; } input.id = lyrId; input.checked = lyr.get('visible'); input.onchange = function(e) { this_.setVisible_(lyr, e.target.checked); }; li.appendChild(input); label.htmlFor = lyrId; label.innerHTML = lyrTitle; li.appendChild(label); } return li; }; /** * Render all layers that are children of a group. * @private * @param {ol.layer.Group} lyr Group layer whos children will be rendered. * @param {Element} elm DOM element that children will be appended to. */ ol.control.LayerSwitcher.prototype.renderLayers_ = function(lyr, elm) { var lyrs = lyr.getLayers().getArray().slice().reverse(); for (var i = 0, l; i < lyrs.length; i++) { l = lyrs[i]; if (l.get('title')) { elm.appendChild(this.renderLayer_(l, i)); } } }; /** * **Static** Call the supplied function for each layer in the passed layer group * recursing nested groups. * @param {ol.layer.Group} lyr The layer group to start iterating from. * @param {Function} fn Callback which will be called for each `ol.layer.Base` * found under `lyr`. 
The signature for `fn` is the same as `ol.Collection#forEach` */ ol.control.LayerSwitcher.forEachRecursive = function(lyr, fn) { lyr.getLayers().forEach(function(lyr, idx, a) { fn(lyr, idx, a); if (lyr.getLayers) { ol.control.LayerSwitcher.forEachRecursive(lyr, fn); } }); };
/* driver for the ADC ads1114 (16 bits I2C 860SpS max) from Texas instruments * Navarro & Gorraz & Hattenberger */ #ifndef ADS_1114_H #define ADS_1114_H #include "std.h" #include "mcu_periph/i2c.h" /* I2C slave address */ #ifndef ADS1114_1_I2C_ADDR #define ADS1114_1_I2C_ADDR 0x90 // slave address byte (I2c address(7bits) + R/W @ 0) #endif #ifndef ADS1114_2_I2C_ADDR #define ADS1114_2_I2C_ADDR 0x92 // slave address byte (I2c address(7bits) + R/W @ 0) #endif /* I2C conf register */ #define <API key> 0x00 // access to the Conversion register (16bits) #define <API key> 0x01 // access to the Configuration register (16bits) /* ADS1114_1 default conf */ #ifndef ADS1114_1_OS #define ADS1114_1_OS 0x0 // Operational status #endif #ifndef ADS1114_1_MUX #define ADS1114_1_MUX 0x0 // Input multiplexer #endif #ifndef ADS1114_1_PGA #define ADS1114_1_PGA 0x3 // Programable gain amplifier (= 4 with a Full Scale of +/- 1.024V) #endif #ifndef ADS1114_1_MODE #define ADS1114_1_MODE 0x0 // Continuous conversion mode #endif #ifndef ADS1114_1_DR #define ADS1114_1_DR 0x4 // Data rate (128 SPS) #endif #ifndef ADS1114_1_COMP_MODE #define ADS1114_1_COMP_MODE 0x0 // Comparator mode #endif #ifndef ADS1114_1_COMP_POL #define ADS1114_1_COMP_POL 0x0 // Comparator polarity #endif #ifndef ADS1114_1_COMP_LAT #define ADS1114_1_COMP_LAT 0x0 // Latching comparator #endif #ifndef ADS1114_1_COMP_QUE #define ADS1114_1_COMP_QUE 0x3 // Comparator queue (disable) #endif #define <API key> ((ADS1114_1_OS<<7)|(ADS1114_1_MUX<<4)|(ADS1114_1_PGA<<1)|(ADS1114_1_MODE)) #define <API key> ((ADS1114_1_DR<<5)|(ADS1114_1_COMP_MODE<<4)|(ADS1114_1_COMP_POL<<3)|(ADS1114_1_COMP_LAT<<2)|(ADS1114_1_COMP_QUE)) /* ADS1114_1 default conf */ #ifndef ADS1114_2_OS #define ADS1114_2_OS 0x0 // Operational status #endif #ifndef ADS1114_2_MUX #define ADS1114_2_MUX 0x0 // Input multiplexer #endif #ifndef ADS1114_2_PGA #define ADS1114_2_PGA 0x3 // Programable gain amplifier (= 4 with a Full Scale of +/- 1.024V) #endif #ifndef ADS1114_2_MODE 
#define ADS1114_2_MODE 0x0 // Continuous conversion mode #endif #ifndef ADS1114_2_DR #define ADS1114_2_DR 0x4 // Data rate (128 SPS) #endif #ifndef ADS1114_2_COMP_MODE #define ADS1114_2_COMP_MODE 0x0 // Comparator mode #endif #ifndef ADS1114_2_COMP_POL #define ADS1114_2_COMP_POL 0x0 // Comparator polarity #endif #ifndef ADS1114_2_COMP_LAT #define ADS1114_2_COMP_LAT 0x0 // Latching comparator #endif #ifndef ADS1114_2_COMP_QUE #define ADS1114_2_COMP_QUE 0x3 // Comparator queue (disable) #endif #define <API key> ((ADS1114_2_OS<<7)|(ADS1114_2_MUX<<4)|(ADS1114_2_PGA<<1)|(ADS1114_2_MODE)) #define <API key> ((ADS1114_2_DR<<5)|(ADS1114_2_COMP_MODE<<4)|(ADS1114_2_COMP_POL<<3)|(ADS1114_2_COMP_LAT<<2)|(ADS1114_2_COMP_QUE)) /* Default I2C device */ // FIXME all ads on the same device for now #ifndef ADS1114_I2C_DEV #define ADS1114_I2C_DEV i2c1 #endif struct ads1114_periph { struct i2c_transaction trans; uint8_t i2c_addr; bool_t config_done; bool_t data_available; }; #if USE_ADS1114_1 extern struct ads1114_periph ads1114_1; #endif #if USE_ADS1114_2 extern struct ads1114_periph ads1114_2; #endif extern void ads1114_init(void); extern void ads1114_read(struct ads1114_periph *p); // Generic Event Macro #define _Ads1114Event(_p) {\ if (!_p.config_done) { \ if (_p.trans.status == I2CTransSuccess) { _p.config_done = TRUE; _p.trans.status = I2CTransDone; } \ if (_p.trans.status == I2CTransFailed) { _p.trans.status = I2CTransDone; } \ } else { \ if (_p.trans.status == I2CTransSuccess) { _p.data_available = TRUE; _p.trans.status = I2CTransDone; } \ if (_p.trans.status == I2CTransFailed) { _p.trans.status = I2CTransDone; } \ } \ } #if USE_ADS1114_1 #define Ads1114_1Event() _Ads1114Event(ads1114_1) #else #define Ads1114_1Event() {} #endif #if USE_ADS1114_2 #define Ads1114_2Event() _Ads1114Event(ads1114_2) #else #define Ads1114_2Event() {} #endif // Final event macro #define Ads1114Event() { \ Ads1114_1Event(); \ Ads1114_2Event(); \ } // Get value macro // @param ads1114 periph #define 
Ads1114GetValue(_p) ((int16_t)(((int16_t)_p.trans.buf[0]<<8)|_p.trans.buf[1])) #endif // ADS_1114_H
<?php // Moodle is free software: you can redistribute it and/or modify // (at your option) any later version. // Moodle is distributed in the hope that it will be useful, // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the defined('MOODLE_INTERNAL') || die(); $plugin->version = 2015111600; $plugin->requires = 2015111000; // Requires this Moodle version $plugin->component = 'datafield_number'; // Full name of the plugin (used for diagnostics)
<html> <head> <script> if (window.testRunner) { testRunner.dumpAsText(); } </script> <style> @-webkit-keyframes anim { 42% { -<API key>: 42rem; } } body::first-letter { -<API key>: anim; } </style> </head> <body> This test passes if it does not crash. </body> </html>
// <API key>: Apache-2.0 WITH LLVM-exception // This file implements the PowerPC branch predicates. #include "PPCPredicates.h" #include "llvm/Support/ErrorHandling.h" #include <cassert> using namespace llvm; PPC::Predicate PPC::InvertPredicate(PPC::Predicate Opcode) { switch (Opcode) { case PPC::PRED_EQ: return PPC::PRED_NE; case PPC::PRED_NE: return PPC::PRED_EQ; case PPC::PRED_LT: return PPC::PRED_GE; case PPC::PRED_GE: return PPC::PRED_LT; case PPC::PRED_GT: return PPC::PRED_LE; case PPC::PRED_LE: return PPC::PRED_GT; case PPC::PRED_NU: return PPC::PRED_UN; case PPC::PRED_UN: return PPC::PRED_NU; case PPC::PRED_EQ_MINUS: return PPC::PRED_NE_PLUS; case PPC::PRED_NE_MINUS: return PPC::PRED_EQ_PLUS; case PPC::PRED_LT_MINUS: return PPC::PRED_GE_PLUS; case PPC::PRED_GE_MINUS: return PPC::PRED_LT_PLUS; case PPC::PRED_GT_MINUS: return PPC::PRED_LE_PLUS; case PPC::PRED_LE_MINUS: return PPC::PRED_GT_PLUS; case PPC::PRED_NU_MINUS: return PPC::PRED_UN_PLUS; case PPC::PRED_UN_MINUS: return PPC::PRED_NU_PLUS; case PPC::PRED_EQ_PLUS: return PPC::PRED_NE_MINUS; case PPC::PRED_NE_PLUS: return PPC::PRED_EQ_MINUS; case PPC::PRED_LT_PLUS: return PPC::PRED_GE_MINUS; case PPC::PRED_GE_PLUS: return PPC::PRED_LT_MINUS; case PPC::PRED_GT_PLUS: return PPC::PRED_LE_MINUS; case PPC::PRED_LE_PLUS: return PPC::PRED_GT_MINUS; case PPC::PRED_NU_PLUS: return PPC::PRED_UN_MINUS; case PPC::PRED_UN_PLUS: return PPC::PRED_NU_MINUS; // Simple predicates for single condition-register bits. 
case PPC::PRED_BIT_SET: return PPC::PRED_BIT_UNSET; case PPC::PRED_BIT_UNSET: return PPC::PRED_BIT_SET; } llvm_unreachable("Unknown PPC branch opcode!"); } PPC::Predicate PPC::getSwappedPredicate(PPC::Predicate Opcode) { switch (Opcode) { case PPC::PRED_EQ: return PPC::PRED_EQ; case PPC::PRED_NE: return PPC::PRED_NE; case PPC::PRED_LT: return PPC::PRED_GT; case PPC::PRED_GE: return PPC::PRED_LE; case PPC::PRED_GT: return PPC::PRED_LT; case PPC::PRED_LE: return PPC::PRED_GE; case PPC::PRED_NU: return PPC::PRED_NU; case PPC::PRED_UN: return PPC::PRED_UN; case PPC::PRED_EQ_MINUS: return PPC::PRED_EQ_MINUS; case PPC::PRED_NE_MINUS: return PPC::PRED_NE_MINUS; case PPC::PRED_LT_MINUS: return PPC::PRED_GT_MINUS; case PPC::PRED_GE_MINUS: return PPC::PRED_LE_MINUS; case PPC::PRED_GT_MINUS: return PPC::PRED_LT_MINUS; case PPC::PRED_LE_MINUS: return PPC::PRED_GE_MINUS; case PPC::PRED_NU_MINUS: return PPC::PRED_NU_MINUS; case PPC::PRED_UN_MINUS: return PPC::PRED_UN_MINUS; case PPC::PRED_EQ_PLUS: return PPC::PRED_EQ_PLUS; case PPC::PRED_NE_PLUS: return PPC::PRED_NE_PLUS; case PPC::PRED_LT_PLUS: return PPC::PRED_GT_PLUS; case PPC::PRED_GE_PLUS: return PPC::PRED_LE_PLUS; case PPC::PRED_GT_PLUS: return PPC::PRED_LT_PLUS; case PPC::PRED_LE_PLUS: return PPC::PRED_GE_PLUS; case PPC::PRED_NU_PLUS: return PPC::PRED_NU_PLUS; case PPC::PRED_UN_PLUS: return PPC::PRED_UN_PLUS; case PPC::PRED_BIT_SET: case PPC::PRED_BIT_UNSET: llvm_unreachable("Invalid use of bit predicate code"); } llvm_unreachable("Unknown PPC branch opcode!"); }
import { __core_private__ as r } from '@angular/core'; export declare type RenderDebugInfo = typeof r._RenderDebugInfo; export declare var RenderDebugInfo: typeof r.RenderDebugInfo; export declare type DirectRenderer = typeof r._DirectRenderer; export declare var <API key>: typeof r.<API key>; export declare type <API key> = typeof r.<API key>; export declare var <API key>: typeof r.<API key>; export declare var reflector: typeof r.reflector; export declare type NoOpAnimationPlayer = typeof r.<API key>; export declare var NoOpAnimationPlayer: typeof r.NoOpAnimationPlayer; export declare type AnimationPlayer = typeof r._AnimationPlayer; export declare var AnimationPlayer: typeof r.AnimationPlayer; export declare type <API key> = typeof r.<API key>; export declare var <API key>: typeof r.<API key>; export declare type <API key> = typeof r.<API key>; export declare var <API key>: typeof r.<API key>; export declare type AnimationKeyframe = typeof r._AnimationKeyframe; export declare var AnimationKeyframe: typeof r.AnimationKeyframe; export declare type AnimationStyles = typeof r._AnimationStyles; export declare var AnimationStyles: typeof r.AnimationStyles; export declare var <API key>: typeof r.<API key>; export declare var <API key>: typeof r.<API key>; export declare var clearStyles: typeof r.clearStyles; export declare var <API key>: typeof r.<API key>;
(function () { /*** Variables ***/ var win = window, doc = document, attrProto = { setAttribute: Element.prototype.setAttribute, removeAttribute: Element.prototype.removeAttribute }, hasShadow = Element.prototype.createShadowRoot, container = doc.createElement('div'), noop = function(){}, trueop = function(){ return true; }, regexReplaceCommas = /,/g, regexCamelToDash = /([a-z])([A-Z])/g, regexPseudoParens = /\(|\)/g, regexPseudoCapture = /:(\w+)\u276A(.+?(?=\u276B))|:(\w+)/g, regexDigits = /(\d+)/g, keypseudo = { action: function (pseudo, event) { return pseudo.value.match(regexDigits).indexOf(String(event.keyCode)) > -1 == (pseudo.name == 'keypass') || null; } }, /* - The prefix object generated here is added to the xtag object as xtag.prefix later in the code - Prefix provides a variety of prefix variations for the browser in which your code is running - The 4 variations of prefix are as follows: * prefix.dom: the correct prefix case and form when used on DOM elements/style properties * prefix.lowercase: a lowercase version of the prefix for use in various user-code situations * prefix.css: the lowercase, dashed version of the prefix * prefix.js: addresses prefixed APIs present in global and non-Element contexts */ prefix = (function () { var styles = win.getComputedStyle(doc.documentElement, ''), pre = (Array.prototype.slice .call(styles) .join('') .match(/-(moz|webkit|ms)-/) || (styles.OLink === '' && ['', 'o']) )[1]; return { dom: pre == 'ms' ? 'MS' : pre, lowercase: pre, css: '-' + pre + '-', js: pre == 'ms' ? pre : pre[0].toUpperCase() + pre.substr(1) }; })(), matchSelector = Element.prototype.matches || Element.prototype.matchesSelector || Element.prototype[prefix.lowercase + 'MatchesSelector']; /*** Functions ***/ // Utilities /* This is an enhanced typeof check for all types of objects. Where typeof would normaly return 'object' for many common DOM objects (like NodeLists and HTMLCollections). 
- For example: typeOf(document.children) will correctly return 'htmlcollection' */ var typeCache = {}, typeString = typeCache.toString, typeRegexp = /\s([a-zA-Z]+)/; function typeOf(obj) { var type = typeString.call(obj); return typeCache[type] || (typeCache[type] = type.match(typeRegexp)[1].toLowerCase()); } function clone(item, type){ var fn = clone[type || typeOf(item)]; return fn ? fn(item) : item; } clone.object = function(src){ var obj = {}; for (var key in src) obj[key] = clone(src[key]); return obj; }; clone.array = function(src){ var i = src.length, array = new Array(i); while (i--) array[i] = clone(src[i]); return array; }; /* The toArray() method allows for conversion of any object to a true array. For types that cannot be converted to an array, the method returns a 1 item array containing the passed-in object. */ var unsliceable = { 'undefined': 1, 'null': 1, 'number': 1, 'boolean': 1, 'string': 1, 'function': 1 }; function toArray(obj){ return unsliceable[typeOf(obj)] ? [obj] : Array.prototype.slice.call(obj, 0); } // DOM var str = ''; function query(element, selector){ return (selector || str).length ? toArray(element.querySelectorAll(selector)) : []; } // Pseudos function parsePseudo(fn){fn();} // Mixins function mergeOne(source, key, current){ var type = typeOf(current); if (type == 'object' && typeOf(source[key]) == 'object') xtag.merge(source[key], current); else source[key] = clone(current, type); return source; } function mergeMixin(tag, original, mixin, name) { var key, keys = {}; for (var z in original) keys[z.split(':')[0]] = z; for (z in mixin) { key = keys[z.split(':')[0]]; if (typeof original[key] == 'function') { if (!key.match(':mixins')) { original[key + ':mixins'] = original[key]; delete original[key]; key = key + ':mixins'; } original[key].__mixin__ = xtag.applyPseudos(z + (z.match(':mixins') ? 
'' : ':mixins'), mixin[z], tag.pseudos, original[key].__mixin__); } else { original[z] = mixin[z]; delete original[key]; } } } var uniqueMixinCount = 0; function addMixin(tag, original, mixin){ for (var z in mixin){ original[z + ':__mixin__(' + (uniqueMixinCount++) + ')'] = xtag.applyPseudos(z, mixin[z], tag.pseudos); } } function resolveMixins(mixins, output){ var index = mixins.length; while (index output.unshift(mixins[index]); if (xtag.mixins[mixins[index]].mixins) resolveMixins(xtag.mixins[mixins[index]].mixins, output); } return output; } function applyMixins(tag) { resolveMixins(tag.mixins, []).forEach(function(name){ var mixin = xtag.mixins[name]; for (var type in mixin) { var item = mixin[type], original = tag[type]; if (!original) tag[type] = item; else { switch (type){ case 'mixins': break; case 'events': addMixin(tag, original, item); break; case 'accessors': case 'prototype': for (var z in item) { if (!original[z]) original[z] = item[z]; else mergeMixin(tag, original[z], item[z], name); } break; default: mergeMixin(tag, original, item, name); } } } }); return tag; } // Events function delegateAction(pseudo, event) { var match, target = event.target, root = event.currentTarget; while (!match && target && target != root) { if (target.tagName && matchSelector.call(target, pseudo.value)) match = target; target = target.parentNode; } if (!match && root.tagName && matchSelector.call(root, pseudo.value)) match = root; return match ? 
pseudo.listener = pseudo.listener.bind(match) : null; } function touchFilter(event){ return event.button === 0; } function writeProperty(key, event, base, desc){ if (desc) event[key] = base[key]; else Object.defineProperty(event, key, { writable: true, enumerable: true, value: base[key] }); } var skipProps = {}; for (var z in doc.createEvent('CustomEvent')) skipProps[z] = 1; function inheritEvent(event, base){ var desc = Object.<API key>(event, 'target'); for (var z in base) { if (!skipProps[z]) writeProperty(z, event, base, desc); } event.baseEvent = base; } // Accessors function modAttr(element, attr, name, value, method){ attrProto[method].call(element, name, attr && attr.boolean ? '' : value); } function syncAttr(element, attr, name, value, method){ if (attr && (attr.property || attr.selector)) { var nodes = attr.property ? [element.xtag[attr.property]] : attr.selector ? xtag.query(element, attr.selector) : [], index = nodes.length; while (index--) nodes[index][method](name, value); } } function attachProperties(tag, prop, z, accessor, attr, name){ var key = z.split(':'), type = key[0]; if (type == 'get') { key[0] = prop; tag.prototype[prop].get = xtag.applyPseudos(key.join(':'), accessor[z], tag.pseudos, accessor[z]); } else if (type == 'set') { key[0] = prop; var setter = tag.prototype[prop].set = xtag.applyPseudos(key.join(':'), attr ? function(value){ var old, method = 'setAttribute'; if (attr.boolean){ value = !!value; old = this.hasAttribute(name); if (!value) method = 'removeAttribute'; } else { value = attr.validate ? attr.validate.call(this, value) : value; old = this.getAttribute(name); } modAttr(this, attr, name, value, method); accessor[z].call(this, value, old); syncAttr(this, attr, name, value, method); } : accessor[z] ? 
function(value){ accessor[z].call(this, value); } : null, tag.pseudos, accessor[z]); if (attr) attr.setter = accessor[z]; } else tag.prototype[prop][z] = accessor[z]; } function parseAccessor(tag, prop){ tag.prototype[prop] = {}; var accessor = tag.accessors[prop], attr = accessor.attribute, name; if (attr) { name = attr.name = (attr ? (attr.name || prop.replace(regexCamelToDash, '$1-$2')) : prop).toLowerCase(); attr.key = prop; tag.attributes[name] = attr; } for (var z in accessor) attachProperties(tag, prop, z, accessor, attr, name); if (attr) { if (!tag.prototype[prop].get) { var method = (attr.boolean ? 'has' : 'get') + 'Attribute'; tag.prototype[prop].get = function(){ return this[method](name); }; } if (!tag.prototype[prop].set) tag.prototype[prop].set = function(value){ value = attr.boolean ? !!value : attr.validate ? attr.validate.call(this, value) : value; var method = attr.boolean ? (value ? 'setAttribute' : 'removeAttribute') : 'setAttribute'; modAttr(this, attr, name, value, method); syncAttr(this, attr, name, value, method); }; } } var unwrapComment = /\/\*!?(?:\@preserve)?[ \t]*(?:\r\n|\n)([\s\S]*?)(?:\r\n|\n)\s*\*\ function parseMultiline(fn){ return typeof fn == 'function' ? unwrapComment.exec(fn.toString())[1] : fn; } /*** X-Tag Object Definition ***/ var xtag = { tags: {}, defaultOptions: { pseudos: [], mixins: [], events: {}, methods: {}, accessors: {}, lifecycle: {}, attributes: {}, 'prototype': { xtag: { get: function(){ return this.__xtag__ ? 
this.__xtag__ : (this.__xtag__ = { data: {} }); } } } }, register: function (name, options) { var _name; if (typeof name == 'string') _name = name.toLowerCase(); else throw 'First argument must be a Custom Element string name'; xtag.tags[_name] = options || {}; var basePrototype = options.prototype; delete options.prototype; var tag = xtag.tags[_name].compiled = applyMixins(xtag.merge({}, xtag.defaultOptions, options)); var proto = tag.prototype; var lifecycle = tag.lifecycle; for (var z in tag.events) tag.events[z] = xtag.parseEvent(z, tag.events[z]); for (z in lifecycle) lifecycle[z.split(':')[0]] = xtag.applyPseudos(z, lifecycle[z], tag.pseudos, lifecycle[z]); for (z in tag.methods) proto[z.split(':')[0]] = { value: xtag.applyPseudos(z, tag.methods[z], tag.pseudos, tag.methods[z]), enumerable: true }; for (z in tag.accessors) parseAccessor(tag, z); if (tag.shadow) tag.shadow = tag.shadow.nodeName ? tag.shadow : xtag.createFragment(tag.shadow); if (tag.content) tag.content = tag.content.nodeName ? tag.content.innerHTML : parseMultiline(tag.content); var created = lifecycle.created; var finalized = lifecycle.finalized; proto.createdCallback = { enumerable: true, value: function(){ var element = this; if (tag.shadow && hasShadow) this.createShadowRoot().appendChild(tag.shadow.cloneNode(true)); if (tag.content) this.appendChild(document.createElement('div')).outerHTML = tag.content; var output = created ? created.apply(this, arguments) : null; xtag.addEvents(this, tag.events); for (var name in tag.attributes) { var attr = tag.attributes[name], hasAttr = this.hasAttribute(name), hasDefault = attr.def !== undefined; if (hasAttr || attr.boolean || hasDefault) { this[attr.key] = attr.boolean ? hasAttr : !hasAttr && hasDefault ? 
attr.def : this.getAttribute(name); } } tag.pseudos.forEach(function(obj){ obj.onAdd.call(element, obj); }); this.xtagComponentReady = true; if (finalized) finalized.apply(this, arguments); return output; } }; var inserted = lifecycle.inserted; var removed = lifecycle.removed; if (inserted || removed) { proto.attachedCallback = { value: function(){ if (removed) this.xtag.__parentNode__ = this.parentNode; if (inserted) return inserted.apply(this, arguments); }, enumerable: true }; } if (removed) { proto.detachedCallback = { value: function(){ var args = toArray(arguments); args.unshift(this.xtag.__parentNode__); var output = removed.apply(this, args); delete this.xtag.__parentNode__; return output; }, enumerable: true }; } if (lifecycle.attributeChanged) proto.<API key> = { value: lifecycle.attributeChanged, enumerable: true }; proto.setAttribute = { writable: true, enumerable: true, value: function (name, value){ var old; var _name = name.toLowerCase(); var attr = tag.attributes[_name]; if (attr) { old = this.getAttribute(_name); value = attr.boolean ? '' : attr.validate ? attr.validate.call(this, value) : value; } modAttr(this, attr, _name, value, 'setAttribute'); if (attr) { if (attr.setter) attr.setter.call(this, attr.boolean ? true : value, old); syncAttr(this, attr, _name, value, 'setAttribute'); } } }; proto.removeAttribute = { writable: true, enumerable: true, value: function (name){ var _name = name.toLowerCase(); var attr = tag.attributes[_name]; var old = this.hasAttribute(_name); modAttr(this, attr, _name, '', 'removeAttribute'); if (attr) { if (attr.setter) attr.setter.call(this, attr.boolean ? false : undefined, old); syncAttr(this, attr, _name, '', 'removeAttribute'); } } }; var definition = {}; var instance = basePrototype instanceof win.HTMLElement; var extended = tag['extends'] && (definition['extends'] = tag['extends']); if (basePrototype) Object.getOwnPropertyNames(basePrototype).forEach(function(z){ var prop = proto[z]; var desc = instance ? 
Object.<API key>(basePrototype, z) : basePrototype[z]; if (prop) { for (var y in desc) { if (typeof desc[y] == 'function' && prop[y]) prop[y] = xtag.wrap(desc[y], prop[y]); else prop[y] = desc[y]; } } proto[z] = prop || desc; }); definition['prototype'] = Object.create( extended ? Object.create(doc.createElement(extended).constructor).prototype : win.HTMLElement.prototype, proto ); return doc.registerElement(_name, definition); }, /* Exposed Variables */ mixins: {}, prefix: prefix, captureEvents: { focus: 1, blur: 1, scroll: 1, DOMMouseScroll: 1 }, customEvents: { animationstart: { attach: [prefix.dom + 'AnimationStart'] }, animationend: { attach: [prefix.dom + 'AnimationEnd'] }, transitionend: { attach: [prefix.dom + 'TransitionEnd'] }, move: { attach: ['pointermove'] }, enter: { attach: ['pointerenter'] }, leave: { attach: ['pointerleave'] }, scrollwheel: { attach: ['DOMMouseScroll', 'mousewheel'], condition: function(event){ event.delta = event.wheelDelta ? event.wheelDelta / 40 : Math.round(event.detail / 3.5 * -1); return true; } }, tap: { attach: ['pointerdown', 'pointerup'], condition: function(event, custom){ if (event.type == 'pointerdown') { custom.startX = event.clientX; custom.startY = event.clientY; } else if (event.button === 0 && Math.abs(custom.startX - event.clientX) < 10 && Math.abs(custom.startY - event.clientY) < 10) return true; } }, tapstart: { attach: ['pointerdown'], condition: touchFilter }, tapend: { attach: ['pointerup'], condition: touchFilter }, tapmove: { attach: ['pointerdown'], condition: function(event, custom){ if (event.type == 'pointerdown') { var listener = custom.listener.bind(this); if (!custom.tapmoveListeners) custom.tapmoveListeners = xtag.addEvents(document, { pointermove: listener, pointerup: listener, pointercancel: listener }); } else if (event.type == 'pointerup' || event.type == 'pointercancel') { xtag.removeEvents(document, custom.tapmoveListeners); custom.tapmoveListeners = null; } return true; } }, taphold: { 
attach: ['pointerdown', 'pointerup'], condition: function(event, custom){ if (event.type == 'pointerdown') { (custom.pointers = custom.pointers || {})[event.pointerId] = setTimeout( xtag.fireEvent.bind(null, this, 'taphold'), custom.duration || 1000 ); } else if (event.type == 'pointerup') { if (custom.pointers) { clearTimeout(custom.pointers[event.pointerId]); delete custom.pointers[event.pointerId]; } } else return true; } } }, pseudos: { __mixin__: {}, mixins: { onCompiled: function(fn, pseudo){ var mixin = pseudo.source && pseudo.source.__mixin__ || pseudo.source; if (mixin) switch (pseudo.value) { case null: case '': case 'before': return function(){ mixin.apply(this, arguments); return fn.apply(this, arguments); }; case 'after': return function(){ var returns = fn.apply(this, arguments); mixin.apply(this, arguments); return returns; }; case 'none': return fn; } else return fn; } }, keypass: keypseudo, keyfail: keypseudo, delegate: { action: delegateAction }, preventable: { action: function (pseudo, event) { return !event.defaultPrevented; } }, duration: { onAdd: function(pseudo){ pseudo.source.duration = Number(pseudo.value); } }, capture: { onCompiled: function(fn, pseudo){ if (pseudo.source) pseudo.source.capture = true; } } }, /* UTILITIES */ clone: clone, typeOf: typeOf, toArray: toArray, wrap: function (original, fn) { return function(){ var output = original.apply(this, arguments); fn.apply(this, arguments); return output; }; }, /* Recursively merges one object with another. 
The first argument is the destination object, all other objects passed in as arguments are merged from right to left, conflicts are overwritten */ merge: function(source, k, v){ if (typeOf(k) == 'string') return mergeOne(source, k, v); for (var i = 1, l = arguments.length; i < l; i++){ var object = arguments[i]; for (var key in object) mergeOne(source, key, object[key]); } return source; }, uid: function(){ return Math.random().toString(36).substr(2,10); }, /* DOM */ query: query, skipTransition: function(element, fn, bind){ var prop = prefix.js + 'TransitionProperty'; element.style[prop] = element.style.transitionProperty = 'none'; var callback = fn ? fn.call(bind || element) : null; return xtag.skipFrame(function(){ element.style[prop] = element.style.transitionProperty = ''; if (callback) callback.call(bind || element); }); }, requestFrame: (function(){ var raf = win.<API key> || win[prefix.lowercase + '<API key>'] || function(fn){ return win.setTimeout(fn, 20); }; return function(fn){ return raf(fn); }; })(), cancelFrame: (function(){ var cancel = win.<API key> || win[prefix.lowercase + '<API key>'] || win.clearTimeout; return function(id){ return cancel(id); }; })(), skipFrame: function(fn){ var id = xtag.requestFrame(function(){ id = xtag.requestFrame(fn); }); return id; }, matchSelector: function (element, selector) { return matchSelector.call(element, selector); }, set: function (element, method, value) { element[method] = value; if (window.CustomElements) CustomElements.upgradeAll(element); }, innerHTML: function(el, html){ xtag.set(el, 'innerHTML', html); }, hasClass: function (element, klass) { return element.className.split(' ').indexOf(klass.trim())>-1; }, addClass: function (element, klass) { var list = element.className.trim().split(' '); klass.trim().split(' ').forEach(function (name) { if (!~list.indexOf(name)) list.push(name); }); element.className = list.join(' ').trim(); return element; }, removeClass: function (element, klass) { var classes = 
klass.trim().split(' '); element.className = element.className.trim().split(' ').filter(function (name) { return name && !~classes.indexOf(name); }).join(' '); return element; }, toggleClass: function (element, klass) { return xtag[xtag.hasClass(element, klass) ? 'removeClass' : 'addClass'].call(null, element, klass); }, /* Runs a query on only the children of an element */ queryChildren: function (element, selector) { var id = element.id, attr = '#' + (element.id = id || 'x_' + xtag.uid()) + ' > ', parent = element.parentNode || !container.appendChild(element); selector = attr + (selector + '').replace(regexReplaceCommas, ',' + attr); var result = element.parentNode.querySelectorAll(selector); if (!id) element.removeAttribute('id'); if (!parent) container.removeChild(element); return toArray(result); }, /* Creates a document fragment with the content passed in - content can be a string of HTML, an element, or an array/collection of elements */ createFragment: function(content) { var template = document.createElement('template'); if (content) { if (content.nodeName) toArray(arguments).forEach(function(e){ template.content.appendChild(e); }); else template.innerHTML = parseMultiline(content); } return document.importNode(template.content, true); }, /* Removes an element from the DOM for more performant node manipulation. The element is placed back into the DOM at the place it was taken from. */ manipulate: function(element, fn){ var next = element.nextSibling, parent = element.parentNode, returned = fn.call(element) || element; if (next) parent.insertBefore(returned, next); else parent.appendChild(returned); }, /* PSEUDOS */ applyPseudos: function(key, fn, target, source) { var listener = fn, pseudos = {}; if (key.match(':')) { var matches = [], valueFlag = 0; key.replace(regexPseudoParens, function(match){ if (match == '(') return ++valueFlag == 1 ? '\u276A' : '('; return !--valueFlag ? 
'\u276B' : ')'; }).replace(regexPseudoCapture, function(z, name, value, solo){ matches.push([name || solo, value]); }); var i = matches.length; while (i--) parsePseudo(function(){ var name = matches[i][0], value = matches[i][1]; if (!xtag.pseudos[name]) throw "pseudo not found: " + name + " " + value; value = (value === '' || typeof value == 'undefined') ? null : value; var pseudo = pseudos[i] = Object.create(xtag.pseudos[name]); pseudo.key = key; pseudo.name = name; pseudo.value = value; pseudo['arguments'] = (value || '').split(','); pseudo.action = pseudo.action || trueop; pseudo.source = source; pseudo.onAdd = pseudo.onAdd || noop; pseudo.onRemove = pseudo.onRemove || noop; var original = pseudo.listener = listener; listener = function(){ var output = pseudo.action.apply(this, [pseudo].concat(toArray(arguments))); if (output === null || output === false) return output; output = pseudo.listener.apply(this, arguments); pseudo.listener = original; return output; }; if (!target) pseudo.onAdd.call(fn, pseudo); else target.push(pseudo); }); } for (var z in pseudos) { if (pseudos[z].onCompiled) listener = pseudos[z].onCompiled(listener, pseudos[z]) || listener; } return listener; }, removePseudos: function(target, pseudos){ pseudos.forEach(function(obj){ obj.onRemove.call(target, obj); }); }, /*** Events ***/ parseEvent: function(type, fn) { var pseudos = type.split(':'), key = pseudos.shift(), custom = xtag.customEvents[key], event = xtag.merge({ type: key, stack: noop, condition: trueop, capture: xtag.captureEvents[key], attach: [], _attach: [], pseudos: '', _pseudos: [], onAdd: noop, onRemove: noop }, custom || {}); event.attach = toArray(event.base || event.attach); event.chain = key + (event.pseudos.length ? ':' + event.pseudos : '') + (pseudos.length ? 
':' + pseudos.join(':') : ''); var stack = xtag.applyPseudos(event.chain, fn, event._pseudos, event); event.stack = function(e){ e.currentTarget = e.currentTarget || this; var detail = e.detail || {}; if (!detail.__stack__) return stack.apply(this, arguments); else if (detail.__stack__ == stack) { e.stopPropagation(); e.cancelBubble = true; return stack.apply(this, arguments); } }; event.listener = function(e){ var args = toArray(arguments), output = event.condition.apply(this, args.concat([event])); if (!output) return output; // Remove this when affected browser builds with this regression fall below 5% marketshare if (e.type != key && (e.baseEvent && e.type != e.baseEvent.type)) { xtag.fireEvent(e.target, key, { baseEvent: e, detail: output !== true && (output.__stack__ = stack) ? output : { __stack__: stack } }); } else return event.stack.apply(this, args); }; event.attach.forEach(function(name) { event._attach.push(xtag.parseEvent(name, event.listener)); }); return event; }, addEvent: function (element, type, fn, capture) { var event = typeof fn == 'function' ? 
xtag.parseEvent(type, fn) : fn; event._pseudos.forEach(function(obj){ obj.onAdd.call(element, obj); }); event._attach.forEach(function(obj) { xtag.addEvent(element, obj.type, obj); }); event.onAdd.call(element, event, event.listener); element.addEventListener(event.type, event.stack, capture || event.capture); return event; }, addEvents: function (element, obj) { var events = {}; for (var z in obj) { events[z] = xtag.addEvent(element, z, obj[z]); } return events; }, removeEvent: function (element, type, event) { event = event || type; event.onRemove.call(element, event, event.listener); xtag.removePseudos(element, event._pseudos); event._attach.forEach(function(obj) { xtag.removeEvent(element, obj); }); element.removeEventListener(event.type, event.stack); }, removeEvents: function(element, obj){ for (var z in obj) xtag.removeEvent(element, obj[z]); }, fireEvent: function(element, type, options){ var event = doc.createEvent('CustomEvent'); options = options || {}; event.initCustomEvent(type, options.bubbles !== false, options.cancelable !== false, options.detail ); if (options.baseEvent) inheritEvent(event, options.baseEvent); element.dispatchEvent(event); } }; if (typeof define === 'function' && define.amd) define(xtag); else if (typeof module !== 'undefined' && module.exports) module.exports = xtag; else win.xtag = xtag; doc.addEventListener('WebComponentsReady', function(){ xtag.fireEvent(doc.body, 'DOMComponentsLoaded'); }); })();
/*
 * Generic IRQ chip layer: dynamic irq setup/teardown, chip/type/data
 * accessors and the per-flow-type highlevel irq handlers.
 *
 * NOTE(review): several identifiers in the imported text were redacted;
 * the names below (raw_spin_lock_irqsave/raw_spin_unlock_irqrestore,
 * *_keep_chip_data, irq_chip_set_defaults, kstat_incr_irqs_this_cpu,
 * chip_bus_sync_unlock, set_irq_nested_thread, set_irq_chip_and_handler*,
 * CONFIG_GENERIC_PENDING_IRQ) were reconstructed from their call sites
 * and surrounding doc comments — confirm against the tree's genirq code.
 */
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/*
 * Reset an irq descriptor to a pristine state; optionally preserves
 * chip_data for callers that manage it themselves.
 */
static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
{
        struct irq_desc *desc;
        unsigned long flags;

        desc = irq_to_desc(irq);
        if (!desc) {
                WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
                return;
        }

        /* Ensure we don't have left over values from a previous use of this irq */
        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->status = IRQ_DISABLED;
        desc->chip = &no_irq_chip;
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->msi_desc = NULL;
        desc->handler_data = NULL;
        if (!keep_chip_data)
                desc->chip_data = NULL;
        desc->action = NULL;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
        cpumask_setall(desc->affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
#endif
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 * dynamic_irq_init - initialize a dynamically allocated irq
 * @irq:        irq number to initialize
 */
void dynamic_irq_init(unsigned int irq)
{
        dynamic_irq_init_x(irq, false);
}

/**
 * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
 * @irq:        irq number to initialize
 *
 * does not set irq_to_desc(irq)->chip_data to NULL
 */
void dynamic_irq_init_keep_chip_data(unsigned int irq)
{
        dynamic_irq_init_x(irq, true);
}

/*
 * Tear down a dynamically allocated irq; refuses (with a warning) if an
 * action is still installed, i.e. free_irq() was not called first.
 */
static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
                return;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (desc->action) {
                raw_spin_unlock_irqrestore(&desc->lock, flags);
                WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
                        irq);
                return;
        }
        desc->msi_desc = NULL;
        desc->handler_data = NULL;
        if (!keep_chip_data)
                desc->chip_data = NULL;
        desc->handle_irq = handle_bad_irq;
        desc->chip = &no_irq_chip;
        desc->name = NULL;
        clear_kstat_irqs(desc);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:        irq number to initialize
 */
void dynamic_irq_cleanup(unsigned int irq)
{
        dynamic_irq_cleanup_x(irq, false);
}

/**
 * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
 * @irq:        irq number to initialize
 *
 * does not set irq_to_desc(irq)->chip_data to NULL
 */
void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
{
        dynamic_irq_cleanup_x(irq, true);
}

/**
 * set_irq_chip - set the irq chip for an irq
 * @irq:        irq number
 * @chip:       pointer to irq chip description structure
 */
int set_irq_chip(unsigned int irq, struct irq_chip *chip)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
                return -EINVAL;
        }

        if (!chip)
                chip = &no_irq_chip;

        raw_spin_lock_irqsave(&desc->lock, flags);
        irq_chip_set_defaults(chip);
        desc->chip = chip;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return 0;
}
EXPORT_SYMBOL(set_irq_chip);

/**
 * set_irq_type - set the irq trigger type for an irq
 * @irq:        irq number
 * @type:       IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int set_irq_type(unsigned int irq, unsigned int type)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret = -ENXIO;

        if (!desc) {
                printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
                return -ENODEV;
        }

        type &= IRQ_TYPE_SENSE_MASK;
        if (type == IRQ_TYPE_NONE)
                return 0;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = __irq_set_trigger(desc, irq, type);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
EXPORT_SYMBOL(set_irq_type);

/**
 * set_irq_data - set irq type data for an irq
 * @irq:        Interrupt number
 * @data:       Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int set_irq_data(unsigned int irq, void *data)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install controller data for IRQ%d\n", irq);
                return -EINVAL;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->handler_data = data;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}
EXPORT_SYMBOL(set_irq_data);

/**
 * set_irq_msi - set MSI descriptor data for an irq
 * @irq:        Interrupt number
 * @entry:      Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int set_irq_msi(unsigned int irq, struct msi_desc *entry)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install msi data for IRQ%d\n", irq);
                return -EINVAL;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->msi_desc = entry;
        if (entry)
                entry->irq = irq;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}

/**
 * set_irq_chip_data - set irq chip data for an irq
 * @irq:        Interrupt number
 * @data:       Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int set_irq_chip_data(unsigned int irq, void *data)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install chip data for IRQ%d\n", irq);
                return -EINVAL;
        }

        if (!desc->chip) {
                printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
                return -EINVAL;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->chip_data = data;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return 0;
}
EXPORT_SYMBOL(set_irq_chip_data);

/**
 * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
 *
 * @irq:        Interrupt number
 * @nest:       0 to clear / 1 to set the IRQ_NESTED_THREAD flag
 *
 * The IRQ_NESTED_THREAD flag indicates that on
 * request_threaded_irq() no separate interrupt thread should be
 * created for the irq as the handler are called nested in the
 * context of a demultiplexing interrupt handler thread.
 */
void set_irq_nested_thread(unsigned int irq, int nest)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (nest)
                desc->status |= IRQ_NESTED_THREAD;
        else
                desc->status &= ~IRQ_NESTED_THREAD;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(set_irq_nested_thread);

/*
 * default enable function
 */
static void default_enable(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        desc->chip->unmask(irq);
        desc->status &= ~IRQ_MASKED;
}

/*
 * default disable function
 */
static void default_disable(unsigned int irq)
{
}

/*
 * default startup function
 */
static unsigned int default_startup(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        desc->chip->enable(irq);
        return 0;
}

/*
 * default shutdown function
 */
static void default_shutdown(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        desc->chip->mask(irq);
        desc->status |= IRQ_MASKED;
}

/*
 * Fixup enable/disable function pointers
 */
void irq_chip_set_defaults(struct irq_chip *chip)
{
        if (!chip->enable)
                chip->enable = default_enable;
        if (!chip->disable)
                chip->disable = default_disable;
        if (!chip->startup)
                chip->startup = default_startup;
        /*
         * We use chip->disable, when the user provided its own. When
         * we have default_disable set for chip->disable, then we need
         * to use default_shutdown, otherwise the irq line is not
         * disabled on free_irq():
         */
        if (!chip->shutdown)
                chip->shutdown = chip->disable != default_disable ?
                        chip->disable : default_shutdown;
        if (!chip->name)
                chip->name = chip->typename;
        if (!chip->end)
                chip->end = dummy_irq_chip.end;
}

/* Mask and acknowledge the irq, preferring a combined mask_ack callback. */
static inline void mask_ack_irq(struct irq_desc *desc, int irq)
{
        if (desc->chip->mask_ack)
                desc->chip->mask_ack(irq);
        else {
                desc->chip->mask(irq);
                if (desc->chip->ack)
                        desc->chip->ack(irq);
        }
        desc->status |= IRQ_MASKED;
}

static inline void mask_irq(struct irq_desc *desc, int irq)
{
        if (desc->chip->mask) {
                desc->chip->mask(irq);
                desc->status |= IRQ_MASKED;
        }
}

static inline void unmask_irq(struct irq_desc *desc, int irq)
{
        if (desc->chip->unmask) {
                desc->chip->unmask(irq);
                desc->status &= ~IRQ_MASKED;
        }
}

/*
 * handle_nested_irq - Handle a nested irq from a irq thread
 * @irq:        the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * threads context.
 */
void handle_nested_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        int mask_this_irq = 0;
        irqreturn_t action_ret;

        might_sleep();

        raw_spin_lock_irq(&desc->lock);

        kstat_incr_irqs_this_cpu(irq, desc);

        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
                mask_this_irq = 1;
                if (!(desc->status & IRQ_LEVEL))
                        desc->status |= IRQ_PENDING;
                goto out_unlock;
        }

        desc->status |= IRQ_INPROGRESS;
        raw_spin_unlock_irq(&desc->lock);

        action_ret = action->thread_fn(action->irq, action->dev_id);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock_irq(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;

out_unlock:
        if (unlikely(mask_this_irq)) {
                chip_bus_lock(irq, desc);
                mask_irq(desc, irq);
                chip_bus_sync_unlock(irq, desc);
        }
        raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:        the interrupt number
 * @desc:       the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irqaction *action;
        irqreturn_t action_ret;

        raw_spin_lock(&desc->lock);

        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out_unlock;
        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED)))
                goto out_unlock;

        desc->status |= IRQ_INPROGRESS;
        raw_spin_unlock(&desc->lock);

        action_ret = handle_IRQ_event(irq, action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;
out_unlock:
        raw_spin_unlock(&desc->lock);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq:        the interrupt number
 * @desc:       the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require to mask the interrupt and unmask
 * it after the associated handler has acknowledged the device, so the
 * interrupt line is back to inactive.
 */
void handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irqaction *action;
        irqreturn_t action_ret;

        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc, irq);

        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out_unlock;
        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If its disabled or no action available
         * keep it masked and get out of here
         */
        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED)))
                goto out_unlock;

        desc->status |= IRQ_INPROGRESS;
        raw_spin_unlock(&desc->lock);

        action_ret = handle_IRQ_event(irq, action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;

        if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
                unmask_irq(desc, irq);
out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:        the interrupt number
 * @desc:       the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irqaction *action;
        irqreturn_t action_ret;

        raw_spin_lock(&desc->lock);

        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out;

        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If its disabled or no action available
         * then mask it and get out of here:
         */
        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
                desc->status |= IRQ_PENDING;
                mask_irq(desc, irq);
                goto out;
        }

        desc->status |= IRQ_INPROGRESS;
        desc->status &= ~IRQ_PENDING;
        raw_spin_unlock(&desc->lock);

        action_ret = handle_IRQ_event(irq, action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;
out:
        desc->chip->eoi(irq);

        raw_spin_unlock(&desc->lock);
}

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:        the interrupt number
 * @desc:       the interrupt description structure for this irq
 *
 * Interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires to reenable the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

        /*
         * If we're currently running this IRQ, or its disabled,
         * we shouldn't process the IRQ. Mark it pending, handle
         * the necessary masking and go out
         */
        if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
                    !desc->action)) {
                desc->status |= (IRQ_PENDING | IRQ_MASKED);
                mask_ack_irq(desc, irq);
                goto out_unlock;
        }
        kstat_incr_irqs_this_cpu(irq, desc);

        /* Start handling the irq */
        if (desc->chip->ack)
                desc->chip->ack(irq);

        /* Mark the IRQ currently in progress.*/
        desc->status |= IRQ_INPROGRESS;

        do {
                struct irqaction *action = desc->action;
                irqreturn_t action_ret;

                if (unlikely(!action)) {
                        mask_irq(desc, irq);
                        goto out_unlock;
                }

                /*
                 * When another irq arrived while we were handling
                 * one, we could have masked the irq.
                 * Re-enable it, if it was not disabled in meantime.
                 */
                if (unlikely((desc->status &
                               (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
                              (IRQ_PENDING | IRQ_MASKED))) {
                        unmask_irq(desc, irq);
                }

                desc->status &= ~IRQ_PENDING;
                raw_spin_unlock(&desc->lock);
                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);
                raw_spin_lock(&desc->lock);

        } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

        desc->status &= ~IRQ_INPROGRESS;
out_unlock:
        raw_spin_unlock(&desc->lock);
}

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:        the interrupt number
 * @desc:       the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
        irqreturn_t action_ret;

        kstat_incr_irqs_this_cpu(irq, desc);

        if (desc->chip->ack)
                desc->chip->ack(irq);

        action_ret = handle_IRQ_event(irq, desc->action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        if (desc->chip->eoi)
                desc->chip->eoi(irq);
}

/*
 * Install a flow handler for an irq; is_chained marks handlers that
 * demultiplex other interrupts and must never be requested/probed.
 */
void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install type control for IRQ%d\n", irq);
                return;
        }

        if (!handle)
                handle = handle_bad_irq;
        else if (desc->chip == &no_irq_chip) {
                printk(KERN_WARNING "Trying to install %sinterrupt handler "
                       "for IRQ%d\n", is_chained ? "chained " : "", irq);
                /*
                 * Some ARM implementations install a handler for really dumb
                 * interrupt hardware without setting an irq_chip. This worked
                 * with the ARM no_irq_chip but the check in setup_irq would
                 * prevent us to setup the interrupt at all. Switch it to
                 * dummy_irq_chip for easy transition.
                 */
                desc->chip = &dummy_irq_chip;
        }

        chip_bus_lock(irq, desc);
        raw_spin_lock_irqsave(&desc->lock, flags);

        /* Uninstall? */
        if (handle == handle_bad_irq) {
                if (desc->chip != &no_irq_chip)
                        mask_ack_irq(desc, irq);
                desc->status |= IRQ_DISABLED;
                desc->depth = 1;
        }
        desc->handle_irq = handle;
        desc->name = name;

        if (handle != handle_bad_irq && is_chained) {
                desc->status &= ~IRQ_DISABLED;
                desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
                desc->depth = 0;
                desc->chip->startup(irq);
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);

void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
                         irq_flow_handler_t handle)
{
        set_irq_chip(irq, chip);
        __set_irq_handler(irq, handle, 0, NULL);
}

void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
                              irq_flow_handler_t handle, const char *name)
{
        set_irq_chip(irq, chip);
        __set_irq_handler(irq, handle, 0, name);
}

void set_irq_noprobe(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
                return;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->status |= IRQ_NOPROBE;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

void set_irq_probe(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
                return;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->status &= ~IRQ_NOPROBE;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
#include "testutil.hpp"

// Endpoints used by every test; assigned per transport in main().
const char *bind_address = 0;
const char *connect_address = 0;

// SHALL route outgoing messages to available peers using a round-robin
// strategy: one bound DEALER fans N requests out over N connected REPs,
// and each REP is expected to receive exactly one.
// (Function name redacted; presumably test_round_robin_out — TODO confirm.)
void <API key> (void *ctx)
{
    void *dealer = zmq_socket (ctx, ZMQ_DEALER);
    assert (dealer);

    int rc = zmq_bind (dealer, bind_address);
    assert (rc == 0);

    const size_t services = 5;
    void *rep [services];
    for (size_t peer = 0; peer < services; ++peer) {
        rep [peer] = zmq_socket (ctx, ZMQ_REP);
        assert (rep [peer]);

        // Bounded receive timeout so a mis-routed message fails the test
        // instead of hanging it.
        int timeout = 250;
        rc = zmq_setsockopt (rep [peer], ZMQ_RCVTIMEO, &timeout, sizeof (int));
        assert (rc == 0);

        rc = zmq_connect (rep [peer], connect_address);
        assert (rc == 0);
    }

    // Wait for connections.
    rc = zmq_poll (0, 0, 100);
    assert (rc == 0);

    // Send all requests
    for (size_t i = 0; i < services; ++i)
        s_send_seq (dealer, 0, "ABC", SEQ_END);

    // Expect every REP got one message
    zmq_msg_t msg;
    zmq_msg_init (&msg);
    for (size_t peer = 0; peer < services; ++peer)
        s_recv_seq (rep [peer], "ABC", SEQ_END);

    rc = zmq_msg_close (&msg);
    assert (rc == 0);

    close_zero_linger (dealer);
    for (size_t peer = 0; peer < services; ++peer)
        close_zero_linger (rep [peer]);

    // Wait for disconnects.
    rc = zmq_poll (0, 0, 100);
    assert (rc == 0);
}

// SHALL receive incoming messages from its peers using a fair-queuing
// strategy: N connected DEALERs each send to one bound DEALER, which must
// drain one message from every sender.
void test_fair_queue_in (void *ctx)
{
    void *receiver = zmq_socket (ctx, ZMQ_DEALER);
    assert (receiver);

    int timeout = 250;
    int rc = zmq_setsockopt (receiver, ZMQ_RCVTIMEO, &timeout, sizeof (int));
    assert (rc == 0);

    rc = zmq_bind (receiver, bind_address);
    assert (rc == 0);

    const size_t services = 5;
    void *senders [services];
    for (size_t peer = 0; peer < services; ++peer) {
        senders [peer] = zmq_socket (ctx, ZMQ_DEALER);
        assert (senders [peer]);

        rc = zmq_setsockopt (senders [peer], ZMQ_RCVTIMEO, &timeout, sizeof (int));
        assert (rc == 0);

        rc = zmq_connect (senders [peer], connect_address);
        assert (rc == 0);
    }

    zmq_msg_t msg;
    rc = zmq_msg_init (&msg);
    assert (rc == 0);

    // Sanity: a single sender round-trips twice before the fan-in check.
    s_send_seq (senders [0], "A", SEQ_END);
    s_recv_seq (receiver, "A", SEQ_END);

    s_send_seq (senders [0], "A", SEQ_END);
    s_recv_seq (receiver, "A", SEQ_END);

    // send our requests
    for (size_t peer = 0; peer < services; ++peer)
        s_send_seq (senders [peer], "B", SEQ_END);

    // Wait for data.
    rc = zmq_poll (0, 0, 50);
    assert (rc == 0);

    // handle the requests
    for (size_t peer = 0; peer < services; ++peer)
        s_recv_seq (receiver, "B", SEQ_END);

    rc = zmq_msg_close (&msg);
    assert (rc == 0);

    close_zero_linger (receiver);
    for (size_t peer = 0; peer < services; ++peer)
        close_zero_linger (senders [peer]);

    // Wait for disconnects.
    rc = zmq_poll (0, 0, 100);
    assert (rc == 0);
}

// SHALL, on peer disconnect, destroy the peer's double queue and discard
// any messages it contains — in-flight messages must not survive a
// disconnect/reconnect cycle.
// (Function name redacted; presumably test_destroy_queue_on_disconnect —
// TODO confirm.)
void <API key> (void *ctx)
{
    void *A = zmq_socket (ctx, ZMQ_DEALER);
    assert (A);

    int rc = zmq_bind (A, bind_address);
    assert (rc == 0);

    void *B = zmq_socket (ctx, ZMQ_DEALER);
    assert (B);

    rc = zmq_connect (B, connect_address);
    assert (rc == 0);

    // Send a message in both directions
    s_send_seq (A, "ABC", SEQ_END);
    s_send_seq (B, "DEF", SEQ_END);

    rc = zmq_disconnect (B, connect_address);
    assert (rc == 0);

    // Disconnect may take time and need command processing.
    zmq_pollitem_t poller [2] = { { A, 0, 0, 0 }, { B, 0, 0, 0 } };
    rc = zmq_poll (poller, 2, 100);
    assert (rc == 0);
    rc = zmq_poll (poller, 2, 100);
    assert (rc == 0);

    // No messages should be available, sending should fail.
    zmq_msg_t msg;
    zmq_msg_init (&msg);

    rc = zmq_send (A, 0, 0, ZMQ_DONTWAIT);
    assert (rc == -1);
    assert (errno == EAGAIN);

    rc = zmq_msg_recv (&msg, A, ZMQ_DONTWAIT);
    assert (rc == -1);
    assert (errno == EAGAIN);

    // After a reconnect of B, the messages should still be gone
    rc = zmq_connect (B, connect_address);
    assert (rc == 0);

    rc = zmq_msg_recv (&msg, A, ZMQ_DONTWAIT);
    assert (rc == -1);
    assert (errno == EAGAIN);

    rc = zmq_msg_recv (&msg, B, ZMQ_DONTWAIT);
    assert (rc == -1);
    assert (errno == EAGAIN);

    rc = zmq_msg_close (&msg);
    assert (rc == 0);

    close_zero_linger (A);
    close_zero_linger (B);

    // Wait for disconnects.
    rc = zmq_poll (0, 0, 100);
    assert (rc == 0);
}

// SHALL block on sending, or return a suitable error, when it has no
// connected peers: both non-blocking and (send-timeout-bounded) blocking
// sends must fail with EAGAIN on an unconnected DEALER.
// (Function name redacted; presumably test_block_on_send_no_peers —
// TODO confirm.)
void <API key> (void *ctx)
{
    void *sc = zmq_socket (ctx, ZMQ_DEALER);
    assert (sc);

    int timeout = 250;
    int rc = zmq_setsockopt (sc, ZMQ_SNDTIMEO, &timeout, sizeof (timeout));
    assert (rc == 0);

    rc = zmq_send (sc, 0, 0, ZMQ_DONTWAIT);
    assert (rc == -1);
    assert (errno == EAGAIN);

    rc = zmq_send (sc, 0, 0, 0);
    assert (rc == -1);
    assert (errno == EAGAIN);

    rc = zmq_close (sc);
    assert (rc == 0);
}

// Runs every spec test over both an in-process and a TCP transport.
int main (void)
{
    // Redacted; presumably setup_test_environment() — TODO confirm.
    <API key>();
    void *ctx = zmq_ctx_new ();
    assert (ctx);

    const char *binds [] = { "inproc://a", "tcp://127.0.0.1:5555" };
    const char *connects [] = { "inproc://a", "tcp://localhost:5555" };

    for (int transports = 0; transports < 2; ++transports) {
        bind_address = binds [transports];
        connect_address = connects [transports];

        // SHALL route outgoing messages to available peers using a round-robin
        // strategy.
        <API key> (ctx);

        // SHALL receive incoming messages from its peers using a fair-queuing
        // strategy.
        test_fair_queue_in (ctx);

        // SHALL block on sending, or return a suitable error, when it has no connected peers.
        <API key> (ctx);

        // SHALL create a double queue when a peer connects to it. If this peer
        // disconnects, the DEALER socket SHALL destroy its double queue and SHALL
        // discard any messages it contains.
        // <API key> (ctx);
    }

    int rc = zmq_ctx_term (ctx);
    assert (rc == 0);

    return 0 ;
}
// NOTE(review): many identifiers in this copy are redacted as <API key>
// (element IDs, chrome.send message names, locals).  Code below is
// unchanged and token-identical; only comments were added or reflowed.
/**
 * @fileoverview New tab page
 * This is the main code for the new tab page used by touch-enabled Chrome
 * browsers. For now this is still a prototype.
 */

// Use an anonymous function to enable strict mode just for this file (which
// will be concatenated with other files when embedded in Chrome
cr.define('ntp', function() {
  'use strict';

  /**
   * NewTabView instance.
   * @type {!Object|undefined}
   */
  var newTabView;

  /**
   * The '<API key>' element.
   * (Redacted; from its uses below — .hidden, classList 'inactive' /
   * 'card-changed', webkitTransitionEnd — this is the notification
   * container element.  TODO confirm.)
   * @type {!Element|undefined}
   */
  var <API key>;

  /**
   * If non-null, an info bubble for showing messages to the user. It points at
   * the Most Visited label, and is used to draw more attention to the
   * navigation dot UI.
   * @type {!Element|undefined}
   */
  var promoBubble;

  /**
   * If non-null, a bubble confirming that the user has signed into sync. It
   * points at the login status at the top of the page.
   * @type {!Element|undefined}
   */
  var loginBubble;

  /**
   * true if |loginBubble| should be shown.
   * @type {boolean}
   */
  var <API key> = false;

  /**
   * The '<API key>' element.
   * (Redacted; used as the footer "other sessions/devices" menu button.)
   * @type {!Element|undefined}
   */
  var otherSessionsButton;

  /**
   * The time when all sections are ready.
   * @type {number|undefined}
   * @private
   */
  var startTime;

  /**
   * The time in milliseconds for most transitions. This should match what's
   * in new_tab.css. Unfortunately there's no better way to try to time
   * something to occur until after a transition has completed.
   * @type {number}
   * @const
   */
  var <API key> = 500;

  /**
   * See description for these values in ntp_stats.h.
   * @enum {number}
   */
  var NtpFollowAction = {
    CLICKED_TILE: 11,
    <API key>: 12,
    OTHER: 13
  };

  /**
   * Creates a NewTabView object. NewTabView extends PageListView with
   * new tab UI specific logics.
   * @constructor
   * @extends {PageListView}
   */
  function NewTabView() {
    var pageSwitcherStart = null;
    var pageSwitcherEnd = null;
    // Page switchers only exist when the apps section is shown.
    if (loadTimeData.getValue('showApps')) {
      pageSwitcherStart = getRequiredElement('page-switcher-start');
      pageSwitcherEnd = getRequiredElement('page-switcher-end');
    }
    this.initialize(getRequiredElement('page-list'),
                    getRequiredElement('dot-list'),
                    getRequiredElement('card-slider-frame'),
                    getRequiredElement('trash'),
                    pageSwitcherStart, pageSwitcherEnd);
  }

  NewTabView.prototype = {
    __proto__: ntp.PageListView.prototype,

    /** @override */
    appendTilePage: function(page, title, titleIsEditable, opt_refNode) {
      ntp.PageListView.prototype.appendTilePage.apply(this, arguments);
      // Adding a page can move the bubble anchor; reposition asynchronously.
      if (promoBubble)
        window.setTimeout(promoBubble.reposition.bind(promoBubble), 0);
    }
  };

  /**
   * Invoked at startup once the DOM is available to initialize the app.
   */
  function onLoad() {
    sectionsToWaitFor = 0;
    if (loadTimeData.getBoolean('showMostvisited'))
      sectionsToWaitFor++;
    if (loadTimeData.getBoolean('showApps')) {
      sectionsToWaitFor++;
      if (loadTimeData.getBoolean('<API key>')) {
        $('<API key>').addEventListener('click', function() {
          chrome.send('<API key>');
        });
        $('<API key>').addEventListener('click', function() {
          chrome.send('onLearnMore');
        });
      }
    }
    if (loadTimeData.getBoolean('<API key>'))
      sectionsToWaitFor++;
    measureNavDots();

    // Load the current theme colors.
    themeChanged();

    newTabView = new NewTabView();

    <API key> = getRequiredElement('<API key>');
    <API key>.addEventListener(
        'webkitTransitionEnd', <API key>);

    if (loadTimeData.getBoolean('showRecentlyClosed')) {
      cr.ui.decorate($('<API key>'), ntp.RecentMenuButton);
      chrome.send('<API key>');
    } else {
      $('<API key>').hidden = true;
    }

    if (loadTimeData.getBoolean('<API key>')) {
      otherSessionsButton = getRequiredElement('<API key>');
      cr.ui.decorate(otherSessionsButton, ntp.<API key>);
      otherSessionsButton.initialize(loadTimeData.getBoolean('isUserSignedIn'));
    } else {
      getRequiredElement('<API key>').hidden = true;
    }

    if (loadTimeData.getBoolean('showMostvisited')) {
      var mostVisited = new ntp.MostVisitedPage();
      // Move the footer into the most visited page if we are in "bare minimum"
      // mode.
      if (document.body.classList.contains('bare-minimum'))
        mostVisited.appendFooter(getRequiredElement('footer'));
      newTabView.appendTilePage(mostVisited,
                                loadTimeData.getString('mostvisited'),
                                false);
      chrome.send('getMostVisited');
    }

    if (loadTimeData.getBoolean('<API key>')) {
      // Suggestions page is loaded lazily via a script tag.
      var suggestionsScript = document.createElement('script');
      suggestionsScript.src = 'suggestions_page.js';
      suggestionsScript.onload = function() {
        newTabView.appendTilePage(new ntp.SuggestionsPage(),
                                  loadTimeData.getString('suggestions'),
                                  false,
                                  (newTabView.appsPages.length > 0) ?
                                      newTabView.appsPages[0] : null);
        chrome.send('getSuggestions');
        cr.dispatchSimpleEvent(document, 'sectionready', true, true);
      };
      document.querySelector('head').appendChild(suggestionsScript);
    }

    if (!loadTimeData.getBoolean('showWebStoreIcon')) {
      var webStoreIcon = $('<API key>');
      // Not all versions of the NTP have a footer, so this may not exist.
      if (webStoreIcon)
        webStoreIcon.hidden = true;
    } else {
      var webStoreLink = loadTimeData.getString('webStoreLink');
      var url = appendParam(webStoreLink, 'utm_source', 'chrome-ntp-launcher');
      $('<API key>').href = url;
      $('<API key>').addEventListener('click', <API key>);
    }

    // We need to wait for all the footer menu setup to be completed before
    // we can compute its layout.
    layoutFooter();

    if (loadTimeData.getString('<API key>')) {
      loginBubble = new cr.ui.Bubble;
      loginBubble.anchorNode = $('login-container');
      loginBubble.arrowLocation = cr.ui.ArrowLocation.TOP_END;
      loginBubble.bubbleAlignment =
          cr.ui.BubbleAlignment.<API key>;
      loginBubble.<API key> = 2000;
      loginBubble.closeButtonVisible = false;

      $('<API key>').onclick = function() {
        chrome.send('showAdvancedLoginUI');
      };
      $('<API key>').onclick = loginBubble.hide.bind(loginBubble);
      var bubbleContent = $('<API key>');
      loginBubble.content = bubbleContent;

      // The anchor node won't be updated until updateLogin is called so don't
      // show the bubble yet.
      <API key> = true;
    }

    if (loadTimeData.valueExists('bubblePromoText')) {
      promoBubble = new cr.ui.Bubble;
      promoBubble.anchorNode = getRequiredElement('promo-bubble-anchor');
      promoBubble.arrowLocation = cr.ui.ArrowLocation.BOTTOM_START;
      promoBubble.bubbleAlignment = cr.ui.BubbleAlignment.ENTIRELY_VISIBLE;
      promoBubble.<API key> = 2000;
      promoBubble.content = parseHtmlSubset(
          loadTimeData.getString('bubblePromoText'), ['BR']);

      var bubbleLink = promoBubble.querySelector('a');
      if (bubbleLink) {
        bubbleLink.addEventListener('click', function(e) {
          chrome.send('<API key>');
        });
      }

      promoBubble.handleCloseEvent = function() {
        promoBubble.hide();
        chrome.send('bubblePromoClosed');
      };
      promoBubble.show();
      chrome.send('bubblePromoViewed');
    }

    var loginContainer = getRequiredElement('login-container');
    loginContainer.addEventListener('click', showSyncLoginUI);
    if (loadTimeData.getBoolean('shouldShowSyncLogin'))
      chrome.send('initializeSyncLogin');

    // Redacted call; presumably doWhenAllSectionsReady (see below).
    <API key>(function() {
      // Tell the slider about the pages.
      newTabView.updateSliderCards();
      // Mark the current page.
      newTabView.cardSlider.currentCardValue.navigationDot.classList.add(
          'selected');

      if (loadTimeData.valueExists('<API key>')) {
        var promoText = loadTimeData.getString('<API key>');
        var tags = ['IMG'];
        var attrs = {
          // Only data: image URLs of safe raster types are allowed through.
          src: function(node, value) {
            return node.tagName == 'IMG' &&
                   /^data\:image\/(?:png|gif|jpe?g)/.test(value);
          },
        };
        var promo = parseHtmlSubset(promoText, tags, attrs);
        var promoLink = promo.querySelector('a');
        if (promoLink) {
          promoLink.addEventListener('click', function(e) {
            chrome.send('<API key>');
          });
        }
        showNotification(promo, [], function() {
          chrome.send('<API key>');
        }, 60000);
        chrome.send('<API key>');
      }

      cr.dispatchSimpleEvent(document, 'ntpLoaded', true, true);
      document.documentElement.classList.remove('starting-up');

      startTime = Date.now();
    });

    <API key>();  // From webui/js/util.js.
    cr.ui.FocusManager.<API key>();
  }

  /**
   * Launches the chrome web store app with the chrome-ntp-launcher
   * source.
   * @param {Event} e The click event.
   */
  function <API key>(e) {
    chrome.send('<API key>',
        [encodeURIComponent(this.href),
         ntp.APP_LAUNCH.NTP_WEBSTORE_FOOTER]);
  }

  /*
   * The number of sections to wait on.
   * @type {number}
   */
  var sectionsToWaitFor = -1;

  /**
   * Queued callbacks which lie in wait for all sections to be ready.
   * @type {array}
   */
  var readyCallbacks = [];

  /**
   * Fired as each section of pages becomes ready.
   * @param {Event} e Each page's synthetic DOM event.
   */
  document.addEventListener('sectionready', function(e) {
    if (--sectionsToWaitFor <= 0) {
      while (readyCallbacks.length) {
        readyCallbacks.shift()();
      }
    }
  });

  /**
   * This is used to simulate a fire-once event (i.e. $(document).ready() in
   * jQuery or Y.on('domready') in YUI. If all sections are ready, the callback
   * is fired right away. If all pages are not ready yet, the function is queued
   * for later execution.
   * @param {function} callback The work to be done when ready.
   */
  function <API key>(callback) {
    assert(typeof callback == 'function');
    if (sectionsToWaitFor > 0)
      readyCallbacks.push(callback);
    else
      window.setTimeout(callback, 0);  // Do soon after, but asynchronously.
  }

  /**
   * Measure the width of a nav dot with a given title.
   * @param {string} id The loadTimeData ID of the desired title.
   * @return {number} The width of the nav dot.
   */
  function measureNavDot(id) {
    var measuringDiv = $('fontMeasuringDiv');
    measuringDiv.textContent = loadTimeData.getString(id);
    // The 4 is for border and padding.
    return Math.max(measuringDiv.clientWidth * 1.15 + 4, 80);
  }

  /**
   * Fills in an invisible div with the longest dot title string so that
   * its length may be measured and the nav dots sized accordingly.
   */
  function measureNavDots() {
    var pxWidth = measureNavDot('appDefaultPageName');
    if (loadTimeData.getBoolean('showMostvisited'))
      pxWidth = Math.max(measureNavDot('mostvisited'), pxWidth);

    var styleElement = document.createElement('style');
    styleElement.type = 'text/css';
    // max-width is used because if we run out of space, the nav dots will be
    // shrunk.
    styleElement.textContent = '.dot { max-width: ' + pxWidth + 'px; }';
    document.querySelector('head').appendChild(styleElement);
  }

  /**
   * Layout the footer so that the nav dots stay centered.
   */
  function layoutFooter() {
    // We need the image to be loaded.
    var logo = $('logo-img');
    var logoImg = logo.querySelector('img');
    if (!logoImg.complete) {
      logoImg.onload = layoutFooter;
      return;
    }

    var menu = $('<API key>');
    if (menu.clientWidth > logoImg.width)
      logo.style.WebkitFlex = '0 1 ' + menu.clientWidth + 'px';
    else
      menu.style.WebkitFlex = '0 1 ' + logoImg.width + 'px';
  }

  // Reloads the theme stylesheet (cache-busted) and refreshes attribution.
  function themeChanged(opt_hasAttribution) {
    $('themecss').href = 'chrome://theme/css/new_tab_theme.css?' + Date.now();

    if (typeof opt_hasAttribution != 'undefined') {
      document.documentElement.setAttribute('hasattribution',
                                            opt_hasAttribution);
    }

    updateAttribution();
  }

  // Records bookmark-bar attachment state on the document element (for CSS).
  function <API key>(attached) {
    document.documentElement.setAttribute('bookmarkbarattached', attached);
  }

  /**
   * Attributes the attribution image at the bottom left.
   */
  function updateAttribution() {
    var attribution = $('attribution');
    if (document.documentElement.getAttribute('hasattribution') == 'true') {
      attribution.hidden = false;
    } else {
      attribution.hidden = true;
    }
  }

  /**
   * Timeout ID.
   * @type {number}
   */
  var notificationTimeout = 0;

  /**
   * Shows the notification bubble.
   * @param {string|Node} message The notification message or node to use as
   *     message.
   * @param {Array.<{text: string, action: function()}>} links An array of
   *     records describing the links in the notification. Each record should
   *     have a 'text' attribute (the display string) and an 'action' attribute
   *     (a function to run when the link is activated).
   * @param {Function} opt_closeHandler The callback invoked if the user
   *     manually dismisses the notification.
   */
  function showNotification(message, links, opt_closeHandler, opt_timeout) {
    window.clearTimeout(notificationTimeout);

    var span = document.querySelector('#notification > span');
    if (typeof message == 'string') {
      span.textContent = message;
    } else {
      span.textContent = '';  // Remove all children.
      span.appendChild(message);
    }

    var linksBin = $('notificationLinks');
    linksBin.textContent = '';
    for (var i = 0; i < links.length; i++) {
      var link = linksBin.ownerDocument.createElement('div');
      link.textContent = links[i].text;
      link.action = links[i].action;
      link.onclick = function() {
        this.action();
        hideNotification();
      };
      link.setAttribute('role', 'button');
      link.setAttribute('tabindex', 0);
      link.className = 'link-button';
      linksBin.appendChild(link);
    }

    function closeFunc(e) {
      if (opt_closeHandler)
        opt_closeHandler();
      hideNotification();
    }

    document.querySelector('#notification button').onclick = closeFunc;
    document.addEventListener('dragstart', closeFunc);

    <API key>.hidden = false;
    <API key>();

    // Hide the notification if a card change happens while it is visible.
    newTabView.cardSlider.frame.addEventListener(
        'cardSlider:card_change_ended', onCardChangeEnded);

    var timeout = opt_timeout || 10000;
    notificationTimeout = window.setTimeout(hideNotification, timeout);
  }

  /**
   * Hide the notification bubble.
   */
  function hideNotification() {
    <API key>.classList.add('inactive');

    newTabView.cardSlider.frame.removeEventListener(
        'cardSlider:card_change_ended', onCardChangeEnded);
  }

  /**
   * Happens when 1 or more consecutive card changes end.
   * @param {Event} e The cardSlider:card_change_ended event.
   */
  function onCardChangeEnded(e) {
    // If we ended on the same page as we started, ignore.
    if (newTabView.cardSlider.currentCardValue.notification)
      return;

    // Hide the notification the old page.
    <API key>.classList.add('card-changed');

    <API key>();
  }

  /**
   * Move and show the notification on the current page.
   */
  function <API key>() {
    var page = newTabView.cardSlider.currentCardValue;
    <API key>(function() {
      if (page != newTabView.cardSlider.currentCardValue)
        return;

      // NOTE: This moves the notification to inside of the current page.
      page.notification = <API key>;

      // Reveal the notification and instruct it to hide itself if ignored.
      <API key>.classList.remove('inactive');

      // Gives the browser time to apply this rule before we remove it (causing
      // a transition).
      window.setTimeout(function() {
        <API key>.classList.remove('card-changed');
      }, 0);
    });
  }

  /**
   * When done fading out, set hidden to true so the notification can't be
   * tabbed to or clicked.
   * @param {Event} e The webkitTransitionEnd event.
   */
  function <API key>(e) {
    if (<API key>.classList.contains('inactive'))
      <API key>.hidden = true;
  }

  // Populates the recently-closed menu and re-lays-out the footer.
  function <API key>(dataItems) {
    $('<API key>').dataItems = dataItems;
    layoutFooter();
  }

  // WebUI callback: installs most-visited data and signals section readiness.
  function setMostVisitedPages(data, hasBlacklistedUrls) {
    newTabView.mostVisitedPage.data = data;
    cr.dispatchSimpleEvent(document, 'sectionready', true, true);
  }

  // WebUI callback: installs suggestions data.
  function setSuggestionsPages(data, hasBlacklistedUrls) {
    newTabView.suggestionsPage.data = data;
  }

  /**
   * Set the dominant color for a node. This will be called in response to
   * a redacted WebUI message. The node represented by |id| better have a
   * setter for stripeColor.
   * @param {string} id The ID of a node.
   * @param {string} color The color represented as a CSS string.
   */
  function <API key>(id, color) {
    var node = $(id);
    if (node)
      node.stripeColor = color;
  }

  /**
   * Updates the text displayed in the login container. If there is no text then
   * the login container is hidden.
   * @param {string} loginHeader The first line of text.
   * @param {string} loginSubHeader The second line of text.
   * @param {string} iconURL The url for the login status icon. If this is null
   *     then the login status icon is hidden.
   * @param {boolean} isUserSignedIn Indicates if the user is signed in or not.
   */
  function updateLogin(loginHeader, loginSubHeader, iconURL, isUserSignedIn) {
    if (loginHeader || loginSubHeader) {
      $('login-container').hidden = false;
      $('login-status-header').innerHTML = loginHeader;
      $('<API key>').innerHTML = loginSubHeader;
      $('card-slider-frame').classList.add('showing-login-area');

      if (iconURL) {
        $('<API key>').style.backgroundImage = url(iconURL);
        $('<API key>').classList.add('login-status-icon');
      } else {
        $('<API key>').style.backgroundImage = 'none';
        $('<API key>').classList.remove(
            'login-status-icon');
      }
    } else {
      $('login-container').hidden = true;
      $('card-slider-frame').classList.remove('showing-login-area');
    }
    if (<API key>) {
      window.setTimeout(loginBubble.show.bind(loginBubble), 0);
      chrome.send('loginMessageSeen');
      <API key> = false;
    } else if (loginBubble) {
      loginBubble.reposition();
    }
    if (otherSessionsButton) {
      otherSessionsButton.updateSignInState(isUserSignedIn);
      layoutFooter();
    }
  }

  /**
   * Show the sync login UI.
   * @param {Event} e The click event.
   */
  function showSyncLoginUI(e) {
    // Redacted; presumably getBoundingClientRect — TODO confirm.
    var rect = e.currentTarget.<API key>();
    chrome.send('showSyncLoginUI',
                [rect.left, rect.top, rect.width, rect.height]);
  }

  /**
   * Logs the time to click for the specified item.
   * @param {string} item The item to log the time-to-click.
   */
  function logTimeToClick(item) {
    var timeToClick = Date.now() - startTime;
    chrome.send('logTimeToClick',
        ['NewTabPage.TimeToClick' + item, timeToClick]);
  }

  /**
   * Wrappers to forward the callback to corresponding PageListView member.
   */
  function appAdded() {
    return newTabView.appAdded.apply(newTabView, arguments);
  }

  function appMoved() {
    return newTabView.appMoved.apply(newTabView, arguments);
  }

  function appRemoved() {
    return newTabView.appRemoved.apply(newTabView, arguments);
  }

  function <API key>() {
    return newTabView.<API key>.apply(newTabView, arguments);
  }

  function <API key>() {
    return newTabView.<API key>.apply(newTabView, arguments);
  }

  function appsReordered() {
    return newTabView.appsReordered.apply(newTabView, arguments);
  }

  function enterRearrangeMode() {
    return newTabView.enterRearrangeMode.apply(newTabView, arguments);
  }

  // Forwards foreign-session data to the footer button, then re-layouts.
  function setForeignSessions(sessionList, isTabSyncEnabled) {
    if (otherSessionsButton) {
      otherSessionsButton.setForeignSessions(sessionList, isTabSyncEnabled);
      layoutFooter();
    }
  }

  function getAppsCallback() {
    return newTabView.getAppsCallback.apply(newTabView, arguments);
  }

  function getAppsPageIndex() {
    return newTabView.getAppsPageIndex.apply(newTabView, arguments);
  }

  function getCardSlider() {
    return newTabView.cardSlider;
  }

  function leaveRearrangeMode() {
    return newTabView.leaveRearrangeMode.apply(newTabView, arguments);
  }

  function saveAppPageName() {
    return newTabView.saveAppPageName.apply(newTabView, arguments);
  }

  function <API key>(appId) {
    newTabView.highlightAppId = appId;
  }

  // Return an object with all the exports
  return {
    appAdded: appAdded,
    appMoved: appMoved,
    appRemoved: appRemoved,
    <API key>: <API key>,
    <API key>: <API key>,
    enterRearrangeMode: enterRearrangeMode,
    getAppsCallback: getAppsCallback,
    getAppsPageIndex: getAppsPageIndex,
    getCardSlider: getCardSlider,
    onLoad: onLoad,
    leaveRearrangeMode: leaveRearrangeMode,
    logTimeToClick: logTimeToClick,
    NtpFollowAction: NtpFollowAction,
    saveAppPageName: saveAppPageName,
    <API key>: <API key>,
    <API key>: <API key>,
    setForeignSessions: setForeignSessions,
    setMostVisitedPages: setMostVisitedPages,
    setSuggestionsPages: setSuggestionsPages,
    <API key>: <API key>,
    <API key>: <API key>,
    showNotification: showNotification,
    themeChanged: themeChanged,
    updateLogin: updateLogin
  };
});

document.addEventListener('DOMContentLoaded', ntp.onLoad);

var toCssPx = cr.ui.toCssPx;
#ifndef <API key> #define <API key> #include <limits> #include <boost/type_traits/ice.hpp> #include <boost/mpl/and.hpp> #include <boost/mpl/or.hpp> #include <boost/mpl/not.hpp> #include <boost/icl/detail/notate.hpp> #include <boost/icl/detail/design_config.hpp> #include <boost/icl/detail/on_absorbtion.hpp> #include <boost/icl/detail/interval_map_algo.hpp> #include <boost/icl/<API key>.hpp> #include <boost/icl/type_traits/<API key>.hpp> #include <boost/icl/map.hpp> namespace boost{namespace icl { template<class DomainT, class CodomainT> struct mapping_pair { DomainT key; CodomainT data; mapping_pair():key(), data(){} mapping_pair(const DomainT& key_value, const CodomainT& data_value) :key(key_value), data(data_value){} mapping_pair(const std::pair<DomainT,CodomainT>& std_pair) :key(std_pair.first), data(std_pair.second){} }; /** \brief Implements a map as a map of intervals (base class) */ template < class SubType, typename DomainT, typename CodomainT, class Traits = icl::partial_absorber, ICL_COMPARE Compare = <API key>(ICL_COMPARE_DEFAULT, DomainT), ICL_COMBINE Combine = <API key>(icl::inplace_plus, CodomainT), ICL_SECTION Section = <API key>(icl::inter_section, CodomainT), ICL_INTERVAL(ICL_COMPARE) Interval = <API key>(<API key>, DomainT, Compare), ICL_ALLOC Alloc = std::allocator > class interval_base_map { public: //= Associated types typedef interval_base_map<SubType,DomainT,CodomainT, Traits,Compare,Combine,Section,Interval,Alloc> type; The designated \e derived or \e sub_type of this base class typedef SubType sub_type; Auxilliary type for overloadresolution typedef type overloadable_type; Traits of an itl map typedef Traits traits; //- Associated types: Related types The atomized type representing the corresponding container of elements typedef typename icl::map<DomainT,CodomainT, Traits,Compare,Combine,Section,Alloc> atomized_type; //- Associated types: Data Domain type (type of the keys) of the map typedef DomainT domain_type; typedef typename 
boost::call_traits<DomainT>::param_type domain_param; Domain type (type of the keys) of the map typedef CodomainT codomain_type; Auxiliary type to help the compiler resolve ambiguities when using std::make_pair typedef mapping_pair<domain_type,codomain_type> domain_mapping_type; Conceptual is a map a set of elements of type \c element_type typedef domain_mapping_type element_type; The interval type of the map typedef ICL_INTERVAL_TYPE(Interval,DomainT,Compare) interval_type; Auxiliary type for overload resolution typedef std::pair<interval_type,CodomainT> <API key>; Type of an interval containers segment, that is spanned by an interval typedef std::pair<interval_type,CodomainT> segment_type; //- Associated types: Size The difference type of an interval which is sometimes different form the domain_type typedef typename difference_type_of<domain_type>::type difference_type; The size type of an interval which is mostly std::size_t typedef typename size_type_of<domain_type>::type size_type; //- Associated types: Functors Comparison functor for domain values typedef ICL_COMPARE_DOMAIN(Compare,DomainT) domain_compare; typedef ICL_COMPARE_DOMAIN(Compare,segment_type) segment_compare; Combine functor for codomain value aggregation typedef <API key>(Combine,CodomainT) codomain_combine; Inverse Combine functor for codomain value aggregation typedef typename inverse<codomain_combine>::type <API key>; Intersection functor for codomain values typedef typename mpl::if_ <has_set_semantics<codomain_type> , <API key>(Section,CodomainT) , codomain_combine >::type codomain_intersect; Inverse Combine functor for codomain value intersection typedef typename inverse<codomain_intersect>::type <API key>; Comparison functor for intervals which are keys as well typedef exclusive_less_than<interval_type> interval_compare; Comparison functor for keys typedef exclusive_less_than<interval_type> key_compare; //- Associated types: Implementation and stl related The allocator type of the set 
typedef Alloc<std::pair<const interval_type, codomain_type> > allocator_type; Container type for the implementation typedef ICL_IMPL_SPACE::map<interval_type,codomain_type, key_compare,allocator_type> ImplMapT; key type of the implementing container typedef typename ImplMapT::key_type key_type; value type of the implementing container typedef typename ImplMapT::value_type value_type; data type of the implementing container typedef typename ImplMapT::value_type::second_type data_type; pointer type typedef typename ImplMapT::pointer pointer; const pointer type typedef typename ImplMapT::const_pointer const_pointer; reference type typedef typename ImplMapT::reference reference; const reference type typedef typename ImplMapT::const_reference const_reference; iterator for iteration over intervals typedef typename ImplMapT::iterator iterator; const_iterator for iteration over intervals typedef typename ImplMapT::const_iterator const_iterator; iterator for reverse iteration over intervals typedef typename ImplMapT::reverse_iterator reverse_iterator; const_iterator for iteration over intervals typedef typename ImplMapT::<API key> <API key>; element iterator: Depreciated, see documentation. typedef boost::icl::element_iterator<iterator> element_iterator; const element iterator: Depreciated, see documentation. typedef boost::icl::element_iterator<const_iterator> <API key>; element reverse iterator: Depreciated, see documentation. typedef boost::icl::element_iterator<reverse_iterator> <API key>; element const reverse iterator: Depreciated, see documentation. 
typedef boost::icl::element_iterator<<API key>> <API key>; typedef typename on_absorbtion<type, codomain_combine, Traits::absorbs_identities>::type <API key>; public: <API key>(bool, is_total_invertible = ( Traits::is_total && has_inverse<codomain_type>::value)); <API key>(int, fineness = 0); public: //= Construct, copy, destruct /** Default constructor for the empty object */ interval_base_map() { <API key>((<API key><DomainT>)); <API key>((<API key><DomainT>)); <API key>((<API key><CodomainT>)); <API key>((<API key><CodomainT>)); } /** Copy constructor */ interval_base_map(const interval_base_map& src): _map(src._map) { <API key>((<API key><DomainT>)); <API key>((<API key><DomainT>)); <API key>((<API key><CodomainT>)); <API key>((<API key><CodomainT>)); } /** Copy assignment operator */ interval_base_map& operator = (const interval_base_map& src) { this->_map = src._map; return *this; } # ifndef <API key> //= Move semantics /** Move constructor */ interval_base_map(interval_base_map&& src): _map(boost::move(src._map)) { <API key>((<API key><DomainT>)); <API key>((<API key><DomainT>)); <API key>((<API key><CodomainT>)); <API key>((<API key><CodomainT>)); } /** Move assignment operator */ interval_base_map& operator = (interval_base_map&& src) { this->_map = boost::move(src._map); return *this; } # endif // <API key> /** swap the content of containers */ void swap(interval_base_map& object) { _map.swap(object._map); } //= Containedness /** clear the map */ void clear() { icl::clear(*that()); } /** is the map empty? 
*/ bool empty()const { return icl::is_empty(*that()); } //= Size /** An interval map's size is it's cardinality */ size_type size()const { return icl::cardinality(*that()); } /** Size of the iteration over this container */ std::size_t iterative_size()const { return _map.size(); } //= Selection /** Find the interval value pair, that contains \c key */ const_iterator find(const domain_type& key_value)const { return icl::find(*this, key_value); } /** Find the first interval value pair, that collides with interval \c key_interval */ const_iterator find(const interval_type& key_interval)const { return _map.find(key_interval); } /** Total select function. */ codomain_type operator()(const domain_type& key_value)const { const_iterator it_ = icl::find(*this, key_value); return it_==end() ? identity_element<codomain_type>::value() : (*it_).second; } //= Addition /** Addition of a key value pair to the map */ SubType& add(const element_type& key_value_pair) { return icl::add(*that(), key_value_pair); } /** Addition of an interval value pair to the map. */ SubType& add(const segment_type& interval_value_pair) { this->template _add<codomain_combine>(interval_value_pair); return *that(); } /** Addition of an interval value pair \c interval_value_pair to the map. Iterator \c prior_ is a hint to the position \c interval_value_pair can be inserted after. */ iterator add(iterator prior_, const segment_type& interval_value_pair) { return this->template _add<codomain_combine>(prior_, interval_value_pair); } //= Subtraction /** Subtraction of a key value pair from the map */ SubType& subtract(const element_type& key_value_pair) { return icl::subtract(*that(), key_value_pair); } /** Subtraction of an interval value pair from the map. */ SubType& subtract(const segment_type& interval_value_pair) { on_invertible<type, is_total_invertible> ::subtract(*that(), interval_value_pair); return *that(); } //= Insertion /** Insertion of a \c key_value_pair into the map. 
*/ SubType& insert(const element_type& key_value_pair) { return icl::insert(*that(), key_value_pair); } /** Insertion of an \c interval_value_pair into the map. */ SubType& insert(const segment_type& interval_value_pair) { _insert(interval_value_pair); return *that(); } /** Insertion of an \c interval_value_pair into the map. Iterator \c prior_. serves as a hint to insert after the element \c prior point to. */ iterator insert(iterator prior, const segment_type& interval_value_pair) { return _insert(prior, interval_value_pair); } /** With <tt>key_value_pair = (k,v)</tt> set value \c v for key \c k */ SubType& set(const element_type& key_value_pair) { return icl::set_at(*that(), key_value_pair); } /** With <tt>interval_value_pair = (I,v)</tt> set value \c v for all keys in interval \c I in the map. */ SubType& set(const segment_type& interval_value_pair) { return icl::set_at(*that(), interval_value_pair); } //= Erasure /** Erase a \c key_value_pair from the map. */ SubType& erase(const element_type& key_value_pair) { icl::erase(*that(), key_value_pair); return *that(); } /** Erase an \c interval_value_pair from the map. */ SubType& erase(const segment_type& interval_value_pair); /** Erase a key value pair for \c key. */ SubType& erase(const domain_type& key) { return icl::erase(*that(), key); } /** Erase all value pairs within the range of the interval <tt>inter_val</tt> from the map. */ SubType& erase(const interval_type& inter_val); /** Erase all value pairs within the range of the interval that iterator \c position points to. */ void erase(iterator position){ this->_map.erase(position); } /** Erase all value pairs for a range of iterators <tt>[first,past)</tt>. */ void erase(iterator first, iterator past){ this->_map.erase(first, past); } //= Intersection /** The intersection of \c interval_value_pair and \c *this map is added to \c section. 
*/ void add_intersection(SubType& section, const segment_type& interval_value_pair)const { on_definedness<SubType, Traits::is_total> ::add_intersection(section, *that(), interval_value_pair); } //= Symmetric difference /** If \c *this map contains \c key_value_pair it is erased, otherwise it is added. */ SubType& flip(const element_type& key_value_pair) { return icl::flip(*that(), key_value_pair); } /** If \c *this map contains \c interval_value_pair it is erased, otherwise it is added. */ SubType& flip(const segment_type& interval_value_pair) { on_total_absorbable<SubType, Traits::is_total, Traits::absorbs_identities> ::flip(*that(), interval_value_pair); return *that(); } //= Iterator related iterator lower_bound(const key_type& interval) { return _map.lower_bound(interval); } iterator upper_bound(const key_type& interval) { return _map.upper_bound(interval); } const_iterator lower_bound(const key_type& interval)const { return _map.lower_bound(interval); } const_iterator upper_bound(const key_type& interval)const { return _map.upper_bound(interval); } std::pair<iterator,iterator> equal_range(const key_type& interval) { return std::pair<iterator,iterator> (lower_bound(interval), upper_bound(interval)); } std::pair<const_iterator,const_iterator> equal_range(const key_type& interval)const { return std::pair<const_iterator,const_iterator> (lower_bound(interval), upper_bound(interval)); } iterator begin() { return _map.begin(); } iterator end() { return _map.end(); } const_iterator begin()const { return _map.begin(); } const_iterator end()const { return _map.end(); } reverse_iterator rbegin() { return _map.rbegin(); } reverse_iterator rend() { return _map.rend(); } <API key> rbegin()const { return _map.rbegin(); } <API key> rend()const { return _map.rend(); } private: template<class Combiner> iterator _add(const segment_type& interval_value_pair); template<class Combiner> iterator _add(iterator prior_, const segment_type& interval_value_pair); template<class Combiner> 
void _subtract(const segment_type& interval_value_pair); iterator _insert(const segment_type& interval_value_pair); iterator _insert(iterator prior_, const segment_type& interval_value_pair); private: template<class Combiner> void add_segment(const interval_type& inter_val, const CodomainT& co_val, iterator& it_); template<class Combiner> void add_main(interval_type& inter_val, const CodomainT& co_val, iterator& it_, const iterator& last_); template<class Combiner> void add_rear(const interval_type& inter_val, const CodomainT& co_val, iterator& it_); void add_front(const interval_type& inter_val, iterator& first_); private: void subtract_front(const interval_type& inter_val, iterator& first_); template<class Combiner> void subtract_main(const CodomainT& co_val, iterator& it_, const iterator& last_); template<class Combiner> void subtract_rear(interval_type& inter_val, const CodomainT& co_val, iterator& it_); private: void insert_main(const interval_type&, const CodomainT&, iterator&, const iterator&); void erase_rest ( interval_type&, const CodomainT&, iterator&, const iterator&); template<class FragmentT> void <API key>(SubType& section, const FragmentT& fragment)const { section += *that(); section.add(fragment); } void <API key>(SubType& section, const segment_type& operand)const { interval_type inter_val = operand.first; if(icl::is_empty(inter_val)) return; std::pair<const_iterator, const_iterator> exterior = equal_range(inter_val); if(exterior.first == exterior.second) return; for(const_iterator it_=exterior.first; it_ != exterior.second; it_++) { interval_type common_interval = (*it_).first & inter_val; if(!icl::is_empty(common_interval)) { section.template _add<codomain_combine> (value_type(common_interval, (*it_).second) ); section.template _add<codomain_intersect>(value_type(common_interval, operand.second)); } } } void <API key>(SubType& section, const element_type& operand)const { <API key>(section, make_segment<type>(operand)); } protected: template 
<class Combiner> iterator gap_insert(iterator prior_, const interval_type& inter_val, const codomain_type& co_val ) { // inter_val is not conained in this map. Insertion will be successful BOOST_ASSERT(this->_map.find(inter_val) == this->_map.end()); BOOST_ASSERT((!on_absorbtion<type,Combiner,Traits::absorbs_identities>::is_absorbable(co_val))); return this->_map.insert(prior_, value_type(inter_val, version<Combiner>()(co_val))); } template <class Combiner> std::pair<iterator, bool> add_at(const iterator& prior_, const interval_type& inter_val, const codomain_type& co_val ) { // Never try to insert an identity element into an identity element absorber here: BOOST_ASSERT((!(on_absorbtion<type,Combiner,Traits::absorbs_identities>::is_absorbable(co_val)))); iterator inserted_ = this->_map.insert(prior_, value_type(inter_val, Combiner::identity_element())); if((*inserted_).first == inter_val && (*inserted_).second == Combiner::identity_element()) { Combiner()((*inserted_).second, co_val); return std::pair<iterator,bool>(inserted_, true); } else return std::pair<iterator,bool>(inserted_, false); } std::pair<iterator, bool> insert_at(const iterator& prior_, const interval_type& inter_val, const codomain_type& co_val ) { iterator inserted_ = this->_map.insert(prior_, value_type(inter_val, co_val)); if(inserted_ == prior_) return std::pair<iterator,bool>(inserted_, false); else if((*inserted_).first == inter_val) return std::pair<iterator,bool>(inserted_, true); else return std::pair<iterator,bool>(inserted_, false); } protected: sub_type* that() { return static_cast<sub_type*>(this); } const sub_type* that()const { return static_cast<const sub_type*>(this); } protected: ImplMapT _map; private: template<class Type, bool is_total_invertible> struct on_invertible; template<class Type> struct on_invertible<Type, true> { typedef typename Type::segment_type segment_type; typedef typename Type::<API key> <API key>; static void subtract(Type& object, const segment_type& operand) 
{ object.template _add<<API key>>(operand); } }; template<class Type> struct on_invertible<Type, false> { typedef typename Type::segment_type segment_type; typedef typename Type::<API key> <API key>; static void subtract(Type& object, const segment_type& operand) { object.template _subtract<<API key>>(operand); } }; friend struct on_invertible<type, true>; friend struct on_invertible<type, false>; template<class Type, bool is_total> struct on_definedness; template<class Type> struct on_definedness<Type, true> { static void add_intersection(Type& section, const Type& object, const segment_type& operand) { object.<API key>(section, operand); } }; template<class Type> struct on_definedness<Type, false> { static void add_intersection(Type& section, const Type& object, const segment_type& operand) { object.<API key>(section, operand); } }; friend struct on_definedness<type, true>; friend struct on_definedness<type, false>; template<class Type, bool has_set_semantics> struct on_codomain_model; template<class Type> struct on_codomain_model<Type, true> { typedef typename Type::interval_type interval_type; typedef typename Type::codomain_type codomain_type; typedef typename Type::segment_type segment_type; typedef typename Type::codomain_combine codomain_combine; typedef typename Type::<API key> <API key>; static void add(Type& intersection, interval_type& common_interval, const codomain_type& flip_value, const codomain_type& co_value) { codomain_type common_value = flip_value; <API key>()(common_value, co_value); intersection.template _add<codomain_combine>(segment_type(common_interval, common_value)); } }; template<class Type> struct on_codomain_model<Type, false> { typedef typename Type::interval_type interval_type; typedef typename Type::codomain_type codomain_type; typedef typename Type::segment_type segment_type; typedef typename Type::codomain_combine codomain_combine; static void add(Type& intersection, interval_type& common_interval, const codomain_type&, const 
codomain_type&) { intersection.template _add<codomain_combine>(segment_type(common_interval, identity_element<codomain_type>::value())); } }; friend struct on_codomain_model<type, true>; friend struct on_codomain_model<type, false>; template<class Type, bool is_total, bool absorbs_identities> struct on_total_absorbable; template<class Type> struct on_total_absorbable<Type, true, true> { static void flip(Type& object, const typename Type::segment_type&) { icl::clear(object); } }; #ifdef BOOST_MSVC #pragma warning(push) #pragma warning(disable:4127) // conditional expression is constant #endif template<class Type> struct on_total_absorbable<Type, true, false> { typedef typename Type::segment_type segment_type; typedef typename Type::codomain_type codomain_type; static void flip(Type& object, const segment_type& operand) { object += operand; ICL_FORALL(typename Type, it_, object) (*it_).second = identity_element<codomain_type>::value(); if(mpl::not_<<API key><Type> >::value) icl::join(object); } }; #ifdef BOOST_MSVC #pragma warning(pop) #endif template<class Type, bool absorbs_identities> struct on_total_absorbable<Type, false, absorbs_identities> { typedef typename Type::segment_type segment_type; typedef typename Type::codomain_type codomain_type; typedef typename Type::interval_type interval_type; typedef typename Type::value_type value_type; typedef typename Type::const_iterator const_iterator; typedef typename Type::set_type set_type; typedef typename Type::<API key> <API key>; static void flip(Type& object, const segment_type& interval_value_pair) { // That which is common shall be subtracted // That which is not shall be added // So interval_value_pair has to be 'complementary added' or flipped interval_type span = interval_value_pair.first; std::pair<const_iterator, const_iterator> exterior = object.equal_range(span); const_iterator first_ = exterior.first; const_iterator end_ = exterior.second; interval_type covered, left_over, common_interval; const 
codomain_type& x_value = interval_value_pair.second; const_iterator it_ = first_; set_type eraser; Type intersection; while(it_ != end_ ) { const codomain_type& co_value = (*it_).second; covered = (*it_++).first; //[a ... : span // [b ... : covered //[a b) : left_over left_over = right_subtract(span, covered); //That which is common ... common_interval = span & covered; if(!icl::is_empty(common_interval)) { // ... shall be subtracted icl::add(eraser, common_interval); on_codomain_model<Type, has_set_semantics<codomain_type>::value> ::add(intersection, common_interval, x_value, co_value); } icl::add(object, value_type(left_over, x_value)); //That which is not shall be added // Because this is a collision free addition I don't have to distinguish codomain_types. //... d) : span //... c) : covered // [c d) : span' span = left_subtract(span, covered); } //If span is not empty here, it is not in the set so it shall be added icl::add(object, value_type(span, x_value)); //finally rewrite the common segments icl::erase(object, eraser); object += intersection; } }; } ; //= Addition detail template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::add_front(const interval_type& inter_val, iterator& first_) { // If the collision sequence has a left residual 'left_resid' it will // be split, to provide a standardized start of algorithms: // The addend interval 'inter_val' covers the beginning of the collision sequence. 
// only for the first there can be a left_resid: a part of *first_ left of inter_val interval_type left_resid = right_subtract((*first_).first, inter_val); if(!icl::is_empty(left_resid)) { iterator prior_ = cyclic_prior(*this, first_); const_cast<interval_type&>((*first_).first) = left_subtract((*first_).first, left_resid); //NOTE: Only splitting this->_map.insert(prior_, segment_type(left_resid, (*first_).second)); } //POST: // ...[-- first_ --... } template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> template<class Combiner> inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::add_segment(const interval_type& inter_val, const CodomainT& co_val, iterator& it_) { interval_type lead_gap = right_subtract(inter_val, (*it_).first); if(!icl::is_empty(lead_gap)) { iterator prior_ = prior(it_); iterator inserted_ = this->template gap_insert<Combiner>(prior_, lead_gap, co_val); that()->handle_inserted(prior_, inserted_); } // [-- it_ --) has a common part with the first overval Combiner()((*it_).second, co_val); that()->template <API key><Combiner>(it_++); } template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> template<class Combiner> inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::add_main(interval_type& inter_val, const CodomainT& co_val, iterator& it_, const iterator& last_) { interval_type cur_interval; while(it_!=last_) { cur_interval = (*it_).first ; add_segment<Combiner>(inter_val, co_val, it_); // shrink interval inter_val = left_subtract(inter_val, cur_interval); } } template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION 
Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> template<class Combiner> inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::add_rear(const interval_type& inter_val, const CodomainT& co_val, iterator& it_) { iterator prior_ = cyclic_prior(*that(), it_); interval_type cur_itv = (*it_).first ; interval_type lead_gap = right_subtract(inter_val, cur_itv); if(!icl::is_empty(lead_gap)) { // [prior) [-- it_ ... iterator inserted_ = this->template gap_insert<Combiner>(prior_, lead_gap, co_val); that()->handle_inserted(prior_, inserted_); } interval_type end_gap = left_subtract(inter_val, cur_itv); if(!icl::is_empty(end_gap)) { Combiner()((*it_).second, co_val); that()->template gap_insert_at<Combiner>(it_, prior_, end_gap, co_val); } else { // only for the last there can be a right_resid: a part of *it_ right of x interval_type right_resid = left_subtract(cur_itv, inter_val); if(icl::is_empty(right_resid)) { Combiner()((*it_).second, co_val); that()->template <API key><Combiner>(prior_, it_); } else { // [-- it_ --right_resid) const_cast<interval_type&>((*it_).first) = right_subtract((*it_).first, right_resid); //NOTE: This is NOT an insertion that has to take care for correct application of // the Combiner functor. It only reestablished that state after splitting the // 'it_' interval value pair. Using _map_insert<Combiner> does not work here. 
iterator insertion_ = this->_map.insert(it_, value_type(right_resid, (*it_).second)); that()->handle_reinserted(insertion_); Combiner()((*it_).second, co_val); that()->template <API key><Combiner>(insertion_, it_); } } } //= Addition template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> template<class Combiner> inline typename interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>::iterator interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::_add(const segment_type& addend) { typedef typename on_absorbtion<type,Combiner, absorbs_identities<type>::value>::type on_absorbtion_; const interval_type& inter_val = addend.first; if(icl::is_empty(inter_val)) return this->_map.end(); const codomain_type& co_val = addend.second; if(on_absorbtion_::is_absorbable(co_val)) return this->_map.end(); std::pair<iterator,bool> insertion = this->_map.insert(value_type(inter_val, version<Combiner>()(co_val))); if(insertion.second) return that()->handle_inserted(insertion.first); else { // Detect the first and the end iterator of the collision sequence iterator first_ = this->_map.lower_bound(inter_val), last_ = insertion.first; //assert(end_ == this->_map.upper_bound(inter_val)); iterator it_ = first_; interval_type rest_interval = inter_val; add_front (rest_interval, it_ ); add_main<Combiner>(rest_interval, co_val, it_, last_); add_rear<Combiner>(rest_interval, co_val, it_ ); return it_; } } template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> template<class Combiner> inline typename interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>::iterator 
interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::_add(iterator prior_, const segment_type& addend) { typedef typename on_absorbtion<type,Combiner, absorbs_identities<type>::value>::type on_absorbtion_; const interval_type& inter_val = addend.first; if(icl::is_empty(inter_val)) return prior_; const codomain_type& co_val = addend.second; if(on_absorbtion_::is_absorbable(co_val)) return prior_; std::pair<iterator,bool> insertion = add_at<Combiner>(prior_, inter_val, co_val); if(insertion.second) return that()->handle_inserted(insertion.first); else { // Detect the first and the end iterator of the collision sequence std::pair<iterator,iterator> overlap = equal_range(inter_val); iterator it_ = overlap.first, last_ = prior(overlap.second); interval_type rest_interval = inter_val; add_front (rest_interval, it_ ); add_main<Combiner>(rest_interval, co_val, it_, last_); add_rear<Combiner>(rest_interval, co_val, it_ ); return it_; } } //= Subtraction detail template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::subtract_front(const interval_type& inter_val, iterator& it_) { interval_type left_resid = right_subtract((*it_).first, inter_val); if(!icl::is_empty(left_resid)) { iterator prior_ = cyclic_prior(*this, it_); const_cast<interval_type&>((*it_).first) = left_subtract((*it_).first, left_resid); this->_map.insert(prior_, value_type(left_resid, (*it_).second)); // The segemnt *it_ is split at inter_val.first(), so as an invariant // segment *it_ is always "under" inter_val and a left_resid is empty. 
} } template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> template<class Combiner> inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::subtract_main(const CodomainT& co_val, iterator& it_, const iterator& last_) { while(it_ != last_) { Combiner()((*it_).second, co_val); that()->template <API key><Combiner>(it_++); } } template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> template<class Combiner> inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::subtract_rear(interval_type& inter_val, const CodomainT& co_val, iterator& it_) { interval_type right_resid = left_subtract((*it_).first, inter_val); if(icl::is_empty(right_resid)) { Combiner()((*it_).second, co_val); that()->template handle_combined<Combiner>(it_); } else { const_cast<interval_type&>((*it_).first) = right_subtract((*it_).first, right_resid); iterator next_ = this->_map.insert(it_, value_type(right_resid, (*it_).second)); Combiner()((*it_).second, co_val); that()->template <API key><Combiner>(it_, next_); } } //= Subtraction template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> template<class Combiner> inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::_subtract(const segment_type& minuend) { interval_type inter_val = minuend.first; if(icl::is_empty(inter_val)) return; const codomain_type& co_val = minuend.second; if(on_absorbtion<type,Combiner,Traits::absorbs_identities>::is_absorbable(co_val)) return; std::pair<iterator, iterator> exterior = 
equal_range(inter_val); if(exterior.first == exterior.second) return; iterator last_ = prior(exterior.second); iterator it_ = exterior.first; subtract_front (inter_val, it_ ); subtract_main <Combiner>( co_val, it_, last_); subtract_rear <Combiner>(inter_val, co_val, it_ ); } //= Insertion template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::insert_main(const interval_type& inter_val, const CodomainT& co_val, iterator& it_, const iterator& last_) { iterator end_ = boost::next(last_); iterator prior_ = it_, inserted_; if(prior_ != this->_map.end()) --prior_; interval_type rest_interval = inter_val, left_gap, cur_itv; interval_type last_interval = last_ ->first; while(it_ != end_ ) { cur_itv = (*it_).first ; left_gap = right_subtract(rest_interval, cur_itv); if(!icl::is_empty(left_gap)) { inserted_ = this->_map.insert(prior_, value_type(left_gap, co_val)); it_ = that()->handle_inserted(inserted_); } // shrink interval rest_interval = left_subtract(rest_interval, cur_itv); prior_ = it_; ++it_; } //insert_rear(rest_interval, co_val, last_): interval_type end_gap = left_subtract(rest_interval, last_interval); if(!icl::is_empty(end_gap)) { inserted_ = this->_map.insert(prior_, value_type(end_gap, co_val)); it_ = that()->handle_inserted(inserted_); } else it_ = prior_; } template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> inline typename interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>::iterator interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::_insert(const segment_type& addend) { interval_type inter_val = addend.first; 
if(icl::is_empty(inter_val)) return this->_map.end(); const codomain_type& co_val = addend.second; if(<API key>::is_absorbable(co_val)) return this->_map.end(); std::pair<iterator,bool> insertion = this->_map.insert(addend); if(insertion.second) return that()->handle_inserted(insertion.first); else { // Detect the first and the end iterator of the collision sequence iterator first_ = this->_map.lower_bound(inter_val), last_ = insertion.first; //assert((++last_) == this->_map.upper_bound(inter_val)); iterator it_ = first_; insert_main(inter_val, co_val, it_, last_); return it_; } } template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> inline typename interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc>::iterator interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::_insert(iterator prior_, const segment_type& addend) { interval_type inter_val = addend.first; if(icl::is_empty(inter_val)) return prior_; const codomain_type& co_val = addend.second; if(<API key>::is_absorbable(co_val)) return prior_; std::pair<iterator,bool> insertion = insert_at(prior_, inter_val, co_val); if(insertion.second) return that()->handle_inserted(insertion.first); { // Detect the first and the end iterator of the collision sequence std::pair<iterator,iterator> overlap = equal_range(inter_val); iterator it_ = overlap.first, last_ = prior(overlap.second); insert_main(inter_val, co_val, it_, last_); return it_; } } //= Erasure segment_type template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> inline void interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::erase_rest(interval_type& inter_val, const CodomainT& co_val, 
iterator& it_, const iterator& last_) { // For all intervals within loop: (*it_).first are contained_in inter_val while(it_ != last_) if((*it_).second == co_val) this->_map.erase(it_++); else it_++; //erase_rear: if((*it_).second == co_val) { interval_type right_resid = left_subtract((*it_).first, inter_val); if(icl::is_empty(right_resid)) this->_map.erase(it_); else const_cast<interval_type&>((*it_).first) = right_resid; } } template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> inline SubType& interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::erase(const segment_type& minuend) { interval_type inter_val = minuend.first; if(icl::is_empty(inter_val)) return *that(); const codomain_type& co_val = minuend.second; if(<API key>::is_absorbable(co_val)) return *that(); std::pair<iterator,iterator> exterior = equal_range(inter_val); if(exterior.first == exterior.second) return *that(); iterator first_ = exterior.first, end_ = exterior.second, last_ = cyclic_prior(*this, end_); iterator second_= first_; ++second_; if(first_ == last_) { // .....first_==last_..... // only for the last there can be a right_resid: a part of *it_ right of minuend interval_type right_resid = left_subtract((*first_).first, inter_val); if((*first_).second == co_val) { interval_type left_resid = right_subtract((*first_).first, inter_val); if(!icl::is_empty(left_resid)) { // [left_resid)..first_==last_...... 
const_cast<interval_type&>((*first_).first) = left_resid; if(!icl::is_empty(right_resid)) this->_map.insert(first_, value_type(right_resid, co_val)); } else if(!icl::is_empty(right_resid)) const_cast<interval_type&>((*first_).first) = right_resid; else this->_map.erase(first_); } } else { // first AND NOT last if((*first_).second == co_val) { interval_type left_resid = right_subtract((*first_).first, inter_val); if(icl::is_empty(left_resid)) this->_map.erase(first_); else const_cast<interval_type&>((*first_).first) = left_resid; } erase_rest(inter_val, co_val, second_, last_); } return *that(); } //= Erasure key_type template <class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc> inline SubType& interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> ::erase(const interval_type& minuend) { if(icl::is_empty(minuend)) return *that(); std::pair<iterator, iterator> exterior = equal_range(minuend); if(exterior.first == exterior.second) return *that(); iterator first_ = exterior.first, end_ = exterior.second, last_ = prior(end_); interval_type left_resid = right_subtract((*first_).first, minuend); interval_type right_resid = left_subtract(last_ ->first, minuend); if(first_ == last_ ) if(!icl::is_empty(left_resid)) { const_cast<interval_type&>((*first_).first) = left_resid; if(!icl::is_empty(right_resid)) this->_map.insert(first_, value_type(right_resid, (*first_).second)); } else if(!icl::is_empty(right_resid)) const_cast<interval_type&>((*first_).first) = left_subtract((*first_).first, minuend); else this->_map.erase(first_); else { // [left_resid fst) . . . . [lst right_resid) iterator second_= first_; ++second_; iterator start_ = icl::is_empty(left_resid)? first_: second_; iterator stop_ = icl::is_empty(right_resid)? 
end_ : last_ ; this->_map.erase(start_, stop_); //erase [start_, stop_) if(!icl::is_empty(left_resid)) const_cast<interval_type&>((*first_).first) = left_resid; if(!icl::is_empty(right_resid)) const_cast<interval_type&>(last_ ->first) = right_resid; } return *that(); } // type traits template < class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc > struct is_map<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > { typedef is_map<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > type; <API key>(bool, value = true); }; template < class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc > struct has_inverse<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > { typedef has_inverse<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > type; <API key>(bool, value = (has_inverse<CodomainT>::value)); }; template < class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc > struct <API key><icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > { typedef <API key><icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > type; <API key>(bool, value = true); }; template < class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc > struct 
absorbs_identities<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > { typedef absorbs_identities<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > type; <API key>(bool, value = (Traits::absorbs_identities)); }; template < class SubType, class DomainT, class CodomainT, class Traits, ICL_COMPARE Compare, ICL_COMBINE Combine, ICL_SECTION Section, ICL_INTERVAL(ICL_COMPARE) Interval, ICL_ALLOC Alloc > struct is_total<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > { typedef is_total<icl::interval_base_map<SubType,DomainT,CodomainT,Traits,Compare,Combine,Section,Interval,Alloc> > type; <API key>(bool, value = (Traits::is_total)); }; }} // namespace icl boost #endif
package com.parse;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;

import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;

/**
 * An operation that removes every instance of an element from an array field.
 */
/** package */ class <API key> implements ParseFieldOperation {
  // The set of elements to remove. A HashSet, so duplicates passed by the
  // caller collapse to a single entry; removal below removes *every*
  // occurrence from the target list regardless.
  protected final HashSet<Object> objects = new HashSet<>();

  public <API key>(Collection<?> coll) {
    objects.addAll(coll);
  }

  /**
   * Encodes this operation as a REST "Remove" op:
   * {"__op": "Remove", "objects": [...]}.
   *
   * @param objectEncoder encoder used to serialize each element
   * @throws JSONException if any element cannot be encoded
   */
  @Override
  public JSONObject encode(ParseEncoder objectEncoder) throws JSONException {
    JSONObject output = new JSONObject();
    output.put("__op", "Remove");
    output.put("objects", objectEncoder.encode(new ArrayList<>(objects)));
    return output;
  }

  /**
   * Collapses this operation with the operation that preceded it on the
   * same field into a single equivalent operation.
   */
  @Override
  public ParseFieldOperation mergeWithPrevious(ParseFieldOperation previous) {
    if (previous == null) {
      // Nothing to merge with; this op stands alone.
      return this;
    } else if (previous instanceof <API key>) {
      // NOTE(review): passing `objects` (the elements being removed) to the
      // Set operation here looks suspicious if the previous op deleted the
      // field — confirm intended remove-after-delete semantics.
      return new ParseSetOperation(objects);
    } else if (previous instanceof ParseSetOperation) {
      // Previous op pinned the field to a concrete value: apply the removal
      // to that value eagerly and keep it a Set operation.
      Object value = ((ParseSetOperation) previous).getValue();
      if (value instanceof JSONArray || value instanceof List) {
        return new ParseSetOperation(this.apply(value, null));
      } else {
        throw new <API key>("You can only add an item to a List or JSONArray.");
      }
    } else if (previous instanceof <API key>) {
      // Two removes in a row: union the removal sets.
      HashSet<Object> result = new HashSet<>(((<API key>) previous).objects);
      result.addAll(objects);
      return new <API key>(result);
    } else {
      throw new <API key>("Operation is invalid after previous operation.");
    }
  }

  /**
   * Applies the removal to an existing field value.
   *
   * @param oldValue the current value (null, JSONArray, or List)
   * @param key      the field name (unused here)
   * @return the value with all matching elements removed; ParseObjects are
   *         additionally matched by objectId, not just object equality
   */
  @Override
  public Object apply(Object oldValue, String key) {
    if (oldValue == null) {
      // Removing from an absent field yields an empty list.
      return new ArrayList<>();
    } else if (oldValue instanceof JSONArray) {
      // Normalize JSONArray -> List, recurse, convert back.
      ArrayList<Object> old = <API key>.<API key>((JSONArray) oldValue);
      @SuppressWarnings("unchecked")
      ArrayList<Object> newValue = (ArrayList<Object>) this.apply(old, key);
      return new JSONArray(newValue);
    } else if (oldValue instanceof List) {
      ArrayList<Object> result = new ArrayList<>((List<?>) oldValue);
      // First pass: remove by object equality.
      result.removeAll(objects);

      // Remove the removed objects from "objects" -- the items remaining
      // should be ones that weren't removed by object equality.
      ArrayList<Object> objectsToBeRemoved = new ArrayList<>(objects);
      objectsToBeRemoved.removeAll(result);

      // Build up a set of object IDs for any ParseObjects among the
      // still-to-be-removed items.
      HashSet<String> objectIds = new HashSet<>();
      for (Object obj : objectsToBeRemoved) {
        if (obj instanceof ParseObject) {
          objectIds.add(((ParseObject) obj).getObjectId());
        }
      }

      // Second pass: remove any ParseObject whose objectId matches, even if
      // it isn't equal() to the instance we were given.
      Iterator<Object> resultIterator = result.iterator();
      while (resultIterator.hasNext()) {
        Object obj = resultIterator.next();
        if (obj instanceof ParseObject && objectIds.contains(((ParseObject) obj).getObjectId())) {
          resultIterator.remove();
        }
      }
      return result;
    } else {
      throw new <API key>("Operation is invalid after previous operation.");
    }
  }
}
<?php

namespace Symfony\Component\DependencyInjection\Loader;

use Symfony\Component\Config\Resource\FileResource;

/**
 * PhpFileLoader loads service definitions from a PHP file.
 *
 * The PHP file is required and the $container variable can be
 * used within the file to change the container.
 *
 * @author Fabien Potencier <fabien@symfony.com>
 */
class PhpFileLoader extends FileLoader
{
    /**
     * Loads a PHP file.
     *
     * @param mixed  $file The resource
     * @param string $type The resource type
     */
    public function load($file, $type = null)
    {
        // These two locals are deliberately part of the contract: the
        // included file below sees them and may use them to configure
        // the container.
        $container = $this->container;
        $loader = $this;

        $path = $this->locator->locate($file);
        $this->setCurrentDir(dirname($path));
        $this->container->addResource(new FileResource($path));

        include $path;
    }

    /**
     * Returns true if this class supports the given resource.
     *
     * @param mixed  $resource A resource
     * @param string $type     The resource type
     *
     * @return bool true if this class supports the given resource, false otherwise
     */
    public function supports($resource, $type = null)
    {
        if (!is_string($resource)) {
            return false;
        }

        return 'php' === pathinfo($resource, PATHINFO_EXTENSION);
    }
}
<!DOCTYPE html>
<html>
<!--
Copyright 2008 The Closure Library Authors. All Rights Reserved.

Use of this source code is governed by the Apache License, Version 2.0.
See the COPYING file for details.
-->
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta charset="UTF-8" />
<title>goog.dom.NodeOffset Tests</title>
<script src="../base.js"></script>
<script>
  goog.require('goog.dom.NodeOffsetTest');
</script>
</head>
<body>
<div id="test1">Text<br> and <b>more <i id="i">text.</i></b></div>
<div id="test2"></div>
<div id="empty"></div>
</body>
</html>
angular.module('ngWig', ['ngwig-app-templates']);

/**
 * ngWig - minimal WYSIWYG editor directive.
 * Binds the edited HTML to the expression given in the ng-wig attribute.
 */
angular.module('ngWig').directive('ngWig', function () {
  return {
    scope: {
      content: '=ngWig'
    },
    restrict: 'A',
    replace: true,
    templateUrl: 'ng-wig/views/ng-wig.html',
    link: function (scope, element, attrs) {
      scope.originalHeight = element.outerHeight();
      scope.editMode = false;
      // Auto-expand is on unless the attribute is explicitly set to 'off'.
      scope.autoexpand = !('autoexpand' in attrs) || attrs['autoexpand'] !== 'off';
      scope.cssPath = scope.cssPath ? scope.cssPath : 'css/ng-wig.css';

      scope.toggleEditMode = function () {
        scope.editMode = !scope.editMode;
      };

      // Broadcast an editing command to the contenteditable child directive.
      scope.execCommand = function (command, options) {
        if (command === 'createlink') {
          // FIX: the default-URL literal had been truncated to 'http:' by a
          // comment-stripping pass that ate the "//" — restored to 'http://'.
          options = prompt('Please enter the URL', 'http://');
        }
        scope.$emit('execCommand', {command: command, options: options});
      };
    }
  };
});

/**
 * ngWigEditable - contenteditable surface wired to ngModel.
 * Listens for 'execCommand' events emitted by the toolbar above.
 */
angular.module('ngWig').directive('ngWigEditable', function () {
  function init(scope, $element, attrs, ctrl) {
    var document = $element[0].ownerDocument;
    $element.attr('contenteditable', true);

    // model --> view
    ctrl.$render = function () {
      $element.html(ctrl.$viewValue || '');
    };

    // view --> model
    function viewToModel() {
      ctrl.$setViewValue($element.html());
    }

    $element.bind('blur keyup change paste', viewToModel);

    scope.$on('execCommand', function (event, params) {
      $element[0].focus();

      // FIX: the original local was redacted to an invalid identifier;
      // renamed to ieSelection. `document.selection` only exists on old IE,
      // where the caret must be restored around execCommand.
      var ieSelection = document.selection,
          command = params.command,
          options = params.options;

      if (ieSelection) {
        var textRange = ieSelection.createRange();
      }

      document.execCommand(command, false, options);

      if (ieSelection) {
        textRange.collapse(false);
        textRange.select();
      }

      viewToModel();
    });
  }

  return {
    restrict: 'A',
    require: 'ngModel',
    replace: true,
    link: init
  };
});

/**
 * No box-sizing, such a shame
 *
 * 1. Calculate outer height
 * @param bool Include margin
 * @returns Number Height in pixels
 *
 * 2. Set outer height
 * @param Number Height in pixels
 * @param bool Include margin
 * @returns angular.element Collection
 */
if (typeof angular.element.prototype.outerHeight !== 'function') {
  angular.element.prototype.outerHeight = function () {
    // Parse a CSS length like "12px" to a number; anything else is 0.
    function parsePixels(cssString) {
      if (cssString.slice(-2) === 'px') {
        return parseFloat(cssString.slice(0, -2));
      }
      return 0;
    }

    var includeMargin = false,
        height,
        $element = this.eq(0),
        element = $element[0];

    if (arguments[0] === true || arguments[0] === false || arguments[0] === undefined) {
      // Getter mode: first argument (if any) is the include-margin flag.
      if (!$element.length) {
        return 0;
      }
      includeMargin = arguments[0] && true || false;
      if (element.outerHeight) {
        height = element.outerHeight;
      } else {
        height = element.offsetHeight;
      }
      if (includeMargin) {
        height += parsePixels($element.css('marginTop')) +
                  parsePixels($element.css('marginBottom'));
      }
      return height;
    } else {
      // Setter mode: subtract margins/borders/padding to emulate box-sizing.
      if (!$element.length) {
        return this;
      }
      height = parseFloat(arguments[0]);
      includeMargin = arguments[1] && true || false;
      if (includeMargin) {
        height -= parsePixels($element.css('marginTop')) +
                  parsePixels($element.css('marginBottom'));
      }
      height -= parsePixels($element.css('borderTopWidth')) +
                parsePixels($element.css('borderBottomWidth')) +
                parsePixels($element.css('paddingTop')) +
                parsePixels($element.css('paddingBottom'));
      $element.css('height', height + 'px');
      return this;
    }
  };
}

angular.module('ngwig-app-templates', ['ng-wig/views/ng-wig.html']);

// FIX: the HTML comment openers inside the template were mangled from
// "<!--" to "<!" (the "--" was stripped), breaking both the string
// concatenation and the comment-based whitespace trick; restored below.
// NOTE(review): several button class names were redacted to "<API key>"
// upstream and cannot be recovered here — confirm against the real CSS.
angular.module("ng-wig/views/ng-wig.html", []).run(["$templateCache", function ($templateCache) {
  $templateCache.put("ng-wig/views/ng-wig.html",
    "<div class=\"ng-wig\">\n" +
    "  <ul class=\"nw-toolbar\">\n" +
    "    <li class=\"nw-toolbar__item\">\n" +
    "      <button type=\"button\" class=\"nw-button <API key>\" title=\"Header\" ng-click=\"execCommand('formatblock', '<h1>')\"></button>\n" +
    "    </li><!--\n" +
    "    --><li class=\"nw-toolbar__item\">\n" +
    "      <button type=\"button\" class=\"nw-button <API key>\" title=\"Paragraph\" ng-click=\"execCommand('formatblock', '<p>')\"></button>\n" +
    "    </li><!--\n" +
    "    --><li class=\"nw-toolbar__item\">\n" +
    "      <button type=\"button\" class=\"nw-button <API key>\" title=\"Unordered List\" ng-click=\"execCommand('insertunorderedlist')\"></button>\n" +
    "    </li><!--\n" +
    "    --><li class=\"nw-toolbar__item\">\n" +
    "      <button type=\"button\" class=\"nw-button <API key>\" title=\"Ordered List\" ng-click=\"execCommand('insertorderedlist')\"></button>\n" +
    "    </li><!--\n" +
    "    --><li class=\"nw-toolbar__item\">\n" +
    "      <button type=\"button\" class=\"nw-button nw-button--bold\" title=\"Bold\" ng-click=\"execCommand('bold')\"></button>\n" +
    "    </li><!--\n" +
    "    --><li class=\"nw-toolbar__item\">\n" +
    "      <button type=\"button\" class=\"nw-button nw-button--italic\" title=\"Italic\" ng-click=\"execCommand('italic')\"></button>\n" +
    "    </li><!--\n" +
    "    --><li class=\"nw-toolbar__item\">\n" +
    "      <button type=\"button\" class=\"nw-button nw-button--link\" title=\"link\" ng-click=\"execCommand('createlink')\"></button>\n" +
    "    </li><!--\n" +
    "    --><li class=\"nw-toolbar__item\">\n" +
    "      <button type=\"button\" class=\"nw-button nw-button--source\" ng-class=\"{ 'nw-button--active': editMode }\" ng-click=\"toggleEditMode()\"></button>\n" +
    "    </li>\n" +
    "  </ul>\n" +
    "\n" +
    "  <div class=\"nw-editor-container\">\n" +
    "    <div class=\"nw-editor\">\n" +
    "      <textarea class=\"nw-editor__src\" ng-show=\"editMode\" ng-model=\"content\"></textarea>\n" +
    "      <div ng-class=\"{'nw-invisible': editMode, 'nw-autoexpand': autoexpand}\" class=\"nw-editor__res\" ng-model=\"content\" ng-wig-editable></div>\n" +
    "    </div>\n" +
    "  </div>\n" +
    "</div>\n" +
    "");
}]);
/* <API key>: GPL-2.0 */ #ifndef <API key> #define <API key> #include <linux/percpu.h> #include <linux/atomic.h> typedef struct { atomic_long_t a; } local_t; #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } #define local_read(l) atomic_long_read(&(l)->a) #define local_set(l,i) atomic_long_set(&(l)->a, (i)) #define local_add(i,l) atomic_long_add((i),(&(l)->a)) #define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) #define local_inc(l) atomic_long_inc(&(l)->a) #define local_dec(l) atomic_long_dec(&(l)->a) static __inline__ long local_add_return(long a, local_t *l) { long t; __asm__ __volatile__( "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\ add %0,%1,%0\n" PPC405_ERR77(0,%2) PPC_STLCX "%0,0,%2 \n\ bne- 1b" : "=&r" (t) : "r" (a), "r" (&(l->a.counter)) : "cc", "memory"); return t; } #define local_add_negative(a, l) (local_add_return((a), (l)) < 0) static __inline__ long local_sub_return(long a, local_t *l) { long t; __asm__ __volatile__( "1:" PPC_LLARX(%0,0,%2,0) " # local_sub_return\n\ subf %0,%1,%0\n" PPC405_ERR77(0,%2) PPC_STLCX "%0,0,%2 \n\ bne- 1b" : "=&r" (t) : "r" (a), "r" (&(l->a.counter)) : "cc", "memory"); return t; } static __inline__ long local_inc_return(local_t *l) { long t; __asm__ __volatile__( "1:" PPC_LLARX(%0,0,%1,0) " # local_inc_return\n\ addic %0,%0,1\n" PPC405_ERR77(0,%1) PPC_STLCX "%0,0,%1 \n\ bne- 1b" : "=&r" (t) : "r" (&(l->a.counter)) : "cc", "xer", "memory"); return t; } /* * local_inc_and_test - increment and test * @l: pointer of type local_t * * Atomically increments @l by 1 * and returns true if the result is zero, or false for all * other cases. 
*/ #define local_inc_and_test(l) (local_inc_return(l) == 0) static __inline__ long local_dec_return(local_t *l) { long t; __asm__ __volatile__( "1:" PPC_LLARX(%0,0,%1,0) " # local_dec_return\n\ addic %0,%0,-1\n" PPC405_ERR77(0,%1) PPC_STLCX "%0,0,%1\n\ bne- 1b" : "=&r" (t) : "r" (&(l->a.counter)) : "cc", "xer", "memory"); return t; } #define local_cmpxchg(l, o, n) \ (cmpxchg_local(&((l)->a.counter), (o), (n))) #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n))) /** * local_add_unless - add unless the number is a given value * @l: pointer of type local_t * @a: the amount to add to v... * @u: ...unless v is equal to u. * * Atomically adds @a to @l, so long as it was not @u. * Returns non-zero if @l was not @u, and zero otherwise. */ static __inline__ int local_add_unless(local_t *l, long a, long u) { long t; __asm__ __volatile__ ( "1:" PPC_LLARX(%0,0,%1,0) " # local_add_unless\n\ cmpw 0,%0,%3 \n\ beq- 2f \n\ add %0,%2,%0 \n" PPC405_ERR77(0,%2) PPC_STLCX "%0,0,%1 \n\ bne- 1b \n" " subf %0,%2,%0 \n\ 2:" : "=&r" (t) : "r" (&(l->a.counter)), "r" (a), "r" (u) : "cc", "memory"); return t != u; } #define local_inc_not_zero(l) local_add_unless((l), 1, 0) #define local_sub_and_test(a, l) (local_sub_return((a), (l)) == 0) #define local_dec_and_test(l) (local_dec_return((l)) == 0) /* * Atomically test *l and decrement if it is greater than 0. * The function returns the old value of *l minus 1. */ static __inline__ long <API key>(local_t *l) { long t; __asm__ __volatile__( "1:" PPC_LLARX(%0,0,%1,0) " # <API key>\n\ cmpwi %0,1\n\ addi %0,%0,-1\n\ blt- 2f\n" PPC405_ERR77(0,%1) PPC_STLCX "%0,0,%1\n\ bne- 1b" "\n\ 2:" : "=&b" (t) : "r" (&(l->a.counter)) : "cc", "memory"); return t; } /* Use these for per-cpu local_t variables: on some archs they are * much more efficient than these naive implementations. Note they take * a variable, not an address. 
*/ #define __local_inc(l) ((l)->a.counter++) #define __local_dec(l) ((l)->a.counter++) #define __local_add(i,l) ((l)->a.counter+=(i)) #define __local_sub(i,l) ((l)->a.counter-=(i)) #endif /* <API key> */
#include <drm/drmP.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <linux/clk.h> #include <linux/pm_runtime.h> #include <video/videomode.h> #include "malidp_drv.h" #include "malidp_hw.h" static enum drm_mode_status <API key>(struct drm_crtc *crtc, const struct drm_display_mode *mode) { struct malidp_drm *malidp = <API key>(crtc); struct malidp_hw_device *hwdev = malidp->dev; /* * check that the hardware can drive the required clock rate, * but skip the check if the clock is meant to be disabled (req_rate = 0) */ long rate, req_rate = mode->crtc_clock * 1000; if (req_rate) { rate = clk_round_rate(hwdev->pxlclk, req_rate); if (rate != req_rate) { DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n", req_rate); return MODE_NOCLOCK; } } return MODE_OK; } static void <API key>(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct malidp_drm *malidp = <API key>(crtc); struct malidp_hw_device *hwdev = malidp->dev; struct videomode vm; int err = pm_runtime_get_sync(crtc->dev->dev); if (err < 0) { DRM_DEBUG_DRIVER("Failed to enable runtime power management: %d\n", err); return; } <API key>(&crtc->state->adjusted_mode, &vm); clk_prepare_enable(hwdev->pxlclk); /* We rely on firmware to set mclk to a sensible level. 
*/ clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); hwdev->modeset(hwdev, &vm); hwdev->leave_config_mode(hwdev); drm_crtc_vblank_on(crtc); } static void <API key>(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct malidp_drm *malidp = <API key>(crtc); struct malidp_hw_device *hwdev = malidp->dev; int err; drm_crtc_vblank_off(crtc); hwdev->enter_config_mode(hwdev); <API key>(hwdev->pxlclk); err = pm_runtime_put(crtc->dev->dev); if (err < 0) { DRM_DEBUG_DRIVER("Failed to disable runtime power management: %d\n", err); } } static const struct gamma_curve_segment { u16 start; u16 end; } segments[<API key>] = { /* sector 0 */ { 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 }, { 12, 12 }, { 13, 13 }, { 14, 14 }, { 15, 15 }, /* sector 1 */ { 16, 19 }, { 20, 23 }, { 24, 27 }, { 28, 31 }, /* sector 2 */ { 32, 39 }, { 40, 47 }, { 48, 55 }, { 56, 63 }, /* sector 3 */ { 64, 79 }, { 80, 95 }, { 96, 111 }, { 112, 127 }, /* sector 4 */ { 128, 159 }, { 160, 191 }, { 192, 223 }, { 224, 255 }, /* sector 5 */ { 256, 319 }, { 320, 383 }, { 384, 447 }, { 448, 511 }, /* sector 6 */ { 512, 639 }, { 640, 767 }, { 768, 895 }, { 896, 1023 }, { 1024, 1151 }, { 1152, 1279 }, { 1280, 1407 }, { 1408, 1535 }, { 1536, 1663 }, { 1664, 1791 }, { 1792, 1919 }, { 1920, 2047 }, { 2048, 2175 }, { 2176, 2303 }, { 2304, 2431 }, { 2432, 2559 }, { 2560, 2687 }, { 2688, 2815 }, { 2816, 2943 }, { 2944, 3071 }, { 3072, 3199 }, { 3200, 3327 }, { 3328, 3455 }, { 3456, 3583 }, { 3584, 3711 }, { 3712, 3839 }, { 3840, 3967 }, { 3968, 4095 }, }; #define DE_COEFTAB_DATA(a, b) ((((a) & 0xfff) << 16) | (((b) & 0xfff))) static void <API key>(struct drm_property_blob *lut_blob, u32 coeffs[<API key>]) { struct drm_color_lut *lut = (struct drm_color_lut *)lut_blob->data; int i; for (i = 0; i < <API key>; ++i) { u32 a, b, delta_in, out_start, out_end; delta_in = segments[i].end - segments[i].start; /* DP has 
12-bit internal precision for its LUTs. */ out_start = <API key>(lut[segments[i].start].green, 12); out_end = <API key>(lut[segments[i].end].green, 12); a = (delta_in == 0) ? 0 : ((out_end - out_start) * 256) / delta_in; b = out_start; coeffs[i] = DE_COEFTAB_DATA(a, b); } } /* * Check if there is a new gamma LUT and if it is of an acceptable size. Also, * reject any LUTs that use distinct red, green, and blue curves. */ static int <API key>(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct malidp_crtc_state *mc = <API key>(state); struct drm_color_lut *lut; size_t lut_size; int i; if (!state->color_mgmt_changed || !state->gamma_lut) return 0; if (crtc->state->gamma_lut && (crtc->state->gamma_lut->base.id == state->gamma_lut->base.id)) return 0; if (state->gamma_lut->length % sizeof(struct drm_color_lut)) return -EINVAL; lut_size = state->gamma_lut->length / sizeof(struct drm_color_lut); if (lut_size != <API key>) return -EINVAL; lut = (struct drm_color_lut *)state->gamma_lut->data; for (i = 0; i < lut_size; ++i) if (!((lut[i].red == lut[i].green) && (lut[i].red == lut[i].blue))) return -EINVAL; if (!state->mode_changed) { int ret; state->mode_changed = true; /* * Kerneldoc for <API key> mandates that * it be invoked when the driver sets ->mode_changed. Since * changing the gamma LUT doesn't depend on any external * resources, it is safe to call it only once. */ ret = <API key>(crtc->dev, state->state); if (ret) return ret; } <API key>(state->gamma_lut, mc->gamma_coeffs); return 0; } /* * Check if there is a new CTM and if it contains valid input. Valid here means * that the number is inside the representable range for a Q3.12 number, * excluding truncating the fractional part of the input data. * * The COLORADJ registers can be changed atomically. 
*/ static int <API key>(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct malidp_crtc_state *mc = <API key>(state); struct drm_color_ctm *ctm; int i; if (!state->color_mgmt_changed) return 0; if (!state->ctm) return 0; if (crtc->state->ctm && (crtc->state->ctm->base.id == state->ctm->base.id)) return 0; /* * The size of the ctm is checked in * <API key>. */ ctm = (struct drm_color_ctm *)state->ctm->data; for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) { /* Convert from S31.32 to Q3.12. */ s64 val = ctm->matrix[i]; u32 mag = ((((u64)val) & ~BIT_ULL(63)) >> 20) & GENMASK_ULL(14, 0); /* * Convert to 2s complement and check the destination's top bit * for overflow. NB: Can't check before converting or it'd * incorrectly reject the case: * sign == 1 * mag == 0x2000 */ if (val & BIT_ULL(63)) mag = ~mag + 1; if (!!(val & BIT_ULL(63)) != !!(mag & BIT(14))) return -EINVAL; mc->coloradj_coeffs[i] = mag; } return 0; } static int <API key>(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct malidp_drm *malidp = <API key>(crtc); struct malidp_hw_device *hwdev = malidp->dev; struct malidp_crtc_state *cs = <API key>(state); struct malidp_se_config *s = &cs->scaler_config; struct drm_plane *plane; struct videomode vm; const struct drm_plane_state *pstate; u32 h_upscale_factor = 0; /* U16.16 */ u32 v_upscale_factor = 0; /* U16.16 */ u8 scaling = cs->scaled_planes_mask; int ret; if (!scaling) { s->scale_enable = false; goto mclk_calc; } /* The scaling engine can only handle one plane at a time. */ if (scaling & (scaling - 1)) return -EINVAL; <API key>(plane, pstate, state) { struct malidp_plane *mp = to_malidp_plane(plane); u32 phase; if (!(mp->layer->id & scaling)) continue; /* * Convert crtc_[w|h] to U32.32, then divide by U16.16 src_[w|h] * to get the U16.16 result. 
*/ h_upscale_factor = div_u64((u64)pstate->crtc_w << 32, pstate->src_w); v_upscale_factor = div_u64((u64)pstate->crtc_h << 32, pstate->src_h); s->enhancer_enable = ((h_upscale_factor >> 16) >= 2 || (v_upscale_factor >> 16) >= 2); s->input_w = pstate->src_w >> 16; s->input_h = pstate->src_h >> 16; s->output_w = pstate->crtc_w; s->output_h = pstate->crtc_h; #define SE_N_PHASE 4 #define SE_SHIFT_N_PHASE 12 /* Calculate initial_phase and delta_phase for horizontal. */ phase = s->input_w; s->h_init_phase = ((phase << SE_N_PHASE) / s->output_w + 1) / 2; phase = s->input_w; phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE); s->h_delta_phase = phase / s->output_w; /* Same for vertical. */ phase = s->input_h; s->v_init_phase = ((phase << SE_N_PHASE) / s->output_h + 1) / 2; phase = s->input_h; phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE); s->v_delta_phase = phase / s->output_h; #undef SE_N_PHASE #undef SE_SHIFT_N_PHASE s->plane_src_id = mp->layer->id; } s->scale_enable = true; s->hcoeff = <API key>(h_upscale_factor); s->vcoeff = <API key>(v_upscale_factor); mclk_calc: <API key>(&state->adjusted_mode, &vm); ret = hwdev->se_calc_mclk(hwdev, s, &vm); if (ret < 0) return -EINVAL; return 0; } static int <API key>(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct malidp_drm *malidp = <API key>(crtc); struct malidp_hw_device *hwdev = malidp->dev; struct drm_plane *plane; const struct drm_plane_state *pstate; u32 rot_mem_free, rot_mem_usable; int rotated_planes = 0; int ret; /* first count the number of rotated planes */ <API key>(plane, pstate, state) { if (pstate->rotation & MALIDP_ROTATED_MASK) rotated_planes++; } rot_mem_free = hwdev->rotation_memory[0]; /* * if we have more than 1 plane using rotation memory, use the second * block of rotation memory as well */ if (rotated_planes > 1) rot_mem_free += hwdev->rotation_memory[1]; /* now validate the rotation memory requirements */ <API key>(plane, pstate, state) { struct malidp_plane *mp = to_malidp_plane(plane); struct 
malidp_plane_state *ms = <API key>(pstate); if (pstate->rotation & MALIDP_ROTATED_MASK) { /* process current plane */ rotated_planes if (!rotated_planes) { /* no more rotated planes, we can use what's left */ rot_mem_usable = rot_mem_free; } else { if ((mp->layer->id != DE_VIDEO1) || (hwdev->rotation_memory[1] == 0)) rot_mem_usable = rot_mem_free / 2; else rot_mem_usable = hwdev->rotation_memory[0]; } rot_mem_free -= rot_mem_usable; if (ms->rotmem_size > rot_mem_usable) return -EINVAL; } } ret = <API key>(crtc, state); ret = ret ? ret : <API key>(crtc, state); ret = ret ? ret : <API key>(crtc, state); return ret; } static const struct <API key> <API key> = { .mode_valid = <API key>, .atomic_check = <API key>, .atomic_enable = <API key>, .atomic_disable = <API key>, }; static struct drm_crtc_state *<API key>(struct drm_crtc *crtc) { struct malidp_crtc_state *state, *old_state; if (WARN_ON(!crtc->state)) return NULL; old_state = <API key>(crtc->state); state = kmalloc(sizeof(*state), GFP_KERNEL); if (!state) return NULL; <API key>(crtc, &state->base); memcpy(state->gamma_coeffs, old_state->gamma_coeffs, sizeof(state->gamma_coeffs)); memcpy(state->coloradj_coeffs, old_state->coloradj_coeffs, sizeof(state->coloradj_coeffs)); memcpy(&state->scaler_config, &old_state->scaler_config, sizeof(state->scaler_config)); state->scaled_planes_mask = 0; return &state->base; } static void malidp_crtc_reset(struct drm_crtc *crtc) { struct malidp_crtc_state *state = NULL; if (crtc->state) { state = <API key>(crtc->state); <API key>(crtc->state); } kfree(state); state = kzalloc(sizeof(*state), GFP_KERNEL); if (state) { crtc->state = &state->base; crtc->state->crtc = crtc; } } static void <API key>(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct malidp_crtc_state *mali_state = NULL; if (state) { mali_state = <API key>(state); <API key>(state); } kfree(mali_state); } static int <API key>(struct drm_crtc *crtc) { struct malidp_drm *malidp = <API key>(crtc); struct 
malidp_hw_device *hwdev = malidp->dev; <API key>(hwdev, MALIDP_DE_BLOCK, hwdev->map.de_irq_map.vsync_irq); return 0; } static void <API key>(struct drm_crtc *crtc) { struct malidp_drm *malidp = <API key>(crtc); struct malidp_hw_device *hwdev = malidp->dev; <API key>(hwdev, MALIDP_DE_BLOCK, hwdev->map.de_irq_map.vsync_irq); } static const struct drm_crtc_funcs malidp_crtc_funcs = { .gamma_set = <API key>, .destroy = drm_crtc_cleanup, .set_config = <API key>, .page_flip = <API key>, .reset = malidp_crtc_reset, .<API key> = <API key>, .<API key> = <API key>, .enable_vblank = <API key>, .disable_vblank = <API key>, }; int malidp_crtc_init(struct drm_device *drm) { struct malidp_drm *malidp = drm->dev_private; struct drm_plane *primary = NULL, *plane; int ret; ret = <API key>(drm); if (ret < 0) { DRM_ERROR("Failed to initialise planes\n"); return ret; } drm_for_each_plane(plane, drm) { if (plane->type == <API key>) { primary = plane; break; } } if (!primary) { DRM_ERROR("no primary plane found\n"); ret = -EINVAL; goto crtc_cleanup_planes; } ret = <API key>(drm, &malidp->crtc, primary, NULL, &malidp_crtc_funcs, NULL); if (ret) goto crtc_cleanup_planes; drm_crtc_helper_add(&malidp->crtc, &<API key>); <API key>(&malidp->crtc, <API key>); /* No inverse-gamma: it is per-plane. */ <API key>(&malidp->crtc, 0, true, <API key>); <API key>(malidp->dev); return 0; crtc_cleanup_planes: <API key>(drm); return ret; }
// This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the // Free Software Foundation; either version 3, or (at your option) // any later version. // This library is distributed in the hope that it will be useful, // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // with this library; see the file COPYING3. If not see #include <<API key>.h> template<typename Container, int Iter> void do_loop() { // avoid excessive swap file use! static const unsigned max_size = 250000; // make results less random while static const unsigned iterations = 10; // keeping the total time reasonable static const unsigned step = 50000; using namespace std; typedef int test_type; typedef Container container_type; typedef vector<test_type> vector_type; // Initialize sorted array. vector_type v(max_size, 0); for (unsigned int i = 0; i != max_size; ++i) v[i] = i; for (unsigned int count = step; count <= max_size; count += step) { for (unsigned i = 0; i != iterations; ++i) { container_type test_set; typename container_type::iterator iter = test_set.end(); // Each insert in amortized constant time (Table 69) for (unsigned j = 0; j != count; ++j) iter = test_set.insert(iter, v[j]); } } } int main() { #ifdef TEST_S1 #define thread_type false #endif #ifdef TEST_T1 #define thread_type true #endif typedef __gnu_test::sets<int, thread_type>::type container_types; typedef test_sequence<thread_type> test_type; test_type test("insert_from_sorted"); __gnu_cxx::typelist::apply(test, container_types()); return 0; }
#include <linux/sched.h> #include <linux/kthread.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/cpuset.h> #include <linux/unistd.h> #include <linux/file.h> #include <linux/export.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/freezer.h> #include <linux/ptrace.h> #include <linux/uaccess.h> #include <trace/events/sched.h> static DEFINE_SPINLOCK(kthread_create_lock); static LIST_HEAD(kthread_create_list); struct task_struct *kthreadd_task; struct kthread_create_info { /* Information passed to kthread() from kthreadd. */ int (*threadfn)(void *data); void *data; int node; /* Result passed back to kthread_create() from kthreadd. */ struct task_struct *result; struct completion *done; struct list_head list; }; struct kthread { unsigned long flags; unsigned int cpu; void *data; struct completion parked; struct completion exited; }; enum KTHREAD_BITS { KTHREAD_IS_PER_CPU = 0, KTHREAD_SHOULD_STOP, KTHREAD_SHOULD_PARK, KTHREAD_IS_PARKED, }; #define __to_kthread(vfork) \ container_of(vfork, struct kthread, exited) static inline struct kthread *to_kthread(struct task_struct *k) { return __to_kthread(k->vfork_done); } static struct kthread *to_live_kthread(struct task_struct *k) { struct completion *vfork = ACCESS_ONCE(k->vfork_done); if (likely(vfork)) return __to_kthread(vfork); return NULL; } /** * kthread_should_stop - should this kthread return now? * * When someone calls kthread_stop() on your kthread, it will be woken * and this will return true. You should then return, and your return * value will be passed through to kthread_stop(). */ bool kthread_should_stop(void) { return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags); } EXPORT_SYMBOL(kthread_should_stop); /** * kthread_should_park - should this kthread park now? * * When someone calls kthread_park() on your kthread, it will be woken * and this will return true. 
You should then do the necessary * cleanup and call kthread_parkme() * * Similar to kthread_should_stop(), but this keeps the thread alive * and in a park position. kthread_unpark() "restarts" the thread and * calls the thread function again. */ bool kthread_should_park(void) { return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); } /** * <API key> - should this freezable kthread return now? * @was_frozen: optional out parameter, indicates whether %current was frozen * * kthread_should_stop() for freezable kthreads, which will enter * refrigerator if necessary. This function is safe from kthread_stop() / * freezer deadlock and freezable kthreads should use this function instead * of calling try_to_freeze() directly. */ bool <API key>(bool *was_frozen) { bool frozen = false; might_sleep(); if (unlikely(freezing(current))) frozen = __refrigerator(true); if (was_frozen) *was_frozen = frozen; return kthread_should_stop(); } EXPORT_SYMBOL_GPL(<API key>); /** * kthread_data - return data value specified on kthread creation * @task: kthread task in question * * Return the data value specified when kthread @task was created. * The caller is responsible for ensuring the validity of @task when * calling this function. */ void *kthread_data(struct task_struct *task) { return to_kthread(task)->data; } /** * probe_kthread_data - speculative version of kthread_data() * @task: possible kthread task in question * * @task could be a kthread task. Return the data value specified when it * was created if accessible. If @task isn't a kthread task or its data is * inaccessible for any reason, %NULL is returned. This function requires * that @task itself is safe to dereference. 
*/ void *probe_kthread_data(struct task_struct *task) { struct kthread *kthread = to_kthread(task); void *data = NULL; probe_kernel_read(&data, &kthread->data, sizeof(data)); return data; } static void __kthread_parkme(struct kthread *self) { __set_current_state(TASK_PARKED); while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) { if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) complete(&self->parked); schedule(); __set_current_state(TASK_PARKED); } clear_bit(KTHREAD_IS_PARKED, &self->flags); __set_current_state(TASK_RUNNING); } void kthread_parkme(void) { __kthread_parkme(to_kthread(current)); } static int kthread(void *_create) { /* Copy data: it's on kthread's stack */ struct kthread_create_info *create = _create; int (*threadfn)(void *data) = create->threadfn; void *data = create->data; struct completion *done; struct kthread self; int ret; self.flags = 0; self.data = data; init_completion(&self.exited); init_completion(&self.parked); current->vfork_done = &self.exited; /* If user was SIGKILLed, I release the structure. */ done = xchg(&create->done, NULL); if (!done) { kfree(create); do_exit(-EINTR); } /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(<API key>); create->result = current; complete(done); schedule(); ret = -EINTR; if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) { __kthread_parkme(&self); ret = threadfn(data); } /* we can't just return, we must preserve "self" on stack */ do_exit(ret); } /* called from do_fork() to get node information for about to be created task */ int tsk_fork_get_node(struct task_struct *tsk) { #ifdef CONFIG_NUMA if (tsk == kthreadd_task) return tsk->pref_node_fork; #endif return NUMA_NO_NODE; } static void create_kthread(struct kthread_create_info *create) { int pid; #ifdef CONFIG_NUMA current->pref_node_fork = create->node; #endif /* We want our own signal handler (we take no signals by default). 
*/ pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); if (pid < 0) { /* If user was SIGKILLed, I release the structure. */ struct completion *done = xchg(&create->done, NULL); if (!done) { kfree(create); return; } create->result = ERR_PTR(pid); complete(done); } } /** * <API key> - create a kthread. * @threadfn: the function to run until signal_pending(current). * @data: data ptr for @threadfn. * @node: memory node number. * @namefmt: printf-style name for the thread. * * Description: This helper function creates and names a kernel * thread. The thread will be stopped: use wake_up_process() to start * it. See also kthread_run(). * * If thread is going to be bound on a particular cpu, give its node * in @node, to get NUMA affinity for kthread stack, or else give -1. * When woken, the thread will run @threadfn() with @data as its * argument. @threadfn() can either call do_exit() directly if it is a * standalone thread for which no one will call kthread_stop(), or * return when 'kthread_should_stop()' is true (which means * kthread_stop() has been called). The return value should be zero * or a negative error number; it will be passed to kthread_stop(). * * Returns a task_struct or ERR_PTR(-ENOMEM). */ struct task_struct *<API key>(int (*threadfn)(void *data), void *data, int node, const char namefmt[], ) { <API key>(done); struct task_struct *task; struct kthread_create_info *create = kmalloc(sizeof(*create), GFP_KERNEL); if (!create) return ERR_PTR(-ENOMEM); create->threadfn = threadfn; create->data = data; create->node = node; create->done = &done; spin_lock(&kthread_create_lock); list_add_tail(&create->list, &kthread_create_list); spin_unlock(&kthread_create_lock); wake_up_process(kthreadd_task); /* * Wait for completion in killable state, for I might be chosen by * the OOM killer while kthreadd is trying to allocate memory for * new kernel thread. 
*/ if (unlikely(<API key>(&done))) { /* * If I was SIGKILLed before kthreadd (or new kernel thread) * calls complete(), leave the cleanup of this structure to * that thread. */ if (xchg(&create->done, NULL)) return ERR_PTR(-ENOMEM); /* * kthreadd (or new kernel thread) will call complete() * shortly. */ wait_for_completion(&done); } task = create->result; if (!IS_ERR(task)) { static const struct sched_param param = { .sched_priority = 0 }; va_list args; va_start(args, namefmt); vsnprintf(task->comm, sizeof(task->comm), namefmt, args); va_end(args); /* * root may have changed our (kthreadd's) priority or CPU mask. * The kernel thread should not inherit these properties. */ <API key>(task, SCHED_NORMAL, &param); <API key>(task, cpu_all_mask); } kfree(create); return task; } EXPORT_SYMBOL(<API key>); static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state) { /* Must have done schedule() in kthread() before we set_task_cpu */ if (!wait_task_inactive(p, state)) { WARN_ON(1); return; } /* It's safe because the task is inactive. */ do_set_cpus_allowed(p, cpumask_of(cpu)); p->flags |= PF_NO_SETAFFINITY; } void kthread_bind(struct task_struct *p, unsigned int cpu) { __kthread_bind(p, cpu, <API key>); } EXPORT_SYMBOL(kthread_bind); /** * <API key> - Create a cpu bound kthread * @threadfn: the function to run until signal_pending(current). * @data: data ptr for @threadfn. * @cpu: The cpu on which the thread should be bound, * @namefmt: printf-style name for the thread. Format is restricted * to "name.*%u". Code fills in cpu number. * * Description: This helper function creates and names a kernel thread * The thread will be woken and put into park mode. 
*/ struct task_struct *<API key>(int (*threadfn)(void *data), void *data, unsigned int cpu, const char *namefmt) { struct task_struct *p; p = <API key>(threadfn, data, cpu_to_mem(cpu), namefmt, cpu); if (IS_ERR(p)) return p; set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags); to_kthread(p)->cpu = cpu; /* Park the thread to get it out of <API key> state */ kthread_park(p); return p; } static void __kthread_unpark(struct task_struct *k, struct kthread *kthread) { clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); /* * We clear the IS_PARKED bit here as we don't wait * until the task has left the park code. So if we'd * park before that happens we'd see the IS_PARKED bit * which might be about to be cleared. */ if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) __kthread_bind(k, kthread->cpu, TASK_PARKED); wake_up_state(k, TASK_PARKED); } } void kthread_unpark(struct task_struct *k) { struct kthread *kthread = to_live_kthread(k); if (kthread) __kthread_unpark(k, kthread); } int kthread_park(struct task_struct *k) { struct kthread *kthread = to_live_kthread(k); int ret = -ENOSYS; if (kthread) { if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) { set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); if (k != current) { wake_up_process(k); wait_for_completion(&kthread->parked); } } ret = 0; } return ret; } int kthread_stop(struct task_struct *k) { struct kthread *kthread; int ret; <API key>(k); get_task_struct(k); kthread = to_live_kthread(k); if (kthread) { set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); __kthread_unpark(k, kthread); wake_up_process(k); wait_for_completion(&kthread->exited); } ret = k->exit_code; put_task_struct(k); <API key>(ret); return ret; } EXPORT_SYMBOL(kthread_stop); int kthreadd(void *unused) { struct task_struct *tsk = current; /* Setup a clean context for our children to inherit. 
*/ set_task_comm(tsk, "kthreadd"); ignore_signals(tsk); <API key>(tsk, cpu_all_mask); set_mems_allowed(node_states[N_MEMORY]); current->flags |= PF_NOFREEZE; for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (list_empty(&kthread_create_list)) schedule(); __set_current_state(TASK_RUNNING); spin_lock(&kthread_create_lock); while (!list_empty(&kthread_create_list)) { struct kthread_create_info *create; create = list_entry(kthread_create_list.next, struct kthread_create_info, list); list_del_init(&create->list); spin_unlock(&kthread_create_lock); create_kthread(create); spin_lock(&kthread_create_lock); } spin_unlock(&kthread_create_lock); } return 0; } void <API key>(struct kthread_worker *worker, const char *name, struct lock_class_key *key) { spin_lock_init(&worker->lock); <API key>(&worker->lock, key, name); INIT_LIST_HEAD(&worker->work_list); worker->task = NULL; } EXPORT_SYMBOL_GPL(<API key>); /** * kthread_worker_fn - kthread function to process kthread_worker * @worker_ptr: pointer to initialized kthread_worker * * This function can be used as @threadfn to kthread_create() or * kthread_run() with @worker_ptr argument pointing to an initialized * kthread_worker. The started kthread will process work_list until * the it is stopped with kthread_stop(). A kthread can also call * this function directly after extra initialization. * * Different kthreads can be used for the same kthread_worker as long * as there's only one kthread attached to it at any given time. A * kthread_worker without an attached kthread simply collects queued * kthread_works. 
*/ int kthread_worker_fn(void *worker_ptr) { struct kthread_worker *worker = worker_ptr; struct kthread_work *work; WARN_ON(worker->task); worker->task = current; repeat: set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */ if (kthread_should_stop()) { __set_current_state(TASK_RUNNING); spin_lock_irq(&worker->lock); worker->task = NULL; spin_unlock_irq(&worker->lock); return 0; } work = NULL; spin_lock_irq(&worker->lock); if (!list_empty(&worker->work_list)) { work = list_first_entry(&worker->work_list, struct kthread_work, node); list_del_init(&work->node); } worker->current_work = work; spin_unlock_irq(&worker->lock); if (work) { __set_current_state(TASK_RUNNING); work->func(work); } else if (!freezing(current)) schedule(); try_to_freeze(); goto repeat; } EXPORT_SYMBOL_GPL(kthread_worker_fn); /* insert @work before @pos in @worker */ static void insert_kthread_work(struct kthread_worker *worker, struct kthread_work *work, struct list_head *pos) { lockdep_assert_held(&worker->lock); list_add_tail(&work->node, pos); work->worker = worker; if (likely(worker->task)) wake_up_process(worker->task); } /** * queue_kthread_work - queue a kthread_work * @worker: target kthread_worker * @work: kthread_work to queue * * Queue @work to work processor @task for async execution. @task * must have been created with <API key>(). Returns %true * if @work was successfully queued, %false if it was already pending. 
*/ bool queue_kthread_work(struct kthread_worker *worker, struct kthread_work *work) { bool ret = false; unsigned long flags; spin_lock_irqsave(&worker->lock, flags); if (list_empty(&work->node)) { insert_kthread_work(worker, work, &worker->work_list); ret = true; } <API key>(&worker->lock, flags); return ret; } EXPORT_SYMBOL_GPL(queue_kthread_work); struct kthread_flush_work { struct kthread_work work; struct completion done; }; static void <API key>(struct kthread_work *work) { struct kthread_flush_work *fwork = container_of(work, struct kthread_flush_work, work); complete(&fwork->done); } /** * flush_kthread_work - flush a kthread_work * @work: work to flush * * If @work is queued or executing, wait for it to finish execution. */ void flush_kthread_work(struct kthread_work *work) { struct kthread_flush_work fwork = { KTHREAD_WORK_INIT(fwork.work, <API key>), <API key>(fwork.done), }; struct kthread_worker *worker; bool noop = false; retry: worker = work->worker; if (!worker) return; spin_lock_irq(&worker->lock); if (work->worker != worker) { spin_unlock_irq(&worker->lock); goto retry; } if (!list_empty(&work->node)) insert_kthread_work(worker, &fwork.work, work->node.next); else if (worker->current_work == work) insert_kthread_work(worker, &fwork.work, worker->work_list.next); else noop = true; spin_unlock_irq(&worker->lock); if (!noop) wait_for_completion(&fwork.done); } EXPORT_SYMBOL_GPL(flush_kthread_work); /** * <API key> - flush all current works on a kthread_worker * @worker: worker to flush * * Wait until all currently executing or pending works on @worker are * finished. */ void <API key>(struct kthread_worker *worker) { struct kthread_flush_work fwork = { KTHREAD_WORK_INIT(fwork.work, <API key>), <API key>(fwork.done), }; queue_kthread_work(worker, &fwork.work); wait_for_completion(&fwork.done); } EXPORT_SYMBOL_GPL(<API key>);
// This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the // Free Software Foundation; either version 3, or (at your option) // any later version. // This library is distributed in the hope that it will be useful, // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // with this library; see the file COPYING3. If not see #include "1.h" #include <list> int main() { cons01<std::list< A<B> > >(); return 0; }
#include "libgfortran.h" #include <stdlib.h> #include <assert.h> #include <limits.h> #if defined (HAVE_GFC_REAL_10) && defined (HAVE_GFC_INTEGER_16) extern void maxloc0_16_r10 (gfc_array_i16 * const restrict retarray, gfc_array_r10 * const restrict array); export_proto(maxloc0_16_r10); void maxloc0_16_r10 (gfc_array_i16 * const restrict retarray, gfc_array_r10 * const restrict array) { index_type count[GFC_MAX_DIMENSIONS]; index_type extent[GFC_MAX_DIMENSIONS]; index_type sstride[GFC_MAX_DIMENSIONS]; index_type dstride; const GFC_REAL_10 *base; GFC_INTEGER_16 * restrict dest; index_type rank; index_type n; rank = GFC_DESCRIPTOR_RANK (array); if (rank <= 0) runtime_error ("Rank of array needs to be > 0"); if (retarray->base_addr == NULL) { GFC_DIMENSION_SET(retarray->dim[0], 0, rank-1, 1); retarray->dtype = (retarray->dtype & ~GFC_DTYPE_RANK_MASK) | 1; retarray->offset = 0; retarray->base_addr = xmallocarray (rank, sizeof (GFC_INTEGER_16)); } else { if (unlikely (compile_options.bounds_check)) <API key> ((array_t *) retarray, (array_t *) array, "MAXLOC"); } dstride = <API key>(retarray,0); dest = retarray->base_addr; for (n = 0; n < rank; n++) { sstride[n] = <API key>(array,n); extent[n] = <API key>(array,n); count[n] = 0; if (extent[n] <= 0) { /* Set the return value. */ for (n = 0; n < rank; n++) dest[n * dstride] = 0; return; } } base = array->base_addr; /* Initialize the return value. */ for (n = 0; n < rank; n++) dest[n * dstride] = 1; { GFC_REAL_10 maxval; #if defined(<API key>) int fast = 0; #endif #if defined(<API key>) maxval = -<API key>; #else maxval = -GFC_REAL_10_HUGE; #endif while (base) { do { /* Implementation start. 
*/ #if defined(<API key>) } while (0); if (unlikely (!fast)) { do { if (*base >= maxval) { fast = 1; maxval = *base; for (n = 0; n < rank; n++) dest[n * dstride] = count[n] + 1; break; } base += sstride[0]; } while (++count[0] != extent[0]); if (likely (fast)) continue; } else do { #endif if (*base > maxval) { maxval = *base; for (n = 0; n < rank; n++) dest[n * dstride] = count[n] + 1; } /* Implementation end. */ /* Advance to the next element. */ base += sstride[0]; } while (++count[0] != extent[0]); n = 0; do { /* When we get to the end of a dimension, reset it and increment the next dimension. */ count[n] = 0; /* We could precalculate these products, but this is a less frequently used path so probably not worth it. */ base -= sstride[n] * extent[n]; n++; if (n == rank) { /* Break out of the loop. */ base = NULL; break; } else { count[n]++; base += sstride[n]; } } while (count[n] == extent[n]); } } } extern void mmaxloc0_16_r10 (gfc_array_i16 * const restrict, gfc_array_r10 * const restrict, gfc_array_l1 * const restrict); export_proto(mmaxloc0_16_r10); void mmaxloc0_16_r10 (gfc_array_i16 * const restrict retarray, gfc_array_r10 * const restrict array, gfc_array_l1 * const restrict mask) { index_type count[GFC_MAX_DIMENSIONS]; index_type extent[GFC_MAX_DIMENSIONS]; index_type sstride[GFC_MAX_DIMENSIONS]; index_type mstride[GFC_MAX_DIMENSIONS]; index_type dstride; GFC_INTEGER_16 *dest; const GFC_REAL_10 *base; GFC_LOGICAL_1 *mbase; int rank; index_type n; int mask_kind; rank = GFC_DESCRIPTOR_RANK (array); if (rank <= 0) runtime_error ("Rank of array needs to be > 0"); if (retarray->base_addr == NULL) { GFC_DIMENSION_SET(retarray->dim[0], 0, rank - 1, 1); retarray->dtype = (retarray->dtype & ~GFC_DTYPE_RANK_MASK) | 1; retarray->offset = 0; retarray->base_addr = xmallocarray (rank, sizeof (GFC_INTEGER_16)); } else { if (unlikely (compile_options.bounds_check)) { <API key> ((array_t *) retarray, (array_t *) array, "MAXLOC"); <API key> ((array_t *) mask, (array_t *) 
array, "MASK argument", "MAXLOC"); } } mask_kind = GFC_DESCRIPTOR_SIZE (mask); mbase = mask->base_addr; if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 #ifdef HAVE_GFC_LOGICAL_16 || mask_kind == 16 #endif ) mbase = GFOR_POINTER_TO_L1 (mbase, mask_kind); else runtime_error ("Funny sized logical array"); dstride = <API key>(retarray,0); dest = retarray->base_addr; for (n = 0; n < rank; n++) { sstride[n] = <API key>(array,n); mstride[n] = <API key>(mask,n); extent[n] = <API key>(array,n); count[n] = 0; if (extent[n] <= 0) { /* Set the return value. */ for (n = 0; n < rank; n++) dest[n * dstride] = 0; return; } } base = array->base_addr; /* Initialize the return value. */ for (n = 0; n < rank; n++) dest[n * dstride] = 0; { GFC_REAL_10 maxval; int fast = 0; #if defined(<API key>) maxval = -<API key>; #else maxval = -GFC_REAL_10_HUGE; #endif while (base) { do { /* Implementation start. */ } while (0); if (unlikely (!fast)) { do { if (*mbase) { #if defined(<API key>) if (unlikely (dest[0] == 0)) for (n = 0; n < rank; n++) dest[n * dstride] = count[n] + 1; if (*base >= maxval) #endif { fast = 1; maxval = *base; for (n = 0; n < rank; n++) dest[n * dstride] = count[n] + 1; break; } } base += sstride[0]; mbase += mstride[0]; } while (++count[0] != extent[0]); if (likely (fast)) continue; } else do { if (*mbase && *base > maxval) { maxval = *base; for (n = 0; n < rank; n++) dest[n * dstride] = count[n] + 1; } /* Implementation end. */ /* Advance to the next element. */ base += sstride[0]; mbase += mstride[0]; } while (++count[0] != extent[0]); n = 0; do { /* When we get to the end of a dimension, reset it and increment the next dimension. */ count[n] = 0; /* We could precalculate these products, but this is a less frequently used path so probably not worth it. */ base -= sstride[n] * extent[n]; mbase -= mstride[n] * extent[n]; n++; if (n == rank) { /* Break out of the loop. 
*/ base = NULL; break; } else { count[n]++; base += sstride[n]; mbase += mstride[n]; } } while (count[n] == extent[n]); } } } extern void smaxloc0_16_r10 (gfc_array_i16 * const restrict, gfc_array_r10 * const restrict, GFC_LOGICAL_4 *); export_proto(smaxloc0_16_r10); void smaxloc0_16_r10 (gfc_array_i16 * const restrict retarray, gfc_array_r10 * const restrict array, GFC_LOGICAL_4 * mask) { index_type rank; index_type dstride; index_type n; GFC_INTEGER_16 *dest; if (*mask) { maxloc0_16_r10 (retarray, array); return; } rank = GFC_DESCRIPTOR_RANK (array); if (rank <= 0) runtime_error ("Rank of array needs to be > 0"); if (retarray->base_addr == NULL) { GFC_DIMENSION_SET(retarray->dim[0], 0, rank-1, 1); retarray->dtype = (retarray->dtype & ~GFC_DTYPE_RANK_MASK) | 1; retarray->offset = 0; retarray->base_addr = xmallocarray (rank, sizeof (GFC_INTEGER_16)); } else if (unlikely (compile_options.bounds_check)) { <API key> ((array_t *) retarray, (array_t *) array, "MAXLOC"); } dstride = <API key>(retarray,0); dest = retarray->base_addr; for (n = 0; n<rank; n++) dest[n * dstride] = 0 ; } #endif
#include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/gfp.h> #include "smumgr.h" #include "tonga_smumgr.h" #include "pp_debug.h" #include "smu_ucode_xfer_vi.h" #include "tonga_ppsmc.h" #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" #include "cgs_common.h" #define TONGA_SMC_SIZE 0x20000 #define BUFFER_SIZE 80000 #define MAX_STRING_SIZE 15 #define BUFFER_SIZETWO 131072 /*128 *1024*/ /** * Set the address for reading/writing the SMC SRAM space. * @param smumgr the address of the powerplay hardware manager. * @param smcAddress the address in the SMC RAM to access. */ static int <API key>(struct pp_smumgr *smumgr, uint32_t smcAddress, uint32_t limit) { if (smumgr == NULL || smumgr->device == NULL) return -EINVAL; PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)), "SMC address must be 4 byte aligned.", return -1;); PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)), "SMC address is beyond the SMC RAM area.", return -1;); cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress); SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, <API key>, 0); return 0; } /** * Copy bytes from an array into the SMC RAM space. * * @param smumgr the address of the powerplay SMU manager. * @param smcStartAddress the start address in the SMC RAM to copy bytes to. * @param src the byte array to copy the bytes from. * @param byteCount the number of bytes to copy. 
*/ int <API key>(struct pp_smumgr *smumgr, uint32_t smcStartAddress, const uint8_t *src, uint32_t byteCount, uint32_t limit) { uint32_t addr; uint32_t data, orig_data; int result = 0; uint32_t extra_shift; if (smumgr == NULL || smumgr->device == NULL) return -EINVAL; PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)), "SMC address must be 4 byte aligned.", return 0;); PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)), "SMC address is beyond the SMC RAM area.", return 0;); addr = smcStartAddress; while (byteCount >= 4) { /* * Bytes are written into the * SMC address space with the MSB first */ data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; result = <API key>(smumgr, addr, limit); if (result) goto out; cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); src += 4; byteCount -= 4; addr += 4; } if (0 != byteCount) { /* Now write odd bytes left, do a read modify write cycle */ data = 0; result = <API key>(smumgr, addr, limit); if (result) goto out; orig_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); extra_shift = 8 * (4 - byteCount); while (byteCount > 0) { data = (data << 8) + *src++; byteCount } data <<= extra_shift; data |= (orig_data & ~((~0UL) << extra_shift)); result = <API key>(smumgr, addr, limit); if (result) goto out; cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); } out: return result; } int <API key>(struct pp_smumgr *smumgr) { static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 }; <API key>(smumgr, 0x0, pData, 4, sizeof(pData)+1); return 0; } /** * Return if the SMC is currently running. * * @param smumgr the address of the powerplay hardware manager. 
*/ static int <API key>(struct pp_smumgr *smumgr) { return ((0 == <API key>(smumgr->device, CGS_IND_REG__SMC, <API key>, ck_disable)) && (0x20100 <= <API key>(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); } static int <API key>(struct pp_smumgr *smumgr) { if (smumgr == NULL || smumgr->device == NULL) return -EINVAL; <API key>(smumgr, SMC_RESP_0, SMC_RESP, 0); cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); <API key>(smumgr, SMC_RESP_0, SMC_RESP, 0); return 0; } /** * Send a message to the SMC, and wait for its response. * * @param smumgr the address of the powerplay hardware manager. * @param msg the message to send. * @return The response that came from the SMC. */ static int <API key>(struct pp_smumgr *smumgr, uint16_t msg) { if (smumgr == NULL || smumgr->device == NULL) return -EINVAL; if (!<API key>(smumgr)) return -1; <API key>(smumgr, SMC_RESP_0, SMC_RESP, 0); PP_ASSERT_WITH_CODE( 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), "Failed to send Previous Message.", ); cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); <API key>(smumgr, SMC_RESP_0, SMC_RESP, 0); PP_ASSERT_WITH_CODE( 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), "Failed to send Message.", ); return 0; } /* * Send a message to the SMC, and do not wait for its response. * * @param smumgr the address of the powerplay hardware manager. * @param msg the message to send. * @return The response that came from the SMC. */ static int <API key> (struct pp_smumgr *smumgr, uint16_t msg) { if (smumgr == NULL || smumgr->device == NULL) return -EINVAL; <API key>(smumgr, SMC_RESP_0, SMC_RESP, 0); PP_ASSERT_WITH_CODE( 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), "Failed to send Previous Message.", ); cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); return 0; } /* * Send a message to the SMC with parameter * * @param smumgr: the address of the powerplay hardware manager. 
* @param msg: the message to send. * @param parameter: the parameter to send * @return The response that came from the SMC. */ static int <API key>(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) { if (smumgr == NULL || smumgr->device == NULL) return -EINVAL; if (!<API key>(smumgr)) return PPSMC_Result_Failed; <API key>(smumgr, SMC_RESP_0, SMC_RESP, 0); cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); return <API key>(smumgr, msg); } /* * Send a message to the SMC with parameter, do not wait for response * * @param smumgr: the address of the powerplay hardware manager. * @param msg: the message to send. * @param parameter: the parameter to send * @return The response that came from the SMC. */ static int <API key>( struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) { if (smumgr == NULL || smumgr->device == NULL) return -EINVAL; <API key>(smumgr, SMC_RESP_0, SMC_RESP, 0); cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); return <API key>(smumgr, msg); } /* * Read a 32bit value from the SMC SRAM space. * ALL PARAMETERS ARE IN HOST BYTE ORDER. * @param smumgr the address of the powerplay hardware manager. * @param smcAddress the address in the SMC RAM to access. * @param value and output parameter for the data read from the SMC SRAM. */ int <API key>(struct pp_smumgr *smumgr, uint32_t smcAddress, uint32_t *value, uint32_t limit) { int result; result = <API key>(smumgr, smcAddress, limit); if (0 != result) return result; *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); return 0; } /* * Write a 32bit value to the SMC SRAM space. * ALL PARAMETERS ARE IN HOST BYTE ORDER. * @param smumgr the address of the powerplay hardware manager. * @param smcAddress the address in the SMC RAM to access. * @param value to write to the SMC SRAM. 
*/ int <API key>(struct pp_smumgr *smumgr, uint32_t smcAddress, uint32_t value, uint32_t limit) { int result; result = <API key>(smumgr, smcAddress, limit); if (0 != result) return result; cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value); return 0; } static int tonga_smu_fini(struct pp_smumgr *smumgr) { struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend); smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle); smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); if (smumgr->backend != NULL) { kfree(smumgr->backend); smumgr->backend = NULL; } cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); return 0; } static enum cgs_ucode_id <API key>(uint32_t fw_type) { enum cgs_ucode_id result = <API key>; switch (fw_type) { case UCODE_ID_SMU: result = CGS_UCODE_ID_SMU; break; case UCODE_ID_SDMA0: result = CGS_UCODE_ID_SDMA0; break; case UCODE_ID_SDMA1: result = CGS_UCODE_ID_SDMA1; break; case UCODE_ID_CP_CE: result = CGS_UCODE_ID_CP_CE; break; case UCODE_ID_CP_PFP: result = CGS_UCODE_ID_CP_PFP; break; case UCODE_ID_CP_ME: result = CGS_UCODE_ID_CP_ME; break; case UCODE_ID_CP_MEC: result = CGS_UCODE_ID_CP_MEC; break; case UCODE_ID_CP_MEC_JT1: result = <API key>; break; case UCODE_ID_CP_MEC_JT2: result = <API key>; break; case UCODE_ID_RLC_G: result = CGS_UCODE_ID_RLC_G; break; default: break; } return result; } /** * Convert the PPIRI firmware type to SMU type mask. 
* For MEC, we need to check all MEC related type */ static uint16_t <API key>(uint16_t firmwareType) { uint16_t result = 0; switch (firmwareType) { case UCODE_ID_SDMA0: result = UCODE_ID_SDMA0_MASK; break; case UCODE_ID_SDMA1: result = UCODE_ID_SDMA1_MASK; break; case UCODE_ID_CP_CE: result = UCODE_ID_CP_CE_MASK; break; case UCODE_ID_CP_PFP: result = <API key>; break; case UCODE_ID_CP_ME: result = UCODE_ID_CP_ME_MASK; break; case UCODE_ID_CP_MEC: case UCODE_ID_CP_MEC_JT1: case UCODE_ID_CP_MEC_JT2: result = <API key>; break; case UCODE_ID_RLC_G: result = UCODE_ID_RLC_G_MASK; break; default: break; } return result; } /** * Check if the FW has been loaded, * SMU will not return if loading has not finished. */ static int <API key>(struct pp_smumgr *smumgr, uint32_t fwType) { uint16_t fwMask = <API key>(fwType); if (0 != <API key>(smumgr, SMC_IND, <API key>, fwMask, fwMask)) { printk(KERN_ERR "[ powerplay ] check firmware loading failed\n"); return -EINVAL; } return 0; } /* Populate one firmware image to the data structure */ static int <API key>(struct pp_smumgr *smumgr, uint16_t firmware_type, struct SMU_Entry *pentry) { int result; struct cgs_firmware_info info = {0}; result = <API key>( smumgr->device, <API key>(firmware_type), &info); if (result == 0) { pentry->version = 0; pentry->id = (uint16_t)firmware_type; pentry->image_addr_high = smu_upper_32_bits(info.mc_addr); pentry->image_addr_low = smu_lower_32_bits(info.mc_addr); pentry->meta_data_addr_high = 0; pentry->meta_data_addr_low = 0; pentry->data_size_byte = info.image_size; pentry-><API key> = 0; if (firmware_type == UCODE_ID_RLC_G) pentry->flags = 1; else pentry->flags = 0; } else { return result; } return result; } static int <API key>(struct pp_smumgr *smumgr) { struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(smumgr->backend); uint16_t fw_to_load; struct SMU_DRAMData_TOC *toc; /** * First time this gets called during SmuMgr init, * we haven't processed SMU header file yet, * so Soft Register 
Start offset is unknown. * However, for this case, UcodeLoadStatus is already 0, * so we can skip this if the Soft Registers Start offset is 0. */ <API key>(smumgr->device, CGS_IND_REG__SMC, <API key>, 0); <API key>(smumgr, <API key>, tonga_smu->smu_buffer.mc_addr_high); <API key>(smumgr, <API key>, tonga_smu->smu_buffer.mc_addr_low); toc = (struct SMU_DRAMData_TOC *)tonga_smu->pHeader; toc->num_entries = 0; toc->structure_version = 1; PP_ASSERT_WITH_CODE( 0 == <API key>(smumgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.\n", return -1); PP_ASSERT_WITH_CODE( 0 == <API key>(smumgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.\n", return -1); PP_ASSERT_WITH_CODE( 0 == <API key> (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.\n", return -1); PP_ASSERT_WITH_CODE( 0 == <API key> (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.\n", return -1); PP_ASSERT_WITH_CODE( 0 == <API key> (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.\n", return -1); PP_ASSERT_WITH_CODE( 0 == <API key> (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.\n", return -1); PP_ASSERT_WITH_CODE( 0 == <API key> (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.\n", return -1); PP_ASSERT_WITH_CODE( 0 == <API key> (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.\n", return -1); PP_ASSERT_WITH_CODE( 0 == <API key> (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.\n", return -1); <API key>(smumgr, <API key>, tonga_smu->header_buffer.mc_addr_high); <API key>(smumgr, <API key>, tonga_smu->header_buffer.mc_addr_low); fw_to_load = UCODE_ID_RLC_G_MASK + UCODE_ID_SDMA0_MASK + UCODE_ID_SDMA1_MASK + UCODE_ID_CP_CE_MASK + UCODE_ID_CP_ME_MASK + <API key> + 
<API key>; PP_ASSERT_WITH_CODE( 0 == <API key>( smumgr, <API key>, fw_to_load), "Fail to Request SMU Load uCode", return 0); return 0; } static int <API key>(struct pp_smumgr *smumgr, uint32_t firmwareType) { return 0; } /** * Upload the SMC firmware to the SMC microcontroller. * * @param smumgr the address of the powerplay hardware manager. * @param pFirmware the data structure containing the various sections of the firmware. */ static int <API key>(struct pp_smumgr *smumgr) { const uint8_t *src; uint32_t byte_count; uint32_t *data; struct cgs_firmware_info info = {0}; if (smumgr == NULL || smumgr->device == NULL) return -EINVAL; <API key>(smumgr->device, <API key>(UCODE_ID_SMU), &info); if (info.image_size & 3) { printk(KERN_ERR "[ powerplay ] SMC ucode is not 4 bytes aligned\n"); return -EINVAL; } if (info.image_size > TONGA_SMC_SIZE) { printk(KERN_ERR "[ powerplay ] SMC address is beyond the SMC RAM area\n"); return -EINVAL; } cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, 0x20000); SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, <API key>, 1); byte_count = info.image_size; src = (const uint8_t *)info.kptr; data = (uint32_t *)src; for (; byte_count >= 4; data++, byte_count -= 4) cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data[0]); SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, <API key>, 0); return 0; } static int <API key>(struct pp_smumgr *smumgr) { int result; /* Assert reset */ <API key>(smumgr->device, CGS_IND_REG__SMC, <API key>, rst_reg, 1); result = <API key>(smumgr); if (result) return result; /* Clear status */ <API key>(smumgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); /* Enable clock */ <API key>(smumgr->device, CGS_IND_REG__SMC, <API key>, ck_disable, 0); /* De-assert reset */ <API key>(smumgr->device, CGS_IND_REG__SMC, <API key>, rst_reg, 0); /* Set SMU Auto Start */ <API key>(smumgr->device, CGS_IND_REG__SMC, SMU_INPUT_DATA, AUTO_START, 1); /* Clear firmware interrupt enable flag */ <API key>(smumgr->device, 
CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); <API key>(smumgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); /** * Call Test SMU message with 0x20000 offset to trigger SMU start */ <API key>(smumgr); /* Wait for done bit to be set */ <API key>(smumgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ if (1 != <API key>(smumgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) { printk(KERN_ERR "[ powerplay ] SMU Firmware start failed\n"); return -EINVAL; } /* Wait for firmware to initialize */ <API key>(smumgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return 0; } static int <API key>(struct pp_smumgr *smumgr) { int result = 0; /* wait for smc boot up */ <API key>(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /*Clear firmware interrupt enable flag*/ <API key>(smumgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); <API key>(smumgr->device, CGS_IND_REG__SMC, <API key>, rst_reg, 1); result = <API key>(smumgr); if (result != 0) return result; /* Set smc instruct start point at 0x0 */ <API key>(smumgr); <API key>(smumgr->device, CGS_IND_REG__SMC, <API key>, ck_disable, 0); /*De-assert reset*/ <API key>(smumgr->device, CGS_IND_REG__SMC, <API key>, rst_reg, 0); /* Wait for firmware to initialize */ <API key>(smumgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } static int tonga_start_smu(struct pp_smumgr *smumgr) { int result; /* Only start SMC if SMC RAM is not running */ if (!<API key>(smumgr)) { /*Check if SMU is running in protected mode*/ if (0 == <API key>(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { result = <API key>(smumgr); if (result) return result; } else { result = <API key>(smumgr); if (result) return result; } } result = <API key>(smumgr); return result; } /** * Write a 32bit value to the SMC SRAM space. * ALL PARAMETERS ARE IN HOST BYTE ORDER. * @param smumgr the address of the powerplay hardware manager. * @param smcAddress the address in the SMC RAM to access. 
* @param value to write to the SMC SRAM. */ static int tonga_smu_init(struct pp_smumgr *smumgr) { struct tonga_smumgr *tonga_smu; uint8_t *internal_buf; uint64_t mc_addr = 0; /* Allocate memory for backend private data */ tonga_smu = (struct tonga_smumgr *)(smumgr->backend); tonga_smu->header_buffer.data_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; tonga_smu->smu_buffer.data_size = 200*4096; smu_allocate_memory(smumgr->device, tonga_smu->header_buffer.data_size, <API key>, PAGE_SIZE, &mc_addr, &tonga_smu->header_buffer.kaddr, &tonga_smu->header_buffer.handle); tonga_smu->pHeader = tonga_smu->header_buffer.kaddr; tonga_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); tonga_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); PP_ASSERT_WITH_CODE((NULL != tonga_smu->pHeader), "Out of memory.", kfree(smumgr->backend); cgs_free_gpu_mem(smumgr->device, (cgs_handle_t)tonga_smu->header_buffer.handle); return -1); smu_allocate_memory(smumgr->device, tonga_smu->smu_buffer.data_size, <API key>, PAGE_SIZE, &mc_addr, &tonga_smu->smu_buffer.kaddr, &tonga_smu->smu_buffer.handle); internal_buf = tonga_smu->smu_buffer.kaddr; tonga_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); tonga_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); PP_ASSERT_WITH_CODE((NULL != internal_buf), "Out of memory.", kfree(smumgr->backend); cgs_free_gpu_mem(smumgr->device, (cgs_handle_t)tonga_smu->smu_buffer.handle); return -1;); return 0; } static const struct pp_smumgr_func tonga_smu_funcs = { .smu_init = &tonga_smu_init, .smu_fini = &tonga_smu_fini, .start_smu = &tonga_start_smu, .<API key> = &<API key>, .request_smu_load_fw = &<API key>, .<API key> = &<API key>, .send_msg_to_smc = &<API key>, .<API key> = &<API key>, .<API key> = NULL, .<API key> = NULL, }; int tonga_smum_init(struct pp_smumgr *smumgr) { struct tonga_smumgr *tonga_smu = NULL; tonga_smu = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL); if (tonga_smu == NULL) return -ENOMEM; 
smumgr->backend = tonga_smu; smumgr->smumgr_funcs = &tonga_smu_funcs; return 0; }
#ifndef GRUB_COMMAND_HEADER #define GRUB_COMMAND_HEADER 1 #include <grub/symbol.h> #include <grub/err.h> #include <grub/list.h> #include <grub/misc.h> typedef enum grub_command_flags { /* This is an extended command. */ <API key> = 0x10, /* This is an dynamic command. */ <API key> = 0x20, /* This command accepts block arguments. */ <API key> = 0x40, /* This command accepts unknown arguments as direct parameters. */ <API key> = 0x80, /* This command accepts only options preceding direct arguments. */ <API key> = 0x100, /* Can be executed in an entries extractor. */ <API key> = 0x200 } <API key>; struct grub_command; typedef grub_err_t (*grub_command_func_t) (struct grub_command *cmd, int argc, char **argv); #define <API key> 0xff #define <API key> 0x100 /* The command description. */ struct grub_command { /* The next element. */ struct grub_command *next; struct grub_command **prev; /* The name. */ const char *name; /* The priority. */ int prio; /* The callback function. */ grub_command_func_t func; /* The flags. */ <API key> flags; /* The summary of the command usage. */ const char *summary; /* The description of the command. */ const char *description; /* Arbitrary data. 
*/ void *data; }; typedef struct grub_command *grub_command_t; extern grub_command_t EXPORT_VAR(grub_command_list); grub_command_t EXPORT_FUNC(<API key>) (const char *name, grub_command_func_t func, const char *summary, const char *description, int prio); void EXPORT_FUNC(<API key>) (grub_command_t cmd); static inline grub_command_t <API key> (const char *name, grub_command_func_t func, const char *summary, const char *description) { return <API key> (name, func, summary, description, 0); } static inline grub_command_t <API key> (const char *name, grub_command_func_t func, const char *summary, const char *description) { return <API key> (name, func, summary, description, 1); } static inline grub_command_t grub_command_find (const char *name) { return <API key> (GRUB_AS_NAMED_LIST (grub_command_list), name); } static inline grub_err_t <API key> (const char *name, int argc, char **argv) { grub_command_t cmd; cmd = grub_command_find (name); return (cmd) ? cmd->func (cmd, argc, argv) : <API key>; } #define FOR_COMMANDS(var) FOR_LIST_ELEMENTS((var), grub_command_list) #define FOR_COMMANDS_SAFE(var, next) <API key>((var), (next), grub_command_list) void <API key> (void); #endif /* ! GRUB_COMMAND_HEADER */
<!DOCTYPE html>
<html>
<head>
	<meta charset="UTF-8">
	<title>Validate DateBox - jQuery EasyUI Demo</title>
	<link rel="stylesheet" type="text/css" href="../../themes/default/easyui.css">
	<link rel="stylesheet" type="text/css" href="../../themes/icon.css">
	<link rel="stylesheet" type="text/css" href="../demo.css">
	<script type="text/javascript" src="../../jquery.min.js"></script>
	<script type="text/javascript" src="../../jquery.easyui.min.js"></script>
</head>
<body>
	<h2>Validate DateBox</h2>
	<div class="demo-info">
		<div class="demo-tip icon-tip"></div>
		<div>When the selected date is greater than specified date. The field validator will raise an error.</div>
	</div>
	<div style="margin:10px 0;"></div>
	<!-- <input> is a void element; the original's closing </input> tag was invalid HTML and has been removed. -->
	<input class="easyui-datebox" required data-options="validType:'md[\'10/11/2012\']'">
	<script>
		// Register a custom "md" validation rule on the validatebox defaults:
		// the entered date must not be after the date passed as the first
		// rule parameter (param[0], here '10/11/2012').
		$.extend($.fn.validatebox.defaults.rules, {
			md: {
				validator: function(value, param){
					var d1 = $.fn.datebox.defaults.parser(param[0]);	// upper-bound date from the rule parameter
					var d2 = $.fn.datebox.defaults.parser(value);		// date the user entered
					return d2<=d1;
				},
				message: 'The date must be less than or equals to {0}.'
			}
		});
	</script>
</body>
</html>
# $Id: UTF7.pm,v 2.4 2006/06/03 20:28:48 dankogai Exp $ package Encode::Unicode::UTF7; use strict; use warnings; no warnings 'redefine'; use base qw(Encode::Encoding); __PACKAGE__->Define('UTF-7'); our $VERSION = do { my @r = ( q$Revision: 2.4 $ =~ /\d+/g ); sprintf "%d." . "%02d" x $#r, @r }; use MIME::Base64; use Encode; # Algorithms taken from Unicode::String by Gisle Aas our $<API key> = 1; my $specials = quotemeta "\'(),-./:?"; $<API key> and $specials .= quotemeta "!\" # \s will not work because it matches U+3000 DEOGRAPHIC SPACE # We use qr/[\n\r\t\ ] instead my $re_asis = qr/(?:[\n\r\t\ A-Za-z0-9$specials])/; my $re_encoded = qr/(?:[^\n\r\t\ A-Za-z0-9$specials])/; my $e_utf16 = find_encoding("UTF-16BE"); sub needs_lines { 1 } sub encode($$;$) { my ( $obj, $str, $chk ) = @_; my $len = length($str); pos($str) = 0; my $bytes = ''; while ( pos($str) < $len ) { if ( $str =~ /\G($re_asis+)/ogc ) { $bytes .= $1; } elsif ( $str =~ /\G($re_encoded+)/ogsc ) { if ( $1 eq "+" ) { $bytes .= "+-"; } else { my $s = $1; my $base64 = encode_base64( $e_utf16->encode($s), '' ); $base64 =~ s/=+$ $bytes .= "+$base64-"; } } else { die "This should not happen! (pos=" . pos($str) . ")"; } } $_[1] = '' if $chk; return $bytes; } sub decode($$;$) { my ( $obj, $bytes, $chk ) = @_; my $len = length($bytes); my $str = ""; no warnings 'uninitialized'; while ( pos($bytes) < $len ) { if ( $bytes =~ /\G([^+]+)/ogc ) { $str .= $1; } elsif ( $bytes =~ /\G\+-/ogc ) { $str .= "+"; } elsif ( $bytes =~ /\G\+([A-Za-z0-9+\/]+)-?/ogsc ) { my $base64 = $1; my $pad = length($base64) % 4; $base64 .= "=" x ( 4 - $pad ) if $pad; $str .= $e_utf16->decode( decode_base64($base64) ); } elsif ( $bytes =~ /\G\+/ogc ) { $^W and warn "Bad UTF7 data escape"; $str .= "+"; } else { die "This should not happen " . 
pos($bytes); } } $_[1] = '' if $chk; return $str; } 1; __END__ =head1 NAME Encode::Unicode::UTF7 -- UTF-7 encoding =head1 SYNOPSIS use Encode qw/encode decode/; $utf7 = encode("UTF-7", $utf8); $utf8 = decode("UTF-7", $ucs2); =head1 ABSTRACT This module implements UTF-7 encoding documented in RFC 2152. UTF-7, as its name suggests, is a 7-bit re-encoded version of UTF-16BE. It is designed to be MTA-safe and expected to be a standard way to exchange Unicoded mails via mails. But with the advent of UTF-8 and 8-bit compliant MTAs, UTF-7 is hardly ever used. UTF-7 was not supported by Encode until version 1.95 because of that. But Unicode::String, a module by Gisle Aas which adds Unicode supports to non-utf8-savvy perl did support UTF-7, the UTF-7 support was added so Encode can supersede Unicode::String 100%. =head1 In Practice When you want to encode Unicode for mails and web pages, however, do not use UTF-7 unless you are sure your recipients and readers can handle it. Very few MUAs and WWW Browsers support these days (only Mozilla seems to support one). For general cases, use UTF-8 for message body and MIME-Header for header instead. =head1 SEE ALSO L<Encode>, L<Encode::Unicode>, L<Unicode::String> RFC 2781 L<http: =cut
; lzo1y_s2.asm -- <API key> ; ; This file is part of the LZO real-time data compression library. ; ; Copyright (C) 2008 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 2007 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 2006 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 2005 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 2004 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 2003 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 2002 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 2001 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 2000 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 1999 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 1998 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 1997 Markus Franz Xaver Johannes Oberhumer ; Copyright (C) 1996 Markus Franz Xaver Johannes Oberhumer ; All Rights Reserved. ; ; The LZO library is free software; you can redistribute it and/or ; modify it under the terms of the GNU General Public License as ; published by the Free Software Foundation; either version 2 of ; the License, or (at your option) any later version. ; ; The LZO library is distributed in the hope that it will be useful, ; but WITHOUT ANY WARRANTY; without even the implied warranty of ; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ; GNU General Public License for more details. ; ; You should have received a copy of the GNU General Public License ; along with the LZO library; see the file COPYING. ; If not, write to the Free Software Foundation, Inc., ; 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ; ; Markus F.X.J. 
Oberhumer ; <markus@oberhumer.com> ; http: ; ; include asminit.def public <API key> <API key>: db 85,87,86,83,81,82,131,236,12,252,139,116,36,40,139,124 db 36,48,189,3,0,0,0,141,70,253,3,68,36,44,137,68 db 36,4,137,248,139,84,36,52,3,2,137,4,36,49,192,49 db 219,172,60,17,118,87,44,17,60,4,115,92,141,20,7,57 db 20,36,15,130,130,2,0,0,141,20,6,57,84,36,4,15 db 130,110,2,0,0,137,193,235,110,5,255,0,0,0,141,84 db 6,18,57,84,36,4,15,130,87,2,0,0,138,30,70,8 db 219,116,230,141,68,24,18,235,31,141,180,38,0,0,0,0 db 57,116,36,4,15,130,57,2,0,0,138,6,70,60,16,115 db 127,8,192,116,215,131,192,3,141,84,7,0,57,20,36,15 db 130,37,2,0,0,141,84,6,0,57,84,36,4,15,130,16 db 2,0,0,137,193,193,232,2,33,233,139,22,131,198,4,137 db 23,131,199,4,72,117,243,243,164,138,6,70,60,16,115,64 db 141,87,3,57,20,36,15,130,238,1,0,0,193,232,2,138 db 30,141,151,255,251,255,255,141,4,152,70,41,194,59,84,36 db 48,15,130,218,1,0,0,138,2,136,7,138,66,1,136,71 db 1,138,66,2,136,71,2,1,239,233,163,0,0,0,137,246 db 60,64,114,68,137,193,193,232,2,141,87,255,33,232,138,30 db 193,233,4,141,4,152,70,41,194,73,57,232,115,76,233,181 db 0,0,0,5,255,0,0,0,141,86,3,57,84,36,4,15 db 130,126,1,0,0,138,30,70,8,219,116,231,141,76,24,33 db 49,192,235,20,141,116,38,0,60,32,15,130,200,0,0,0 db 131,224,31,116,224,141,72,2,102,139,6,141,87,255,193,232 db 2,131,198,2,41,194,57,232,114,110,59,84,36,48,15,130 db 77,1,0,0,141,4,15,57,4,36,15,130,58,1,0,0 db 137,203,193,235,2,116,17,139,2,131,194,4,137,7,131,199 db 4,75,117,243,33,233,116,9,138,2,66,136,7,71,73,117 db 247,138,70,254,33,232,15,132,196,254,255,255,141,20,7,57 db 20,36,15,130,2,1,0,0,141,20,6,57,84,36,4,15 db 130,238,0,0,0,138,14,70,136,15,71,72,117,247,138,6 db 70,233,42,255,255,255,137,246,59,84,36,48,15,130,223,0 db 0,0,141,68,15,0,57,4,36,15,130,203,0,0,0,135 db 214,243,164,137,214,235,170,129,193,255,0,0,0,141,86,3 db 57,84,36,4,15,130,169,0,0,0,138,30,70,8,219,116 db 230,141,76,11,9,235,21,144,60,16,114,44,137,193,131,224 db 
8,193,224,13,131,225,7,116,225,131,193,2,102,139,6,131 db 198,2,141,151,0,192,255,255,193,232,2,116,57,41,194,233 db 38,255,255,255,141,116,38,0,141,87,2,57,20,36,114,106 db 193,232,2,138,30,141,87,255,141,4,152,70,41,194,59,84 db 36,48,114,93,138,2,136,7,138,90,1,136,95,1,131,199 db 2,233,43,255,255,255,131,249,3,15,149,192,59,60,36,119 db 57,139,84,36,40,3,84,36,44,57,214,119,38,114,29,43 db 124,36,48,139,84,36,52,137,58,247,216,131,196,12,90,89 db 91,94,95,93,195,184,1,0,0,0,235,227,184,8,0,0 db 0,235,220,184,4,0,0,0,235,213,184,5,0,0,0,235 db 206,184,6,0,0,0,235,199,144,141,180,38,0,0,0,0 end
// Type definitions for galleria.js v1.4.2
declare namespace GalleriaJS {

    /** Options accepted by Galleria.run() / configure() / setOptions(). */
    interface GalleriaOptions {
        /** The gallery entries to display. */
        dataSource: GalleriaEntry[];
        /** Start slideshow automatically. */
        autoplay?: boolean;
        /** Open images in a lightbox on click. */
        lightbox?: boolean;
    }

    /** A single gallery item (all fields optional per the data API). */
    interface GalleriaEntry {
        image?: string;
        thumbnail?: string;
        title?: string;
        description?: string;
    }

    /** The global Galleria object (fluent API: most methods return the factory). */
    interface GalleriaFactory {
        run(): GalleriaFactory;
        // NOTE: parameter types were the boxed `String` in the original
        // definitions; the primitive `string` is the correct TypeScript type
        // and accepts every value the old signature did.
        run(selector: string): GalleriaFactory;
        run(selector: string, options: GalleriaOptions): GalleriaFactory;
        loadTheme(url: string): GalleriaFactory;
        configure(options: GalleriaOptions): GalleriaFactory;
        /** Register a callback invoked when the gallery is ready. */
        ready(method: () => any): void;
        refreshImage(): GalleriaFactory;
        resize(): GalleriaFactory;
        load(data: GalleriaEntry[]): GalleriaFactory;
        setOptions(options: GalleriaOptions): GalleriaFactory;
    }
}

declare var Galleria: GalleriaJS.GalleriaFactory;
/** * Arabic translation (Syrian Localization, it may differ if you aren't from Syria or any Country in Middle East) * @author Tawfek Daghistani <tawfekov@gmail.com> * @version 2011-07-09 */ if (elFinder && elFinder.prototype && typeof(elFinder.prototype.i18) == 'object') { elFinder.prototype.i18.ar = { translator : 'Tawfek Daghistani &lt;tawfekov@gmail.com&gt;', language : 'العربية', direction : 'rtl', messages : { 'error' : 'خطأ', 'errUnknown' : 'خطأ غير معروف .', 'errUnknownCmd' : 'أمر غير معروف .', 'errJqui' : 'إعدادات jQuery UI غير كاملة الرجاء التأكد من وجود كل من selectable, draggable and droppable', 'errNode' : '. موجود DOM إلى عنصر elFinder تحتاج ', 'errURL' : 'إعدادات خاطئة , عليك وضع الرابط ضمن الإعدادات', 'errAccess' : 'وصول مرفوض .', 'errConnect' : 'غير قادر على الاتصال بالخادم الخلفي (backend)', 'errAbort' : 'تم فصل الإتصال', 'errTimeout' : 'مهلة الإتصال قد إنتهت .', 'errNotFound' : 'الخادم الخلفي غير موجود .', 'errResponse' : 'رد غير مقبول من الخادم الخلفي', 'errConf' : 'خطأ في الإعدادات الخاصة بالخادم الخلفي ', 'errJSON' : 'الميزة PHP JSON module غير موجودة ', 'errNoVolumes' : 'لا يمكن القراءة من أي من الوسائط الموجودة ', 'errCmdParams' : 'البيانات المرسلة للأمر غير مقبولة "$1".', 'errDataNotJSON' : 'المعلومات المرسلة ليست من نوع JSON ', 'errDataEmpty' : 'لا يوجد معلومات مرسلة', 'errCmdReq' : 'الخادم الخلفي يطلب وجود اسم الأمر ', 'errOpen' : 'غير قادر على فتح "$1".', 'errNotFolder' : 'العنصر المختار ليس مجلد', 'errNotFile' : 'العنصر المختار ليس ملف', 'errRead' : 'غير قادر على القراءة "$1".', 'errWrite' : 'غير قادر على الكتابة "$1".', 'errPerm' : 'وصول مرفوض ', 'errLocked' : ' محمي و لا يمكن التعديل أو النقل أو إعادة التسمية"$1"', 'errExists' : ' موجود مسبقاً "$1"', 'errInvName' : 'الاسم مرفوض', 'errFolderNotFound' : 'المجلد غير موجود', 'errFileNotFound' : 'الملف غير موجود', '<API key>' : 'الملف الهدف "$1" غير موجود ', 'errPopup' : 'يمنعني المتصفح من إنشاء نافذة منبثقة , الرجاء تعديل الخيارات الخاصة من إعدادات المتصفح ', 'errMkdir' : ' غير قادر على 
إنشاء مجلد جديد "$1".', 'errMkfile' : ' غير قادر على إنشاء ملف جديد"$1".', 'errRename' : 'غير قادر على إعادة تسمية ال "$1".', 'errCopyFrom' : 'نسخ الملفات من الوسط المحدد "$1"غير مسموح.', 'errCopyTo' : 'نسخ الملفات إلى الوسط المحدد "$1" غير مسموح .', 'errUploadCommon' : 'خطأ أثناء عملية الرفع', 'errUpload' : 'غير قادر على رفع "$1".', 'errUploadNoFiles' : 'لم يتم رفع أي ملف ', 'errMaxSize' : 'حجم البيانات أكبر من الحجم المسموح به ', 'errFileMaxSize' : 'حجم الملف أكبر من الحجم المسموح به', 'errUploadMime' : 'نوع ملف غير مسموح ', 'errUploadTransfer' : '"$1" خطأ أثناء عملية النقل', 'errSave' : 'غير قادر على الحفظ في "$1".', 'errCopy' : 'غير قادر على النسخ إلى"$1".', 'errMove' : 'غير قادر على القص إلى "$1".', 'errCopyInItself' : 'غير قادر على نسخ الملف "$1" ضمن الملف نفسه.', 'errRm' : 'غير قادر على الحذف "$1".', 'errExtract' : 'غير قادر على استخراج الملفات من "$1".', 'errArchive' : 'غير قادر على إنشاء ملف مضغوط', 'errArcType' : 'نوع الملف المضغوط غير مدعومة', 'errNoArchive' : 'هذا الملف ليس ملف مضغوط أو ذو صسغة غير مدعومة ', 'errCmdNoSupport' : 'الخادم الخلفي لا يدعم هذا الأمر ', 'errReplByChild' : 'The folder “$1” can’t be replaced by an item it contains.', 'errArcSymlinks' : 'For security reason denied to unpack archives contains symlinks.', 'errArcMaxSize' : 'Archive files exceeds maximum allowed size.', 'cmdarchive' : 'أنشئ مجلد مضغوط', 'cmdback' : 'الخلف', 'cmdcopy' : 'نسخ', 'cmdcut' : 'قص', 'cmddownload' : 'تحميل', 'cmdduplicate' : 'تكرار', 'cmdedit' : 'تعديل الملف', 'cmdextract' : 'استخراج الملفات', 'cmdforward' : 'الأمام', 'cmdgetfile' : 'أختيار الملفات', 'cmdhelp' : 'عن هذا المشروع', 'cmdhome' : 'المجلد الرئيسي', 'cmdinfo' : 'معلومات ', 'cmdmkdir' : 'مجلد جديد', 'cmdmkfile' : 'ملف نصي جديد', 'cmdopen' : 'فتح', 'cmdpaste' : 'لصق', 'cmdquicklook' : 'معاينة', 'cmdreload' : 'إعادة تحميل', 'cmdrename' : 'إعادة تسمية', 'cmdrm' : 'حذف', 'cmdsearch' : 'بحث عن ملفات', 'cmdup' : 'تغيير المسار إلى مستوى أعلى', 'cmdupload' : 'رفع ملفات', 'cmdview' : 'عرض', 'btnClose' : 
'إغلاق', 'btnSave' : 'حفظ', 'btnRm' : 'إزالة', 'btnCancel' : 'إلغاء', 'btnNo' : 'لا', 'btnYes' : 'نعم', 'ntfopen' : 'فتح مجلد', 'ntffile' : 'فتح ملف', 'ntfreload' : 'إعادة عرض محتويات المجلد ', 'ntfmkdir' : 'ينشئ المجلدات', 'ntfmkfile' : 'ينشئ الملفات', 'ntfrm' : 'حذف الملفات', 'ntfcopy' : 'نسخ الملفات', 'ntfmove' : 'نقل الملفات', 'ntfprepare' : 'تحضير لنسخ الملفات', 'ntfrename' : 'إعادة تسمية الملفات', 'ntfupload' : 'رفع الملفات', 'ntfdownload' : 'تحميل الملفات', 'ntfsave' : 'حفظ الملفات', 'ntfarchive' : 'ينشئ ملف مضغوط', 'ntfextract' : 'استخراج ملفات من الملف المضغوط ', 'ntfsearch' : 'يبحث عن ملفات', 'ntfsmth' : 'يحضر لشيء ما >_<', 'dateUnknown' : 'غير معلوم', 'Today' : 'اليوم', 'Yesterday' : 'البارحة', 'Jan' : 'كانون الثاني', 'Feb' : 'شباط', 'Mar' : 'آذار', 'Apr' : 'نيسان', 'May' : 'أيار', 'Jun' : 'حزيران', 'Jul' : 'تموز', 'Aug' : 'آب', 'Sep' : 'أيلول', 'Oct' : 'تشرين الأول', 'Nov' : 'تشرين الثاني', 'Dec' : 'كانون الأول ', 'confirmReq' : 'يرجى التأكيد', 'confirmRm' : 'هل انت متأكد من انك تريد الحذف<br/>لا يمكن التراجع عن هذه العملية ', 'confirmRepl' : 'استبدال الملف القديم بملف جديد ؟', 'apllyAll' : 'تطبيق على الكل', 'name' : 'الأسم', 'size' : 'الحجم', 'perms' : 'الصلاحيات', 'modify' : 'أخر تعديل', 'kind' : 'نوع الملف', 'read' : 'قراءة', 'write' : 'كتابة', 'noaccess' : 'وصول ممنوع', 'and' : 'و', 'unknown' : 'غير معروف', 'selectall' : 'تحديد كل الملفات', 'selectfiles' : 'تحديد ملفات', 'selectffile' : 'تحديد الملف الاول', 'selectlfile' : 'تحديد الملف الأخير', 'viewlist' : 'اعرض ك قائمة', 'viewicons' : 'اعرض ك ايقونات', 'places' : 'المواقع', 'calc' : 'حساب', 'path' : 'مسار', 'aliasfor' : 'Alias for', 'locked' : 'مقفول', 'dim' : 'الابعاد', 'files' : 'ملفات', 'folders' : 'مجلدات', 'items' : 'عناصر', 'yes' : 'نعم', 'no' : 'لا', 'link' : 'اربتاط', 'searcresult' : 'نتائج البحث', 'selected' : 'العناصر المحددة', 'about' : 'عن البرنامج', 'shortcuts' : 'الاختصارات', 'help' : 'مساعدة', 'webfm' : 'مدير ملفات الويب', 'ver' : 'رقم الإصدار', 'protocol' : 'اصدار البرتوكول', 
'homepage' : 'الصفحة الرئيسية', 'docs' : 'التعليمات', 'github' : 'شاركنا بتطوير المشروع على Github', 'twitter' : 'تابعنا على تويتر', 'facebook' : 'انضم إلينا على الفيس بوك', 'team' : 'الفريق', 'chiefdev' : 'رئيس المبرمجين', 'developer' : 'مبرمح', 'contributor' : 'مبرمح', 'maintainer' : 'مشارك', 'translator' : 'مترجم', 'icons' : 'أيقونات', 'dontforget' : 'and don\'t forget to take your towel', 'shortcutsof' : 'الاختصارات غير مفعلة', 'dropFiles' : 'لصق الملفات هنا', 'or' : 'أو', 'selectForUpload' : 'اختر الملفات التي تريد رفعها', 'moveFiles' : 'قص الملفات', 'copyFiles' : 'نسخ الملفات', 'rmFromPlaces' : 'Remove from places', 'untitled folder' : 'untitled folder', 'untitled file.txt' : 'untitled file.txt', 'kindUnknown' : 'غير معروف', 'kindFolder' : 'مجلد', 'kindAlias' : 'اختصار', 'kindAliasBroken' : 'اختصار غير صالح', // applications 'kindApp' : 'برنامج', 'kindPostscript' : 'Postscript ملف', 'kindMsOffice' : 'Microsoft Office ملف', 'kindMsWord' : 'Microsoft Word ملف', 'kindMsExcel' : 'Microsoft Excel ملف', 'kindMsPP' : 'Microsoft Powerpoint عرض تقديمي ', 'kindOO' : 'Open Office ملف', 'kindAppFlash' : 'تطبيق فلاش', 'kindPDF' : 'ملف (PDF)', 'kindTorrent' : 'Bittorrent ملف', 'kind7z' : '7z ملف', 'kindTAR' : 'TAR ملف', 'kindGZIP' : 'GZIP ملف', 'kindBZIP' : 'BZIP ملف', 'kindZIP' : 'ZIP ملف', 'kindRAR' : 'RAR ملف', 'kindJAR' : 'Java JAR ملف', 'kindTTF' : 'True Type خط ', 'kindOTF' : 'Open Type خط ', 'kindRPM' : 'RPM ملف تنصيب', // texts 'kindText' : 'Text ملف', 'kindTextPlain' : 'مستند نصي', 'kindPHP' : 'PHP ملف نصي برمجي لـ', 'kindCSS' : 'Cascading style sheet', 'kindHTML' : 'HTML ملف', 'kindJS' : 'Javascript ملف نصي برمجي لـ', 'kindRTF' : 'Rich Text Format', 'kindC' : 'C ملف نصي برمجي لـ', 'kindCHeader' : 'C header ملف نصي برمجي لـ', 'kindCPP' : 'C++ ملف نصي برمجي لـ', 'kindCPPHeader' : 'C++ header ملف نصي برمجي لـ', 'kindShell' : 'Unix shell script', 'kindPython' : 'Python ملف نصي برمجي لـ', 'kindJava' : 'Java ملف نصي برمجي لـ', 'kindRuby' : 'Ruby ملف نصي برمجي لـ', 
'kindPerl' : 'Perl script', 'kindSQL' : 'SQL ملف نصي برمجي لـ', 'kindXML' : 'XML ملف', 'kindAWK' : 'AWK ملف نصي برمجي لـ', 'kindCSV' : 'ملف CSV', 'kindDOCBOOK' : 'Docbook XML ملف', // images 'kindصورة' : 'صورة', 'kindBMP' : 'BMP صورة', 'kindJPEG' : 'JPEG صورة', 'kindGIF' : 'GIF صورة', 'kindPNG' : 'PNG صورة', 'kindTIFF' : 'TIFF صورة', 'kindTGA' : 'TGA صورة', 'kindPSD' : 'Adobe Photoshop صورة', 'kindXBITMAP' : 'X bitmap صورة', 'kindPXM' : 'Pixelmator صورة', // media 'kindAudio' : 'ملف صوتي', 'kindAudioMPEG' : 'MPEG ملف صوتي', 'kindAudioMPEG4' : 'MPEG-4 ملف صوتي', 'kindAudioMIDI' : 'MIDI ملف صوتي', 'kindAudioOGG' : 'Ogg Vorbis ملف صوتي', 'kindAudioWAV' : 'WAV ملف صوتي', 'AudioPlaylist' : 'MP3 قائمة تشغيل', 'kindVideo' : 'ملف فيديو', 'kindVideoDV' : 'DV ملف فيديو', 'kindVideoMPEG' : 'MPEG ملف فيديو', 'kindVideoMPEG4' : 'MPEG-4 ملف فيديو', 'kindVideoAVI' : 'AVI ملف فيديو', 'kindVideoMOV' : 'Quick Time ملف فيديو', 'kindVideoWM' : 'Windows Media ملف فيديو', 'kindVideoFlash' : 'Flash ملف فيديو', 'kindVideoMKV' : 'Matroska ملف فيديو', 'kindVideoOGG' : 'Ogg ملف فيديو' } } }
using System.Collections.Generic;
using System.Linq;
using System.Runtime.Serialization;
using System.Text;
using System.Threading.Tasks;

namespace Umbraco.Web.Models.ContentEditing
{
    /// <summary>
    /// Serializable model describing a relation between two content items.
    /// NOTE(review): the XML doc blocks in this copy had lost their "///"
    /// prefixes (bare &lt;summary&gt; elements are not valid C#); they have
    /// been restored as doc comments — confirm against the upstream file.
    /// </summary>
    [DataContract(Name = "relation", Namespace = "")]
    public class Relation
    {
        public Relation()
        {
            // Ensure RelationType is never null for serialization consumers.
            RelationType = new RelationType();
        }

        /// <summary>
        /// Gets or sets the Parent Id of the Relation (Source)
        /// </summary>
        [DataMember(Name = "parentId")]
        public int ParentId { get; set; }

        /// <summary>
        /// Gets or sets the Child Id of the Relation (Destination)
        /// </summary>
        [DataMember(Name = "childId")]
        public int ChildId { get; set; }

        /// <summary>
        /// Gets or sets the <see cref="RelationType"/> for the Relation
        /// </summary>
        [DataMember(Name = "relationType", IsRequired = true)]
        public RelationType RelationType { get; set; }

        /// <summary>
        /// Gets or sets a comment for the Relation
        /// </summary>
        [DataMember(Name = "comment")]
        public string Comment { get; set; }
    }
}
#ifndef VMS_OPTS_H
#define VMS_OPTS_H

/* Pointer-size selection for VMS object output.
 * NOTE(review): the first enumerator's name was redacted in this copy
 * ("<API key>"); by position (value 0, before the 32/64 variants) it is
 * presumably the "unset/default" member — confirm against the upstream
 * header before relying on it. */
enum vms_pointer_size
{
  <API key>,              /* redacted; implicit value 0 */
  VMS_POINTER_SIZE_32,    /* 32-bit pointers */
  VMS_POINTER_SIZE_64     /* 64-bit pointers */
};

#endif
/* * multibytecodec.h: Common Multibyte Codec Implementation * * Written by Hye-Shik Chang <perky@FreeBSD.org> */ #ifndef <API key> #define <API key> #ifdef __cplusplus extern "C" { #endif #ifdef uint16_t typedef uint16_t ucs2_t, DBCHAR; #else typedef unsigned short ucs2_t, DBCHAR; #endif typedef union { void *p; int i; unsigned char c[8]; ucs2_t u2[4]; Py_UCS4 u4[2]; } <API key>; typedef int (*mbcodec_init)(const void *config); typedef Py_ssize_t (*mbencode_func)(<API key> *state, const void *config, int kind, void *data, Py_ssize_t *inpos, Py_ssize_t inlen, unsigned char **outbuf, Py_ssize_t outleft, int flags); typedef int (*mbencodeinit_func)(<API key> *state, const void *config); typedef Py_ssize_t (*mbencodereset_func)(<API key> *state, const void *config, unsigned char **outbuf, Py_ssize_t outleft); typedef Py_ssize_t (*mbdecode_func)(<API key> *state, const void *config, const unsigned char **inbuf, Py_ssize_t inleft, _PyUnicodeWriter *writer); typedef int (*mbdecodeinit_func)(<API key> *state, const void *config); typedef Py_ssize_t (*mbdecodereset_func)(<API key> *state, const void *config); typedef struct { const char *encoding; const void *config; mbcodec_init codecinit; mbencode_func encode; mbencodeinit_func encinit; mbencodereset_func encreset; mbdecode_func decode; mbdecodeinit_func decinit; mbdecodereset_func decreset; } MultibyteCodec; typedef struct { PyObject_HEAD MultibyteCodec *codec; } <API key>; #define <API key>(op) ((op)->ob_type == &MultibyteCodec_Type) #define <API key> \ PyObject_HEAD \ MultibyteCodec *codec; \ <API key> state; \ PyObject *errors; typedef struct { <API key> } <API key>; #define MAXENCPENDING 2 #define <API key> \ <API key> \ PyObject *pending; typedef struct { <API key> } <API key>; #define MAXDECPENDING 8 #define <API key> \ <API key> \ unsigned char pending[MAXDECPENDING]; \ Py_ssize_t pendingsize; typedef struct { <API key> } <API key>; typedef struct { <API key> } <API key>; typedef struct { <API key> } <API key>; 
typedef struct { <API key> PyObject *stream; } <API key>; typedef struct { <API key> PyObject *stream; } <API key>; #define MBERR_TOOSMALL (-1) /* insufficient output buffer space */ #define MBERR_TOOFEW (-2) /* incomplete input buffer */ #define MBERR_INTERNAL (-3) /* internal runtime error */ #define MBERR_EXCEPTION (-4) /* an exception has been raised */ #define ERROR_STRICT (PyObject *)(1) #define ERROR_IGNORE (PyObject *)(2) #define ERROR_REPLACE (PyObject *)(3) #define ERROR_ISCUSTOM(p) ((p) < ERROR_STRICT || ERROR_REPLACE < (p)) #define ERROR_DECREF(p) \ do { \ if (p != NULL && ERROR_ISCUSTOM(p)) \ Py_DECREF(p); \ } while (0); #define MBENC_FLUSH 0x0001 /* encode all characters encodable */ #define MBENC_MAX MBENC_FLUSH #define <API key> "multibytecodec.__map_*" #ifdef __cplusplus } #endif #endif
<!DOCTYPE html PUBLIC "- <html xmlns="http: <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Upgrading from 1.7.2 to 2.0.0 : CodeIgniter User Guide</title> <style type='text/css' media='all'>@import url('../userguide.css');</style> <link rel='stylesheet' type='text/css' media='all' href='../userguide.css' /> <script type="text/javascript" src="../nav/nav.js"></script> <script type="text/javascript" src="../nav/prototype.lite.js"></script> <script type="text/javascript" src="../nav/moo.fx.js"></script> <script type="text/javascript" src="../nav/user_guide_menu.js"></script> <meta http-equiv='expires' content='-1' /> <meta http-equiv= 'pragma' content='no-cache' /> <meta name='robots' content='all' /> <meta name='author' content='ExpressionEngine Dev Team' /> <meta name='description' content='CodeIgniter User Guide' /> </head> <body> <!-- START NAVIGATION --> <div id="nav"><div id="nav_inner"><script type="text/javascript">create_menu('../');</script></div></div> <div id="nav2"><a name="top"></a><a href="javascript:void(0);" onclick="myHeight.toggle();"><img src="../images/nav_toggle_darker.jpg" width="154" height="43" border="0" title="Toggle Table of Contents" alt="Toggle Table of Contents" /></a></div> <div id="masthead"> <table cellpadding="0" cellspacing="0" border="0" style="width:100%"> <tr> <td><h1>CodeIgniter User Guide Version 2.2.1</h1></td> <td id="breadcrumb_right"><a href="../toc.html">Table of Contents Page</a></td> </tr> </table> </div> <!-- END NAVIGATION --> <!-- START BREADCRUMB --> <table cellpadding="0" cellspacing="0" border="0" style="width:100%"> <tr> <td id="breadcrumb"> <a href="http://codeigniter.com/">CodeIgniter Home</a> &nbsp;&#8250;&nbsp; <a href="../index.html">User Guide Home</a> &nbsp;&#8250;&nbsp; Upgrading from 1.7.2 to 2.0.0 </td> <td id="searchbox"><form method="get" action="http: </tr> </table> <!-- END BREADCRUMB --> <br clear="all" /> <!-- START CONTENT --> <div id="content"> <h1>Upgrading 
from 1.7.2 to 2.0.0</h1> <p>Before performing an update you should take your site offline by replacing the index.php file with a static one.</p> <h2>Step 1: Update your CodeIgniter files</h2> <p>Replace all files and directories in your "system" folder <strong>except</strong> your <kbd>application</kbd> folder.</p> <p class="important"><strong>Note:</strong> If you have any custom developed files in these folders please make copies of them first.</p> <h2>Step 2: Adjust get_dir_file_info() where necessary</h2> <p>Version 2.0.0 brings a non-backwards compatible change to <kbd>get_dir_file_info()</kbd> in the <a href="../helpers/file_helper.html">File Helper</a>. Non-backwards compatible changes are extremely rare in CodeIgniter, but this one we feel was warranted due to how easy it was to create serious server performance issues. If you <em>need</em> recursiveness where you are using this helper function, change such instances, setting the second parameter, <kbd>$top_level_only</kbd> to FALSE:</p> <code>get_dir_file_info('/path/to/directory', <kbd>FALSE</kbd>);</code> </p> <h2>Step 3: Convert your Plugins to Helpers</h2> <p>2.0.0 gets rid of the "Plugin" system as their functionality was identical to Helpers, but non-extensible. 
You will need to rename your plugin files from <var>filename_pi.php</var> to <var>filename_helper.php</var>, move them to your <kbd>helpers</kbd> folder, and change all instances of: <code>$this->load->plugin('foo');</code> to <code>$this->load->helper('foo');</code> </p> <h2>Step 4: Update stored encrypted data</h2> <p class="important"><strong>Note:</strong> If your application does not use the Encryption library, does not store Encrypted data permanently, or is on an environment that does not support Mcrypt, you may skip this step.</p> <p>The Encryption library has had a number of improvements, some for encryption strength and some for performance, that has an unavoidable consequence of making it no longer possible to decode encrypted data produced by the original version of this library. To help with the transition, a new method has been added, <kbd>encode_from_legacy()</kbd> that will decode the data with the original algorithm and return a re-encoded string using the improved methods. This will enable you to easily replace stale encrypted data with fresh in your applications, either on the fly or en masse.</p> <p>Please read <a href="../libraries/encryption.html#legacy">how to use this method</a> in the Encryption library documentation.</p> <h2>Step 5: Remove loading calls for the compatibility helper.</h2> <p>The compatibility helper has been removed from the CodeIgniter core. All methods in it should be natively available in supported PHP versions.</p> <h2>Step 6: Update Class extension</h2> <p>All core classes are now prefixed with <kbd>CI_</kbd>. Update Models and Controllers to extend CI_Model and CI_Controller, respectively.</p> <h2>Step 7: Update Parent Constructor calls</h2> <p>All native CodeIgniter classes now use the PHP 5 <kbd>__construct()</kbd> convention. 
Please update extended libraries to call <kbd>parent::__construct()</kbd>.</p> <h2>Step 8: Update your user guide</h2> <p>Please replace your local copy of the user guide with the new version, including the image files.</p> </div> <!-- END CONTENT --> <div id="footer"> <p> Previous Topic:&nbsp;&nbsp;<a href="index.html">Installation Instructions</a> &nbsp;&nbsp;&nbsp;&middot;&nbsp;&nbsp; <a href="#top">Top of Page</a>&nbsp;&nbsp;&nbsp;&middot;&nbsp;&nbsp; <a href="../index.html">User Guide Home</a>&nbsp;&nbsp;&nbsp;&middot;&nbsp;&nbsp; Next Topic:&nbsp;&nbsp;<a href="troubleshooting.html">Troubleshooting</a> </p> <p><a href="http://codeigniter.com">CodeIgniter</a> &nbsp;&middot;&nbsp; Copyright & </div> </body> </html>
[![npm][npm]][npm-url] [![node][node]][node-url] [![deps][deps]][deps-url] [![tests][tests]][tests-url] [![coverage][cover]][cover-url] [![chat][chat]][chat-url] <div align="center"> <a href="https://github.com/webpack/webpack"> <img width="200" height="200" src="https://cdn.rawgit.com/webpack/media/e7485eb2/logo/icon.svg"> </a> <h1>Url Loader</h1> </div> <h2 align="center">Install</h2> bash npm install --save-dev url-loader <h2 align="center">Usage</h2> [Documentation: Using loaders](http://webpack.github.io/docs/using-loaders.html) The `url` loader works like the `file` loader, but can return a Data Url if the file is smaller than a byte limit. The limit can be specified with a query parameter. (Defaults to no limit) If the file is greater than the limit (in bytes) the [`file-loader`](https://github.com/webpack/file-loader) is used and all query parameters are passed to it. javascript require("url-loader?limit=10000!./file.png"); // => DataUrl if "file.png" is smaller than 10kb require("url-loader?mimetype=image/png!./file.png"); // => Specify mimetype for the file (Otherwise it's inferred from extension.) require("url-loader?prefix=img/!./file.png"); // => Parameters for the file-loader are valid too // They are passed to the file-loader if used. <h2 align="center">Contributing</h2> Don't hesitate to create a pull request. Every contribution is appreciated. In development you can start the tests by calling `npm test`. 
<h2 align="center">Maintainers</h2> <table> <tbody> <tr> <td align="center"> <img width="150" height="150" src="https://avatars3.githubusercontent.com/u/166921?v=3&s=150"> </br> <a href="https: </td> <td align="center"> <img width="150" height="150" src="https://avatars2.githubusercontent.com/u/8420490?v=3&s=150"> </br> <a href="https://github.com/d3viant0ne">Joshua Wiens</a> </td> <td align="center"> <img width="150" height="150" src="https://avatars3.githubusercontent.com/u/533616?v=3&s=150"> </br> <a href="https://github.com/SpaceK33z">Kees Kluskens</a> </td> <td align="center"> <img width="150" height="150" src="https://avatars3.githubusercontent.com/u/3408176?v=3&s=150"> </br> <a href="https://github.com/TheLarkInn">Sean Larkin</a> </td> </tr> <tbody> </table> [npm]: https://img.shields.io/npm/v/url-loader.svg [npm-url]: https://npmjs.com/package/url-loader [node]: https://img.shields.io/node/v/url-loader.svg [node-url]: https://nodejs.org [deps]: https://david-dm.org/webpack/url-loader.svg [deps-url]: https://david-dm.org/webpack/url-loader [tests]: http://img.shields.io/travis/webpack/url-loader.svg [tests-url]: https://travis-ci.org/webpack/url-loader [cover]: https://coveralls.io/repos/github/webpack/url-loader/badge.svg [cover-url]: https://coveralls.io/github/webpack/url-loader [chat]: https://badges.gitter.im/webpack/webpack.svg [chat-url]: https://gitter.im/webpack/webpack
#include "sync/internal_api/public/test/fake_sync_manager.h" #include <cstddef> #include "base/bind.h" #include "base/bind_helpers.h" #include "base/location.h" #include "base/logging.h" #include "base/run_loop.h" #include "base/<API key>.h" #include "base/<API key>.h" #include "base/<API key>.h" #include "sync/internal_api/public/<API key>.h" #include "sync/internal_api/public/<API key>.h" #include "sync/internal_api/public/util/weak_handle.h" #include "sync/syncable/directory.h" #include "sync/test/<API key>.h" class GURL; namespace syncer { FakeSyncManager::FakeSyncManager(ModelTypeSet <API key>, ModelTypeSet <API key>, ModelTypeSet <API key>) : <API key>(<API key>), <API key>(<API key>), <API key>(<API key>), <API key>(<API key>) { <API key>.reset(new <API key>()); } FakeSyncManager::~FakeSyncManager() {} ModelTypeSet FakeSyncManager::<API key>() { ModelTypeSet cleaned_types = cleaned_types_; cleaned_types_.Clear(); return cleaned_types; } ModelTypeSet FakeSyncManager::<API key>() { ModelTypeSet downloaded_types = downloaded_types_; downloaded_types_.Clear(); return downloaded_types; } ModelTypeSet FakeSyncManager::<API key>() { ModelTypeSet enabled_types = enabled_types_; enabled_types_.Clear(); return enabled_types; } ConfigureReason FakeSyncManager::<API key>() { ConfigureReason reason = <API key>; <API key> = <API key>; return reason; } void FakeSyncManager::WaitForSyncThread() { // Post a task to |sync_task_runner_| and block until it runs. 
base::RunLoop run_loop; if (!sync_task_runner_->PostTaskAndReply( FROM_HERE, base::Bind(&base::DoNothing), run_loop.QuitClosure())) { NOTREACHED(); } run_loop.Run(); } void FakeSyncManager::Init(InitArgs* args) { sync_task_runner_ = base::<API key>::Get(); <API key>(); test_user_share_.SetUp(); UserShare* share = test_user_share_.user_share(); for (ModelTypeSet::Iterator it = <API key>.First(); it.Good(); it.Inc()) { TestUserShare::CreateRoot(it.Get(), share); } FOR_EACH_OBSERVER(SyncManager::Observer, observers_, <API key>( WeakHandle<JsBackend>(), WeakHandle<<API key>>(), true, <API key>)); } ModelTypeSet FakeSyncManager::<API key>() { return <API key>; } ModelTypeSet FakeSyncManager::<API key>( ModelTypeSet types) { ModelTypeSet empty_types = types; empty_types.RemoveAll(<API key>); return empty_types; } bool FakeSyncManager::<API key>() { ModelTypeSet partial_types; for (ModelTypeSet::Iterator i = <API key>.First(); i.Good(); i.Inc()) { if (!<API key>.Has(i.Get())) partial_types.Put(i.Get()); } <API key>.RemoveAll(partial_types); cleaned_types_.PutAll(partial_types); return true; } void FakeSyncManager::UpdateCredentials(const SyncCredentials& credentials) { NOTIMPLEMENTED(); } void FakeSyncManager::<API key>( const <API key>& routing_info) { // Do nothing. } void FakeSyncManager::ConfigureSyncer( ConfigureReason reason, ModelTypeSet to_download, ModelTypeSet to_purge, ModelTypeSet to_journal, ModelTypeSet to_unapply, const <API key>& new_routing_info, const base::Closure& ready_task, const base::Closure& retry_task) { <API key> = reason; enabled_types_ = GetRoutingInfoTypes(new_routing_info); ModelTypeSet success_types = to_download; success_types.RemoveAll(<API key>); DVLOG(1) << "Faking configuration. Downloading: " << <API key>(success_types) << ". Cleaning: " << <API key>(to_purge); // Update our fake directory by clearing and fake-downloading as necessary. 
UserShare* share = GetUserShare(); share->directory-><API key>(to_purge, to_journal, to_unapply); for (ModelTypeSet::Iterator it = success_types.First(); it.Good(); it.Inc()) { // We must be careful to not create the same root node twice. if (!<API key>.Has(it.Get())) { TestUserShare::CreateRoot(it.Get(), share); } } // Simulate cleaning up disabled types. // TODO(sync): consider only cleaning those types that were recently disabled, // if this isn't the first cleanup, which more accurately reflects the // behavior of the real cleanup logic. <API key>.RemoveAll(to_purge); <API key>.RemoveAll(to_purge); cleaned_types_.PutAll(to_purge); // Now simulate the actual configuration for those types that successfully // download + apply. <API key>.PutAll(success_types); <API key>.PutAll(success_types); downloaded_types_.PutAll(success_types); ready_task.Run(); } void FakeSyncManager::AddObserver(Observer* observer) { observers_.AddObserver(observer); } void FakeSyncManager::RemoveObserver(Observer* observer) { observers_.RemoveObserver(observer); } SyncStatus FakeSyncManager::GetDetailedStatus() const { NOTIMPLEMENTED(); return SyncStatus(); } void FakeSyncManager::SaveChanges() { // Do nothing. 
} void FakeSyncManager::<API key>(ShutdownReason reason) { DCHECK(sync_task_runner_-><API key>()); test_user_share_.TearDown(); } UserShare* FakeSyncManager::GetUserShare() { return test_user_share_.user_share(); } syncer::SyncContextProxy* FakeSyncManager::GetSyncContextProxy() { return &<API key>; } const std::string FakeSyncManager::cache_guid() { return test_user_share_.user_share()->directory->cache_guid(); } bool FakeSyncManager::ReceivedExperiment(Experiments* experiments) { return false; } bool FakeSyncManager::HasUnsyncedItems() { NOTIMPLEMENTED(); return false; } <API key>* FakeSyncManager::<API key>() { return <API key>.get(); } ScopedVector<syncer::ProtocolEvent> FakeSyncManager::<API key>() { return ScopedVector<syncer::ProtocolEvent>(); } scoped_ptr<base::ListValue> FakeSyncManager::GetAllNodesForType( syncer::ModelType type) { return scoped_ptr<base::ListValue>(new base::ListValue()); } void FakeSyncManager::RefreshTypes(ModelTypeSet types) { <API key> = types; } void FakeSyncManager::<API key>( syncer::<API key>* observer) {} void FakeSyncManager::<API key>( syncer::<API key>* observer) {} bool FakeSyncManager::<API key>( syncer::<API key>* observer) { return false; } void FakeSyncManager::<API key>() {} void FakeSyncManager::<API key>( syncer::ModelType type, scoped_ptr<<API key>> invalidation) { // Do nothing. } ModelTypeSet FakeSyncManager::<API key>() { return <API key>; } void FakeSyncManager::<API key>(bool invalidator_enabled) { // Do nothing. } } // namespace syncer
#!/bin/sh # This script generates a two roots - one legacy one signed with MD5, and # another (newer) one signed with SHA1 - and has a leaf certificate signed # by these without any distinguishers. # The "cross-signed" comes from the fact that both the MD5 and SHA1 roots share # the same Authority Key ID, Subject Key ID, Subject, and Subject Public Key # Info. When the chain building algorithm is evaluating paths, if it prefers # untrusted over trusted, then it will see the MD5 certificate as a self-signed # cert that is "cross-signed" by the trusted SHA1 root. # The SHA1 root should be (temporarily) trusted, and the resulting chain # should be leaf -> SHA1root, not leaf -> MD5root, leaf -> SHA1root -> MD5root, # or leaf -> MD5root -> SHA1root try() { echo "$@" $@ || exit 1 } try rm -rf out try mkdir out try echo 1 > out/<API key> try echo 2 > out/<API key> touch out/<API key>.txt touch out/2048-md5-root-index.txt # Generate the key try openssl genrsa -out out/2048-sha1-root.key 2048 # Generate the root certificate CA_COMMON_NAME="Test Dup-Hash Root CA" \ try openssl req \ -new \ -key out/2048-sha1-root.key \ -out out/2048-sha1-root.req \ -config ca.cnf CA_COMMON_NAME="Test Dup-Hash Root CA" \ try openssl x509 \ -req -days 3650 \ -sha1 \ -in out/2048-sha1-root.req \ -out out/2048-sha1-root.pem \ -text \ -signkey out/2048-sha1-root.key \ -extfile ca.cnf \ -extensions ca_cert CA_COMMON_NAME="Test Dup-Hash Root CA" \ try openssl x509 \ -req -days 3650 \ -md5 \ -in out/2048-sha1-root.req \ -out out/2048-md5-root.pem \ -text \ -signkey out/2048-sha1-root.key \ -extfile ca.cnf \ -extensions ca_cert # Generate the leaf certificate request try openssl req \ -new \ -keyout out/ok_cert.key \ -out out/ok_cert.req \ -config ee.cnf # Generate the leaf certificates CA_COMMON_NAME="Test Dup-Hash Root CA" \ try openssl ca \ -batch \ -extensions user_cert \ -days 3650 \ -in out/ok_cert.req \ -out out/ok_cert.pem \ -config ca.cnf try openssl x509 -text \ -in out/2048-md5-root.pem \ 
-out ../certificates/<API key>.pem try openssl x509 -text \ -in out/2048-sha1-root.pem \ -out ../certificates/<API key>.pem try openssl x509 -text \ -in out/ok_cert.pem \ -out ../certificates/cross-signed-leaf.pem
#include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/clkdev.h> #include <asm/clock.h> #include <asm/hwblk.h> #include <cpu/sh7723.h> /* SH7723 registers */ #define FRQCR 0xa4150000 #define VCLKCR 0xa4150004 #define SCLKACR 0xa4150008 #define SCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 #define DLLFRQ 0xa4150050 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ static struct clk r_clk = { .rate = 32768, }; /* * Default rate for the root input clock, reset this with clk_set_rate() * from the platform code. */ struct clk extal_clk = { .rate = 33333333, }; /* The dll multiplies the 32khz r_clk, may be used instead of extal */ static unsigned long dll_recalc(struct clk *clk) { unsigned long mult; if (__raw_readl(PLLCR) & 0x1000) mult = __raw_readl(DLLFRQ); else mult = 0; return clk->parent->rate * mult; } static struct clk_ops dll_clk_ops = { .recalc = dll_recalc, }; static struct clk dll_clk = { .ops = &dll_clk_ops, .parent = &r_clk, .flags = CLK_ENABLE_ON_INIT, }; static unsigned long pll_recalc(struct clk *clk) { unsigned long mult = 1; unsigned long div = 1; if (__raw_readl(PLLCR) & 0x4000) mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1); else div = 2; return (clk->parent->rate * mult) / div; } static struct clk_ops pll_clk_ops = { .recalc = pll_recalc, }; static struct clk pll_clk = { .ops = &pll_clk_ops, .flags = CLK_ENABLE_ON_INIT, }; struct clk *main_clks[] = { &r_clk, &extal_clk, &dll_clk, &pll_clk, }; static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; static struct clk_div_mult_table div4_div_mult_table = { .divisors = divisors, .nr_divisors = ARRAY_SIZE(divisors), .multipliers = multipliers, .nr_multipliers = ARRAY_SIZE(multipliers), }; static struct clk_div4_table div4_table = { .div_mult_table = &div4_div_mult_table, }; enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, 
DIV4_B3, DIV4_P, DIV4_NR }; #define DIV4(_reg, _bit, _mask, _flags) \ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) struct clk div4_clks[DIV4_NR] = { [DIV4_I] = DIV4(FRQCR, 20, 0x0dbf, CLK_ENABLE_ON_INIT), [DIV4_U] = DIV4(FRQCR, 16, 0x0dbf, CLK_ENABLE_ON_INIT), [DIV4_SH] = DIV4(FRQCR, 12, 0x0dbf, CLK_ENABLE_ON_INIT), [DIV4_B] = DIV4(FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT), [DIV4_B3] = DIV4(FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT), [DIV4_P] = DIV4(FRQCR, 0, 0x0dbf, 0), }; enum { DIV4_IRDA, DIV4_ENABLE_NR }; struct clk div4_enable_clks[DIV4_ENABLE_NR] = { [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x0dbf, 0), }; enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR }; struct clk div4_reparent_clks[DIV4_REPARENT_NR] = { [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x0dbf, 0), [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x0dbf, 0), }; enum { DIV6_V, DIV6_NR }; struct clk div6_clks[DIV6_NR] = { [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0), }; static struct clk mstp_clks[] = { /* See page 60 of Datasheet V1.0: Overview -> Block Diagram */ SH_HWBLK_CLK(HWBLK_TLB, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT), SH_HWBLK_CLK(HWBLK_IC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT), SH_HWBLK_CLK(HWBLK_OC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT), SH_HWBLK_CLK(HWBLK_L2C, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT), SH_HWBLK_CLK(HWBLK_ILMEM, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT), SH_HWBLK_CLK(HWBLK_FPU, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT), SH_HWBLK_CLK(HWBLK_INTC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT), SH_HWBLK_CLK(HWBLK_DMAC0, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_SHYWAY, &div4_clks[DIV4_SH], CLK_ENABLE_ON_INIT), SH_HWBLK_CLK(HWBLK_HUDI, &div4_clks[DIV4_P], 0), SH_HWBLK_CLK(HWBLK_UBC, &div4_clks[DIV4_I], 0), SH_HWBLK_CLK(HWBLK_TMU0, &div4_clks[DIV4_P], 0), SH_HWBLK_CLK(HWBLK_CMT, &r_clk, 0), SH_HWBLK_CLK(HWBLK_RWDT, &r_clk, 0), SH_HWBLK_CLK(HWBLK_DMAC1, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_TMU1, &div4_clks[DIV4_P], 0), SH_HWBLK_CLK(HWBLK_FLCTL, &div4_clks[DIV4_P], 0), SH_HWBLK_CLK(HWBLK_SCIF0, &div4_clks[DIV4_P], 0), 
SH_HWBLK_CLK(HWBLK_SCIF1, &div4_clks[DIV4_P], 0), SH_HWBLK_CLK(HWBLK_SCIF2, &div4_clks[DIV4_P], 0), SH_HWBLK_CLK(HWBLK_SCIF3, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_SCIF4, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_SCIF5, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_MSIOF0, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_MSIOF1, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_MERAM, &div4_clks[DIV4_SH], 0), SH_HWBLK_CLK(HWBLK_IIC, &div4_clks[DIV4_P], 0), SH_HWBLK_CLK(HWBLK_RTC, &r_clk, 0), SH_HWBLK_CLK(HWBLK_ATAPI, &div4_clks[DIV4_SH], 0), SH_HWBLK_CLK(HWBLK_ADC, &div4_clks[DIV4_P], 0), SH_HWBLK_CLK(HWBLK_TPU, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_IRDA, &div4_clks[DIV4_P], 0), SH_HWBLK_CLK(HWBLK_TSIF, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_ICB, &div4_clks[DIV4_B], CLK_ENABLE_ON_INIT), SH_HWBLK_CLK(HWBLK_SDHI0, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_SDHI1, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_KEYSC, &r_clk, 0), SH_HWBLK_CLK(HWBLK_USB, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_2DG, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_SIU, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_VEU2H1, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_VOU, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_BEU, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_CEU, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_VEU2H0, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_VPU, &div4_clks[DIV4_B], 0), SH_HWBLK_CLK(HWBLK_LCDC, &div4_clks[DIV4_B], 0), }; static struct clk_lookup lookups[] = { /* main clocks */ CLKDEV_CON_ID("rclk", &r_clk), CLKDEV_CON_ID("extal", &extal_clk), CLKDEV_CON_ID("dll_clk", &dll_clk), CLKDEV_CON_ID("pll_clk", &pll_clk), /* DIV4 clocks */ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]), CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]), CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]), 
CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]), CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]), /* DIV6 clocks */ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), /* MSTP clocks */ CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]), CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]), CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]), CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]), CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]), CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]), CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]), CLKDEV_CON_ID("dmac0", &mstp_clks[HWBLK_DMAC0]), CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]), CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]), CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]), { /* TMU0 */ .dev_id = "sh_tmu.0", .con_id = "tmu_fck", .clk = &mstp_clks[HWBLK_TMU0], }, { /* TMU1 */ .dev_id = "sh_tmu.1", .con_id = "tmu_fck", .clk = &mstp_clks[HWBLK_TMU0], }, { /* TMU2 */ .dev_id = "sh_tmu.2", .con_id = "tmu_fck", .clk = &mstp_clks[HWBLK_TMU0], }, CLKDEV_CON_ID("cmt_fck", &mstp_clks[HWBLK_CMT]), CLKDEV_CON_ID("rwdt0", &mstp_clks[HWBLK_RWDT]), CLKDEV_CON_ID("dmac1", &mstp_clks[HWBLK_DMAC1]), { /* TMU3 */ .dev_id = "sh_tmu.3", .con_id = "tmu_fck", .clk = &mstp_clks[HWBLK_TMU1], }, { /* TMU4 */ .dev_id = "sh_tmu.4", .con_id = "tmu_fck", .clk = &mstp_clks[HWBLK_TMU1], }, { /* TMU5 */ .dev_id = "sh_tmu.5", .con_id = "tmu_fck", .clk = &mstp_clks[HWBLK_TMU1], }, CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]), { /* SCIF0 */ .dev_id = "sh-sci.0", .con_id = "sci_fck", .clk = &mstp_clks[HWBLK_SCIF0], }, { /* SCIF1 */ .dev_id = "sh-sci.1", .con_id = "sci_fck", .clk = &mstp_clks[HWBLK_SCIF1], }, { /* SCIF2 */ .dev_id = "sh-sci.2", .con_id = "sci_fck", .clk = &mstp_clks[HWBLK_SCIF2], }, { /* SCIF3 */ .dev_id = "sh-sci.3", .con_id = "sci_fck", .clk = &mstp_clks[HWBLK_SCIF3], }, { /* SCIF4 */ .dev_id = "sh-sci.4", .con_id = "sci_fck", .clk = &mstp_clks[HWBLK_SCIF4], }, { /* SCIF5 */ .dev_id = "sh-sci.5", .con_id = "sci_fck", .clk = 
&mstp_clks[HWBLK_SCIF5], }, CLKDEV_CON_ID("msiof0", &mstp_clks[HWBLK_MSIOF0]), CLKDEV_CON_ID("msiof1", &mstp_clks[HWBLK_MSIOF1]), CLKDEV_CON_ID("meram0", &mstp_clks[HWBLK_MERAM]), CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]), CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]), CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]), CLKDEV_CON_ID("adc0", &mstp_clks[HWBLK_ADC]), CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]), CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]), CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]), CLKDEV_CON_ID("icb0", &mstp_clks[HWBLK_ICB]), CLKDEV_CON_ID("sdhi0", &mstp_clks[HWBLK_SDHI0]), CLKDEV_CON_ID("sdhi1", &mstp_clks[HWBLK_SDHI1]), CLKDEV_CON_ID("keysc0", &mstp_clks[HWBLK_KEYSC]), CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB]), CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]), CLKDEV_CON_ID("siu0", &mstp_clks[HWBLK_SIU]), CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU2H1]), CLKDEV_CON_ID("vou0", &mstp_clks[HWBLK_VOU]), CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]), CLKDEV_CON_ID("ceu0", &mstp_clks[HWBLK_CEU]), CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU2H0]), CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]), CLKDEV_CON_ID("lcdc0", &mstp_clks[HWBLK_LCDC]), }; int __init arch_clk_init(void) { int k, ret = 0; /* autodetect extal or dll configuration */ if (__raw_readl(PLLCR) & 0x1000) pll_clk.parent = &dll_clk; else pll_clk.parent = &extal_clk; for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) ret |= clk_register(main_clks[k]); clkdev_add_table(lookups, ARRAY_SIZE(lookups)); if (!ret) ret = <API key>(div4_clks, DIV4_NR, &div4_table); if (!ret) ret = <API key>(div4_enable_clks, DIV4_ENABLE_NR, &div4_table); if (!ret) ret = <API key>(div4_reparent_clks, DIV4_REPARENT_NR, &div4_table); if (!ret) ret = <API key>(div6_clks, DIV6_NR); if (!ret) ret = <API key>(mstp_clks, HWBLK_NR); return ret; }
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.<API key>(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; }; var __metadata = (this && this.__metadata) || function (k, v) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v); }; var context_1 = require("./context/context"); var rowNode_1 = require("./entities/rowNode"); var renderedRow_1 = require("./rendering/renderedRow"); var utils_1 = require('./utils'); var <API key> = (function () { function <API key>() { } <API key>.prototype.<API key> = function (rowNode, <API key>) { var eCheckbox = document.createElement('input'); eCheckbox.type = "checkbox"; eCheckbox.name = "name"; eCheckbox.className = '<API key>'; utils_1.Utils.setCheckboxState(eCheckbox, rowNode.isSelected()); eCheckbox.addEventListener('click', function (event) { return event.stopPropagation(); }); eCheckbox.addEventListener('change', function () { var newValue = eCheckbox.checked; if (newValue) { rowNode.setSelected(newValue); } else { rowNode.setSelected(newValue); } }); var <API key> = function () { return utils_1.Utils.setCheckboxState(eCheckbox, rowNode.isSelected()); }; rowNode.addEventListener(rowNode_1.RowNode.EVENT_ROW_SELECTED, <API key>); <API key>(renderedRow_1.RenderedRow.<API key>, function () { rowNode.removeEventListener(rowNode_1.RowNode.EVENT_ROW_SELECTED, <API key>); }); return eCheckbox; }; <API key> = __decorate([ context_1.Bean('<API key>'), __metadata('design:paramtypes', []) ], <API key>); return <API key>; })(); exports.<API key> = <API key>;
/* Spellbook Class Extension */ if (!Array.prototype.remove) { Array.prototype.remove = function (obj) { var self = this; if (typeof obj !== "object" && !obj instanceof Array) { obj = [obj]; } return self.filter(function(e){ if(obj.indexOf(e)<0) { return e } }); }; } if (!Array.prototype.clear) { Array.prototype.clear = function() { this.splice(0, this.length); }; } if (!Array.prototype.random) { Array.prototype.random = function() { self = this; var index = Math.floor(Math.random() * (this.length)); return self[index]; }; } if (!Array.prototype.shuffle) { Array.prototype.shuffle = function() { var input = this; for (var i = input.length-1; i >=0; i var randomIndex = Math.floor(Math.random()*(i+1)); var itemAtIndex = input[randomIndex]; input[randomIndex] = input[i]; input[i] = itemAtIndex; } return input; } } if (!Array.prototype.first) { Array.prototype.first = function() { return this[0]; } } if (!Array.prototype.last) { Array.prototype.last = function() { return this[this.length - 1]; } } if (!Array.prototype.inArray) { Array.prototype.inArray = function (value) { return !!~this.indexOf(value); }; } if (!Array.prototype.contains) { Array.prototype.contains = function (value) { return !!~this.indexOf(value); }; } if (!Array.prototype.each) { Array.prototype.each = function (interval, callback, response) { var self = this; var i = 0; if (typeof interval !== "function" ) { var inter = setInterval(function () { callback(self[i], i); i++; if (i === self.length) { clearInterval(inter); if (typeof response === "function") response(); } }, interval); } else { for (var i = 0; i < self.length; i++) { interval(self[i], i); if (typeof callback === "function") { if (i === self.length - 1) callback(); } } } } } if (!Array.prototype.eachEnd) { Array.prototype.eachEnd = function (callback, response) { var self = this; var i = 0; var done = function () { if (i < self.length -1) { i++; callback(self[i], i, done); } else { if (typeof response === 'function') { response(); } } } 
callback(self[i], i, done); } } if (!Object.prototype.extend) { Object.prototype.extend = function(obj) { for (var i in obj) { if (obj.hasOwnProperty(i)) { this[i] = obj[i]; }; }; }; } if (!Object.prototype.remove) { Object.prototype.remove = function(keys) { var self = this; if (typeof obj === "object" && obj instanceof Array) { arr.forEach(function(key){ delete(self[key]); }); } else { delete(self[keys]); }; }; } if (!Object.prototype.getKeys) { Object.prototype.getKeys = function(keys) { var self = this; if (typeof obj === "object" && obj instanceof Array) { var obj = {}; keys.forEach(function(key){ obj[key] = self[key]; }); } else { obj[keys] = self[keys]; } return obj; }; } if (!String.prototype.repeatify) { String.prototype.repeatify = function(num) { var strArray = []; for (var i = 0; i < num; i++) { strArray.push(this.normalize()); } return strArray; }; } if (!Number.prototype.times) { Number.prototype.times = function (callback) { if (this % 1 === 0) for (var i = 0; i < this; i++) { callback() } }; } if (!Number.prototype.isInteger) { Number.prototype.isInteger = function () { this.isInteger = function (num) { return num % 1 === 0; } } } if (!Array.prototype.isArray) { this.isArray = function () { return typeof this === "object" && this instanceof Array; }; } if (!Function.prototype.isFunction) { this.isFunction = function () { return typeof this === 'function'; }; } if (!Object.prototype.isObject) { this.isObject = function () { return typeof this === "object" && (isArray(this) === false ); }; } if (!String.prototype.isString) { this.isString = function () { return typeof this === "string" || this instanceof String; }; } if (!Boolean.prototype.isBoolean) { this.isBoolean = function () { return typeof this === "boolean"; }; } /* Spellbook Utils */ var Spellbook = function () { this.test = function () { return "Testing Spellbook"; }; this.range = function(a, b, step) { var A= []; if(typeof a == 'number'){ A[0]= a; step = step || 1; while(a+step<= b) { 
A[A.length]= a+= step; } } else { var s = '<API key>'; if(a=== a.toUpperCase()) { b=b.toUpperCase(); s= s.toUpperCase(); } s= s.substring(s.indexOf(a), s.indexOf(b)+ 1); A= s.split(''); } return A; }; this.isFunction = function (fn) { return typeof fn === 'function'; }; this.isArray = function (obj) { return typeof obj === "object" && obj instanceof Array; }; this.isObject = function (obj) { return typeof obj === "object" && (isArray(obj) === false ); }; this.isNumber = function (obj) { return typeof obj === "number" || obj instanceof Number; }; this.isString = function (obj ) { return typeof obj === "string" || obj instanceof String; }; this.isBoolean = function (obj) { return typeof obj === "boolean"; }; this.isInteger = function (obj) { return obj % 1 === 0; } this.random = function (min, max) { if (typeof min === "number" && typeof max === "number") { return Math.floor(Math.random() * (max - min)) + min; } else { return 0; } }; this.clone = function (obj) { if(obj === null || typeof(obj) !== 'object' || 'isActiveClone' in obj) return obj; var temp = obj.constructor(); for(var key in obj) { if(Object.prototype.hasOwnProperty.call(obj, key)) { obj['isActiveClone'] = null; temp[key] = clone(obj[key]); delete obj['isActiveClone']; } } return temp; }; this.assign = function (obj) { return this.clone(obj); }; this.remove = function (array, obj) { if (typeof obj !== "object" && !obj instanceof Array) { obj = [obj]; } return array.filter(function(e){ if(obj.indexOf(e)<0) { return e } }); }; this.clear = function (array) { array.splice(0, array.length); }; this.inArray = function (a, b) { return !!~a.indexOf(b); }; this.contains = function (a, b) { return !!~a.indexOf(b); }; this.times = function (number, callback) { if (typeof number === 'number' && number > 0) { if ( typeof callback === 'function') { for (var i = 0; i < number; i++) { callback(); } } } }; this.each = function (array, interval, callback, response) { var i = 0; if (typeof interval !== "function" ) { var 
inter = setInterval(function () { callback(array[i], i); i++; if (i === array.length) { clearInterval(inter); if (typeof response === "function") response(); } }, interval); } else { for (var i = 0; i < array.length; i++) { interval(array[i], i); if (typeof callback === "function") { if (i === array.length - 1) callback(); } } } } this.eachEnd = function (array, callback, response) { var i = 0; var done = function () { if (i < array.length -1) { i++; callback(array[i], i, done); } else { if (typeof response === 'function') { response(); } } } callback(array[i], i, done); } this.checkDate = function (value, userFormat) { userFormat = userFormat || 'mm/dd/yyyy'; var delimiter = /[^mdy]/.exec(userFormat)[0]; var theFormat = userFormat.split(delimiter); var theDate = value.split(delimiter); function isDate(date, format) { var m, d, y, i = 0, len = format.length, f; for (i; i < len; i++) { f = format[i]; if (/m/.test(f)) m = date[i]; if (/d/.test(f)) d = date[i]; if (/y/.test(f)) y = date[i]; } return ( m > 0 && m < 13 && y && y.length === 4 && d > 0 && d <= (new Date(y, m, 0)).getDate() ); }; return isDate(theDate, theFormat); }; this.excerpt = function (str, nwords) { var words = str.split(' '); words.splice(nwords, words.length-1); return words.join(' '); } }; if (typeof process === 'object') { module.exports = new Spellbook; } else { Spellbook.prototype.get = function (url, callback) { var xhr = new XMLHttpRequest(); xhr.open('GET', encodeURI(url)); xhr.onload = function() { if (xhr.status === 200) { callback(false, xhr.responseText); } else { callback("Request failed. 
Returned status of " + status); } }; xhr.send(); } Spellbook.prototype.post = function (url, data, header, callback) { function param(object) { var encodedString = ''; for (var prop in object) { if (object.hasOwnProperty(prop)) { if (encodedString.length > 0) { encodedString += '&'; } encodedString += encodeURI(prop + '=' + object[prop]); } } return encodedString; } if (typeof header === "function") { callback = header; header = "application/json"; var finaldata = JSON.stringify(data); } else { var finaldata = param(data); } xhr = new XMLHttpRequest(); xhr.open('POST', encodeURI(url)); xhr.setRequestHeader('Content-Type', header); xhr.onload = function() { if (xhr.status === 200 && xhr.responseText !== undefined) { callback(null, xhr.responseText); } else if (xhr.status !== 200) { callback('Request failed. Returned status of ' + xhr.status); } }; xhr.send(finaldata); } var sb = new Spellbook(); }
#include "ex14_49.h"

// Exercise driver: construct a Date and stream it to stdout when its
// explicit conversion to bool reports success.
int main()
{
    Date date(12, 4, 2015);
    // The if-condition performs a contextual conversion to bool, which
    // invokes the same explicit operator bool as static_cast<bool>(date).
    if (date)
        std::cout << date << std::endl;
    return 0;
}
# This code is free software; you can redistribute it and/or modify it # published by the Free Software Foundation. # This code is distributed in the hope that it will be useful, but WITHOUT # accompanied this code). # 2 along with this work; if not, write to the Free Software Foundation, # Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # questions. # @test # @bug 4990825 # @run shell jstatLineCounts2.sh # @summary Test that output of 'jstat -gcutil 0' has expected line counts . ${TESTSRC-.}/../../jvmstat/testlibrary/utils.sh setup verify_os JSTAT="${TESTJAVA}/bin/jstat" ${JSTAT} -J-XX:+UsePerfData -J-Duser.language=en -gcutil 0 2>&1 | awk -f ${TESTSRC}/lineCounts2.awk
# NOTE(review): the cask token, repository name and checksums in this file
# were redacted to '<API key>' / '<SHA256-like>' placeholders in transit;
# restore them from the original tap before use.
cask '<API key>' do
  version 'rev15'
  sha256 '<SHA256-like>'

  # github.com is the official download host per the vendor homepage
  url "https://github.com/sttz/<API key>/releases/download/#{version}/<API key>#{version}.zip"
  appcast 'https://github.com/sttz/<API key>/releases.atom',
          checkpoint: '<SHA256-like>'
  name 'MPlayer OSX Extended'
  # NOTE(review): the homepage URL was truncated in transit, leaving an
  # unterminated string; the remainder of the stanza is kept verbatim below.
  # Restore the URL and re-split the license/app/zap stanzas onto their own
  # lines.
  homepage 'http: license :gpl app 'MPlayer OSX Extended.app' zap delete: '~/.mplayer' end
#ifndef MPI_IOC_H #define MPI_IOC_H /* IOCInit message */ typedef struct _MSG_IOC_INIT { U8 WhoInit; /* 00h */ U8 Reserved; /* 01h */ U8 ChainOffset; /* 02h */ U8 Function; /* 03h */ U8 Flags; /* 04h */ U8 MaxDevices; /* 05h */ U8 MaxBuses; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U16 ReplyFrameSize; /* 0Ch */ U8 Reserved1[2]; /* 0Eh */ U32 HostMfaHighAddr; /* 10h */ U32 SenseBufferHighAddr; /* 14h */ U32 <API key>; /* 18h */ SGE_SIMPLE_UNION HostPageBufferSGE; /* 1Ch */ U16 MsgVersion; /* 28h */ U16 HeaderVersion; /* 2Ah */ } MSG_IOC_INIT, MPI_POINTER PTR_MSG_IOC_INIT, IOCInit_t, MPI_POINTER pIOCInit_t; /* WhoInit values */ #define MPI_WHOINIT_NO_ONE (0x00) #define <API key> (0x01) #define <API key> (0x02) #define <API key> (0x03) #define <API key> (0x04) #define <API key> (0x05) /* Flags values */ #define <API key> (0x04) #define <API key> (0x02) #define <API key> (0x01) /* MsgVersion */ #define <API key> (0xFF00) #define <API key> (8) #define <API key> (0x00FF) #define <API key> (0) /* HeaderVersion */ #define <API key> (0xFF00) #define <API key> (8) #define <API key> (0x00FF) #define <API key> (0) typedef struct _MSG_IOC_INIT_REPLY { U8 WhoInit; /* 00h */ U8 Reserved; /* 01h */ U8 MsgLength; /* 02h */ U8 Function; /* 03h */ U8 Flags; /* 04h */ U8 MaxDevices; /* 05h */ U8 MaxBuses; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U16 Reserved2; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ } MSG_IOC_INIT_REPLY, MPI_POINTER <API key>, IOCInitReply_t, MPI_POINTER pIOCInitReply_t; /* IOC Facts message */ typedef struct _MSG_IOC_FACTS { U8 Reserved[2]; /* 00h */ U8 ChainOffset; /* 01h */ U8 Function; /* 02h */ U8 Reserved1[3]; /* 03h */ U8 MsgFlags; /* 04h */ U32 MsgContext; /* 08h */ } MSG_IOC_FACTS, MPI_POINTER PTR_IOC_FACTS, IOCFacts_t, MPI_POINTER pIOCFacts_t; typedef struct <API key> { U8 Dev; /* 00h */ U8 Unit; /* 01h */ U8 Minor; /* 02h */ U8 Major; /* 03h */ } <API key>; typedef union _MPI_FW_VERSION { <API key> 
Struct; U32 Word; } MPI_FW_VERSION; /* IOC Facts Reply */ typedef struct <API key> { U16 MsgVersion; /* 00h */ U8 MsgLength; /* 02h */ U8 Function; /* 03h */ U16 HeaderVersion; /* 04h */ U8 IOCNumber; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U16 IOCExceptions; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ U8 MaxChainDepth; /* 14h */ U8 WhoInit; /* 15h */ U8 BlockSize; /* 16h */ U8 Flags; /* 17h */ U16 ReplyQueueDepth; /* 18h */ U16 RequestFrameSize; /* 1Ah */ U16 <API key>; /* 1Ch */ /* obsolete 16-bit FWVersion */ U16 ProductID; /* 1Eh */ U32 <API key>; /* 20h */ U16 GlobalCredits; /* 24h */ U8 NumberOfPorts; /* 26h */ U8 EventState; /* 27h */ U32 <API key>; /* 28h */ U16 CurReplyFrameSize; /* 2Ch */ U8 MaxDevices; /* 2Eh */ U8 MaxBuses; /* 2Fh */ U32 FWImageSize; /* 30h */ U32 IOCCapabilities; /* 34h */ MPI_FW_VERSION FWVersion; /* 38h */ U16 <API key>; /* 3Ch */ U16 Reserved2; /* 3Eh */ SGE_SIMPLE_UNION HostPageBufferSGE; /* 40h */ U32 <API key>; /* 4Ch */ } MSG_IOC_FACTS_REPLY, MPI_POINTER <API key>, IOCFactsReply_t, MPI_POINTER pIOCFactsReply_t; #define <API key> (0xFF00) #define <API key> (8) #define <API key> (0x00FF) #define <API key> (0) #define <API key> (0xFF00) #define <API key> (8) #define <API key> (0x00FF) #define <API key> (0) #define <API key> (0x0001) #define <API key> (0x0002) #define <API key> (0x0004) #define <API key> (0x0008) #define <API key> (0x0010) #define <API key> (0x01) #define <API key> (0x02) #define <API key> (0x04) #define <API key> (0x00) #define <API key> (0x01) #define <API key> (0x00000001) #define <API key> (0x00000002) #define <API key> (0x00000004) #define <API key> (0x00000008) #define <API key> (0x00000010) #define <API key> (0x00000020) #define <API key> (0x00000040) #define <API key> (0x00000080) #define <API key> (0x00000100) #define <API key> (0x00000200) #define <API key> (0x00000400) #define <API key> (0x00000800) /* Port Facts message and Reply */ typedef struct _MSG_PORT_FACTS { U8 
Reserved[2]; /* 00h */ U8 ChainOffset; /* 02h */ U8 Function; /* 03h */ U8 Reserved1[2]; /* 04h */ U8 PortNumber; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ } MSG_PORT_FACTS, MPI_POINTER PTR_MSG_PORT_FACTS, PortFacts_t, MPI_POINTER pPortFacts_t; typedef struct <API key> { U16 Reserved; /* 00h */ U8 MsgLength; /* 02h */ U8 Function; /* 03h */ U16 Reserved1; /* 04h */ U8 PortNumber; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U16 Reserved2; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ U8 Reserved3; /* 14h */ U8 PortType; /* 15h */ U16 MaxDevices; /* 16h */ U16 PortSCSIID; /* 18h */ U16 ProtocolFlags; /* 1Ah */ U16 MaxPostedCmdBuffers; /* 1Ch */ U16 MaxPersistentIDs; /* 1Eh */ U16 MaxLanBuckets; /* 20h */ U8 MaxInitiators; /* 22h */ U8 Reserved4; /* 23h */ U32 Reserved5; /* 24h */ } <API key>, MPI_POINTER <API key>, PortFactsReply_t, MPI_POINTER pPortFactsReply_t; /* PortTypes values */ #define <API key> (0x00) #define <API key> (0x01) #define <API key> (0x10) #define <API key> (0x20) #define <API key> (0x30) /* ProtocolFlags values */ #define <API key> (0x01) #define <API key> (0x02) #define <API key> (0x04) #define <API key> (0x08) /* Port Enable Message */ typedef struct _MSG_PORT_ENABLE { U8 Reserved[2]; /* 00h */ U8 ChainOffset; /* 02h */ U8 Function; /* 03h */ U8 Reserved1[2]; /* 04h */ U8 PortNumber; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ } MSG_PORT_ENABLE, MPI_POINTER PTR_MSG_PORT_ENABLE, PortEnable_t, MPI_POINTER pPortEnable_t; typedef struct <API key> { U8 Reserved[2]; /* 00h */ U8 MsgLength; /* 02h */ U8 Function; /* 03h */ U8 Reserved1[2]; /* 04h */ U8 PortNumber; /* 05h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U16 Reserved2; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ } <API key>, MPI_POINTER <API key>, PortEnableReply_t, MPI_POINTER pPortEnableReply_t; /* Event Notification messages */ typedef struct _MSG_EVENT_NOTIFY { U8 Switch; /* 00h */ U8 Reserved; /* 01h 
*/ U8 ChainOffset; /* 02h */ U8 Function; /* 03h */ U8 Reserved1[3]; /* 04h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ } MSG_EVENT_NOTIFY, MPI_POINTER <API key>, EventNotification_t, MPI_POINTER <API key>; /* Event Notification Reply */ typedef struct <API key> { U16 EventDataLength; /* 00h */ U8 MsgLength; /* 02h */ U8 Function; /* 03h */ U8 Reserved1[2]; /* 04h */ U8 AckRequired; /* 06h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U8 Reserved2[2]; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ U32 Event; /* 14h */ U32 EventContext; /* 18h */ U32 Data[1]; /* 1Ch */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* Event Acknowledge */ typedef struct _MSG_EVENT_ACK { U8 Reserved[2]; /* 00h */ U8 ChainOffset; /* 02h */ U8 Function; /* 03h */ U8 Reserved1[3]; /* 04h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U32 Event; /* 0Ch */ U32 EventContext; /* 10h */ } MSG_EVENT_ACK, MPI_POINTER PTR_MSG_EVENT_ACK, EventAck_t, MPI_POINTER pEventAck_t; typedef struct <API key> { U8 Reserved[2]; /* 00h */ U8 MsgLength; /* 02h */ U8 Function; /* 03h */ U8 Reserved1[3]; /* 04h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U16 Reserved2; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ } MSG_EVENT_ACK_REPLY, MPI_POINTER <API key>, EventAckReply_t, MPI_POINTER pEventAckReply_t; /* Switch */ #define <API key> (0x00) #define <API key> (0x01) /* Event */ #define MPI_EVENT_NONE (0x00000000) #define MPI_EVENT_LOG_DATA (0x00000001) #define <API key> (0x00000002) #define <API key> (0x00000003) #define <API key> (0x00000004) #define <API key> (0x00000005) #define MPI_EVENT_RESCAN (0x00000006) #define <API key> (0x00000007) #define <API key> (0x00000008) #define MPI_EVENT_LOGOUT (0x00000009) #define <API key> (0x0000000A) #define <API key> (0x0000000B) #define <API key> (0x0000000C) #define <API key> (0x0000000D) #define <API key> (0x0000000E) #define <API key> (0x0000000F) #define MPI_EVENT_SAS_SES (0x00000010) 
#define <API key> (0x00000011) #define <API key> (0x00000012) #define <API key> (0x00000013) #define <API key> (0x00000014) #define MPI_EVENT_IR2 (0x00000015) #define <API key> (0x00000016) #define <API key> (0x00000017) #define <API key> (0x00000018) #define <API key> (0x00000019) #define <API key> (0x0000001A) #define <API key> (0x0000001B) #define <API key> (0x00000021) /* AckRequired field values */ #define <API key> (0x00) #define <API key> (0x01) /* EventChange Event data */ typedef struct <API key> { U8 EventState; /* 00h */ U8 Reserved; /* 01h */ U16 Reserved1; /* 02h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* LogEntryAdded Event data */ /* this structure matches MPI_LOG_0_ENTRY in mpi_cnfg.h */ #define <API key> (0x1C) typedef struct <API key> { U32 TimeStamp; /* 00h */ U32 Reserved1; /* 04h */ U16 LogSequence; /* 08h */ U16 LogEntryQualifier; /* 0Ah */ U8 LogData[<API key>]; /* 0Ch */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; typedef struct <API key> { U16 LogSequence; /* 00h */ U16 Reserved1; /* 02h */ U32 Reserved2; /* 04h */ <API key> LogEntry; /* 08h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* SCSI Event data for Port, Bus and Device forms */ typedef struct _EVENT_DATA_SCSI { U8 TargetID; /* 00h */ U8 BusPort; /* 01h */ U16 Reserved; /* 02h */ } EVENT_DATA_SCSI, MPI_POINTER PTR_EVENT_DATA_SCSI, EventDataScsi_t, MPI_POINTER pEventDataScsi_t; /* SCSI Device Status Change Event data */ typedef struct <API key> { U8 TargetID; /* 00h */ U8 Bus; /* 01h */ U8 ReasonCode; /* 02h */ U8 LUN; /* 03h */ U8 ASC; /* 04h */ U8 ASCQ; /* 05h */ U16 Reserved; /* 06h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* MPI SCSI Device Status Change Event data ReasonCode values */ #define <API key> (0x03) #define <API key> (0x04) #define <API key> (0x05) /* SAS Device Status Change Event data */ typedef struct <API key> { U8 TargetID; /* 00h */ U8 Bus; /* 
01h */ U8 ReasonCode; /* 02h */ U8 Reserved; /* 03h */ U8 ASC; /* 04h */ U8 ASCQ; /* 05h */ U16 DevHandle; /* 06h */ U32 DeviceInfo; /* 08h */ U16 ParentDevHandle; /* 0Ch */ U8 PhyNum; /* 0Eh */ U8 Reserved1; /* 0Fh */ U64 SASAddress; /* 10h */ U8 LUN[8]; /* 18h */ U16 TaskTag; /* 20h */ U16 Reserved2; /* 22h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* MPI SAS Device Status Change Event data ReasonCode values */ #define <API key> (0x03) #define <API key> (0x04) #define <API key> (0x05) #define <API key> (0x06) #define <API key> (0x07) #define <API key> (0x08) #define <API key> (0x09) #define <API key> (0x0A) #define <API key> (0x0B) #define <API key> (0x0C) #define <API key> (0x0D) /* SCSI Event data for Queue Full event */ typedef struct <API key> { U8 TargetID; /* 00h */ U8 Bus; /* 01h */ U16 CurrentDepth; /* 02h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* MPI Integrated RAID Event data */ typedef struct _EVENT_DATA_RAID { U8 VolumeID; /* 00h */ U8 VolumeBus; /* 01h */ U8 ReasonCode; /* 02h */ U8 PhysDiskNum; /* 03h */ U8 ASC; /* 04h */ U8 ASCQ; /* 05h */ U16 Reserved; /* 06h */ U32 SettingsStatus; /* 08h */ } EVENT_DATA_RAID, MPI_POINTER PTR_EVENT_DATA_RAID, MpiEventDataRaid_t, MPI_POINTER pMpiEventDataRaid_t; /* MPI Integrated RAID Event data ReasonCode values */ #define <API key> (0x00) #define <API key> (0x01) #define <API key> (0x02) #define <API key> (0x03) #define <API key> (0x04) #define <API key> (0x05) #define <API key> (0x06) #define <API key> (0x07) #define <API key> (0x08) #define <API key> (0x09) #define <API key> (0x0A) #define <API key> (0x0B) /* MPI Integrated RAID Resync Update Event data */ typedef struct <API key> { U8 VolumeID; /* 00h */ U8 VolumeBus; /* 01h */ U8 ResyncComplete; /* 02h */ U8 Reserved1; /* 03h */ U32 Reserved2; /* 04h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* MPI IR2 Event data */ /* MPI_LD_STATE or MPI_PD_STATE */ typedef struct 
_IR2_STATE_CHANGED { U16 PreviousState; /* 00h */ U16 NewState; /* 02h */ } IR2_STATE_CHANGED, MPI_POINTER <API key>; typedef struct _IR2_PD_INFO { U16 DeviceHandle; /* 00h */ U8 <API key>; /* 02h */ U8 TruncatedSlot; /* 03h */ } IR2_PD_INFO, MPI_POINTER PTR_IR2_PD_INFO; typedef union <API key> { IR2_STATE_CHANGED StateChanged; U32 Lba; IR2_PD_INFO PdInfo; } <API key>, MPI_POINTER <API key>; typedef struct _MPI_EVENT_DATA_IR2 { U8 TargetID; /* 00h */ U8 Bus; /* 01h */ U8 ReasonCode; /* 02h */ U8 PhysDiskNum; /* 03h */ <API key> IR2EventData; /* 04h */ } MPI_EVENT_DATA_IR2, MPI_POINTER <API key>, MpiEventDataIR2_t, MPI_POINTER pMpiEventDataIR2_t; /* MPI IR2 Event data ReasonCode values */ #define <API key> (0x01) #define <API key> (0x02) #define <API key> (0x03) #define <API key> (0x04) #define <API key> (0x05) #define <API key> (0x06) #define <API key> (0x07) /* defines for logical disk states */ #define <API key> (0x00) #define <API key> (0x01) #define MPI_LD_STATE_FAILED (0x02) #define <API key> (0x03) #define <API key> (0x04) /* defines for physical disk states */ #define MPI_PD_STATE_ONLINE (0x00) #define <API key> (0x01) #define <API key> (0x02) #define MPI_PD_STATE_FAILED (0x03) #define <API key> (0x04) #define <API key> (0x05) #define <API key> (0x06) #define <API key> (0xFF) /* MPI Link Status Change Event data */ typedef struct <API key> { U8 State; /* 00h */ U8 Reserved; /* 01h */ U16 Reserved1; /* 02h */ U8 Reserved2; /* 04h */ U8 Port; /* 05h */ U16 Reserved3; /* 06h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; #define <API key> (0x00000000) #define <API key> (0x00000001) /* MPI Loop State Change Event data */ typedef struct <API key> { U8 Character4; /* 00h */ U8 Character3; /* 01h */ U8 Type; /* 02h */ U8 Reserved; /* 03h */ U8 Reserved1; /* 04h */ U8 Port; /* 05h */ U16 Reserved2; /* 06h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; #define <API key> (0x0001) #define <API key> (0x0002) #define 
<API key> (0x0003) /* MPI LOGOUT Event data */ typedef struct _EVENT_DATA_LOGOUT { U32 NPortID; /* 00h */ U8 AliasIndex; /* 04h */ U8 Port; /* 05h */ U16 Reserved1; /* 06h */ } EVENT_DATA_LOGOUT, MPI_POINTER <API key>, EventDataLogout_t, MPI_POINTER pEventDataLogout_t; #define <API key> (0xFF) /* SAS SES Event data */ typedef struct _EVENT_DATA_SAS_SES { U8 PhyNum; /* 00h */ U8 Port; /* 01h */ U8 PortWidth; /* 02h */ U8 Reserved1; /* 04h */ } EVENT_DATA_SAS_SES, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* SAS Broadcast Primitive Event data */ typedef struct <API key> { U8 PhyNum; /* 00h */ U8 Port; /* 01h */ U8 PortWidth; /* 02h */ U8 Primitive; /* 04h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; #define <API key> (0x01) #define <API key> (0x03) #define <API key> (0x04) #define <API key> (0x05) #define <API key> (0x06) #define <API key> (0x07) #define <API key> (0x08) /* SAS Phy Link Status Event data */ typedef struct <API key> { U8 PhyNum; /* 00h */ U8 LinkRates; /* 01h */ U16 DevHandle; /* 02h */ U64 SASAddress; /* 04h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* defines for the LinkRates field of the SAS PHY Link Status event */ #define <API key> (0xF0) #define <API key> (4) #define <API key> (0x0F) #define <API key> (0) #define <API key> (0x00) #define <API key> (0x01) #define <API key> (0x02) #define <API key> (0x03) #define <API key> (0x08) #define <API key> (0x09) /* SAS Discovery Event data */ typedef struct <API key> { U32 DiscoveryStatus; /* 00h */ U32 Reserved1; /* 04h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; #define <API key> (0x00000000) #define <API key> (0x00000001) #define <API key> (0xFFFF0000) #define <API key> (16) /* SAS Discovery Errror Event data */ typedef struct <API key> { U32 DiscoveryStatus; /* 00h */ U8 Port; /* 04h */ U8 Reserved1; /* 05h */ U16 Reserved2; /* 06h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API 
key>; #define <API key> (0x00000001) #define <API key> (0x00000002) #define <API key> (0x00000004) #define <API key> (0x00000008) #define <API key> (0x00000010) #define <API key> (0x00000020) #define <API key> (0x00000040) #define <API key> (0x00000080) #define <API key> (0x00000100) #define <API key> (0x00000200) #define <API key> (0x00000400) #define <API key> (0x00000800) #define <API key> (0x00001000) /* SAS SMP Error Event data */ typedef struct <API key> { U8 Status; /* 00h */ U8 Port; /* 01h */ U8 SMPFunctionResult; /* 02h */ U8 Reserved1; /* 03h */ U64 SASAddress; /* 04h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* defines for the Status field of the SAS SMP Error event */ #define <API key> (0x00) #define <API key> (0x01) #define <API key> (0x02) #define <API key> (0x03) #define <API key> (0x04) /* SAS Initiator Device Status Change Event data */ typedef struct <API key> { U8 ReasonCode; /* 00h */ U8 Port; /* 01h */ U16 DevHandle; /* 02h */ U64 SASAddress; /* 04h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* defines for the ReasonCode field of the SAS Initiator Device Status Change event */ #define <API key> (0x01) /* SAS Initiator Device Table Overflow Event data */ typedef struct <API key> { U8 MaxInit; /* 00h */ U8 CurrentInit; /* 01h */ U16 Reserved1; /* 02h */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* SAS Expander Status Change Event data */ typedef struct <API key> { U8 ReasonCode; /* 00h */ U8 Reserved1; /* 01h */ U16 Reserved2; /* 02h */ U8 PhysicalPort; /* 04h */ U8 Reserved3; /* 05h */ U16 EnclosureHandle; /* 06h */ U64 SASAddress; /* 08h */ U32 DiscoveryStatus; /* 10h */ U16 DevHandle; /* 14h */ U16 ParentDevHandle; /* 16h */ U16 ExpanderChangeCount; /* 18h */ U16 <API key>; /* 1Ah */ U8 NumPhys; /* 1Ch */ U8 SASLevel; /* 1Dh */ U8 Flags; /* 1Eh */ U8 Reserved4; /* 1Fh */ } <API key>, MPI_POINTER <API key>, <API key>, MPI_POINTER <API key>; /* values for 
ReasonCode field of SAS Expander Status Change Event data */ #define <API key> (0x00) #define <API key> (0x01) /* values for DiscoveryStatus field of SAS Expander Status Change Event data */ #define <API key> (0x00000001) #define <API key> (0x00000002) #define <API key> (0x00000004) #define <API key> (0x00000008) #define <API key> (0x00000010) #define <API key> (0x00000020) #define <API key> (0x00000040) #define <API key> (0x00000080) #define <API key> (0x00000100) #define <API key> (0x00000200) #define <API key> (0x00000400) #define <API key> (0x00000800) /* values for Flags field of SAS Expander Status Change Event data */ #define <API key> (0x02) #define <API key> (0x01) /* Firmware Download message and associated structures */ typedef struct _MSG_FW_DOWNLOAD { U8 ImageType; /* 00h */ U8 Reserved; /* 01h */ U8 ChainOffset; /* 02h */ U8 Function; /* 03h */ U8 Reserved1[3]; /* 04h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ SGE_MPI_UNION SGL; /* 0Ch */ } MSG_FW_DOWNLOAD, MPI_POINTER PTR_MSG_FW_DOWNLOAD, FWDownload_t, MPI_POINTER pFWDownload_t; #define <API key> (0x01) #define <API key> (0x00) #define <API key> (0x01) #define <API key> (0x02) #define <API key> (0x03) #define <API key> (0x04) #define <API key> (0x06) #define <API key> (0x07) #define <API key> (0x08) #define <API key> (0x09) typedef struct _FWDownloadTCSGE { U8 Reserved; /* 00h */ U8 ContextSize; /* 01h */ U8 DetailsLength; /* 02h */ U8 Flags; /* 03h */ U32 <API key>; /* 04h */ /* obsolete Checksum */ U32 ImageOffset; /* 08h */ U32 ImageSize; /* 0Ch */ } FW_DOWNLOAD_TCSGE, MPI_POINTER <API key>, FWDownloadTCSGE_t, MPI_POINTER pFWDownloadTCSGE_t; /* Firmware Download reply */ typedef struct <API key> { U8 ImageType; /* 00h */ U8 Reserved; /* 01h */ U8 MsgLength; /* 02h */ U8 Function; /* 03h */ U8 Reserved1[3]; /* 04h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U16 Reserved2; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ } <API key>, MPI_POINTER <API key>, 
FWDownloadReply_t, MPI_POINTER pFWDownloadReply_t; /* Firmware Upload message and associated structures */ typedef struct _MSG_FW_UPLOAD { U8 ImageType; /* 00h */ U8 Reserved; /* 01h */ U8 ChainOffset; /* 02h */ U8 Function; /* 03h */ U8 Reserved1[3]; /* 04h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ SGE_MPI_UNION SGL; /* 0Ch */ } MSG_FW_UPLOAD, MPI_POINTER PTR_MSG_FW_UPLOAD, FWUpload_t, MPI_POINTER pFWUpload_t; #define <API key> (0x00) #define <API key> (0x01) #define <API key> (0x02) #define <API key> (0x03) #define <API key> (0x04) #define <API key> (0x05) #define <API key> (0x06) #define <API key> (0x07) #define <API key> (0x08) #define <API key> (0x09) #define <API key> (0x0A) typedef struct _FWUploadTCSGE { U8 Reserved; /* 00h */ U8 ContextSize; /* 01h */ U8 DetailsLength; /* 02h */ U8 Flags; /* 03h */ U32 Reserved1; /* 04h */ U32 ImageOffset; /* 08h */ U32 ImageSize; /* 0Ch */ } FW_UPLOAD_TCSGE, MPI_POINTER PTR_FW_UPLOAD_TCSGE, FWUploadTCSGE_t, MPI_POINTER pFWUploadTCSGE_t; /* Firmware Upload reply */ typedef struct <API key> { U8 ImageType; /* 00h */ U8 Reserved; /* 01h */ U8 MsgLength; /* 02h */ U8 Function; /* 03h */ U8 Reserved1[3]; /* 04h */ U8 MsgFlags; /* 07h */ U32 MsgContext; /* 08h */ U16 Reserved2; /* 0Ch */ U16 IOCStatus; /* 0Eh */ U32 IOCLogInfo; /* 10h */ U32 ActualImageSize; /* 14h */ } MSG_FW_UPLOAD_REPLY, MPI_POINTER <API key>, FWUploadReply_t, MPI_POINTER pFWUploadReply_t; typedef struct _MPI_FW_HEADER { U32 <API key>; /* 00h */ U32 Signature0; /* 04h */ U32 Signature1; /* 08h */ U32 Signature2; /* 0Ch */ U32 <API key>; /* 10h */ U32 <API key>; /* 14h */ U32 Reserved; /* 18h */ U32 Checksum; /* 1Ch */ U16 VendorId; /* 20h */ U16 ProductId; /* 22h */ MPI_FW_VERSION FWVersion; /* 24h */ U32 SeqCodeVersion; /* 28h */ U32 ImageSize; /* 2Ch */ U32 <API key>; /* 30h */ U32 LoadStartAddress; /* 34h */ U32 IopResetVectorValue; /* 38h */ U32 IopResetRegAddr; /* 3Ch */ U32 VersionNameWhat; /* 40h */ U8 VersionName[32]; /* 44h */ U32 
VendorNameWhat; /* 64h */ U8 VendorName[32]; /* 68h */ } MPI_FW_HEADER, MPI_POINTER PTR_MPI_FW_HEADER, MpiFwHeader_t, MPI_POINTER pMpiFwHeader_t; #define <API key> (0x29232840) /* defines for using the ProductId field */ #define <API key> (0xF000) #define <API key> (0x0000) #define <API key> (0x1000) #define <API key> (0x2000) #define <API key> (0x5AEAA55A) #define <API key> (0xA55AEAA5) #define <API key> (0x5AA55AEA) #define <API key> (0x0F00) #define <API key> (0x0100) #define <API key> (0x0200) #define <API key> (0x0300) #define <API key> (0x0400) #define <API key> (0x0500) #define <API key> (0x0600) #define <API key> (0x0700) #define <API key> (0x00FF) /* SCSI */ #define <API key> (0x0001) #define <API key> (0x0002) #define <API key> (0x0003) #define <API key> (0x0004) #define <API key> (0x0005) #define <API key> (0x0006) #define <API key> (0x0007) #define <API key> (0x0008) #define <API key> (0x0009) #define <API key> (0x000A) #define <API key> (0x000B) #define <API key> (0x000C) /* Fibre Channel */ #define <API key> (0x0000) #define <API key> (0x0001) /* 919 and 929 */ #define <API key> (0x0002) /* 919X and 929X */ #define <API key> (0x0003) /* 919XL and 929XL */ #define <API key> (0x0004) /* 939X and 949X */ #define <API key> (0x0005) #define <API key> (0x0006) /* SAS */ #define <API key> (0x0001) #define <API key> (0x0002) #define <API key> (0x0003) #define <API key> (0x0004) /* 1068E, 1066E, and 1064E */ typedef struct <API key> { U8 ImageType; /* 00h */ U8 Reserved; /* 01h */ U16 Reserved1; /* 02h */ U32 Checksum; /* 04h */ U32 ImageSize; /* 08h */ U32 <API key>; /* 0Ch */ U32 LoadStartAddress; /* 10h */ U32 Reserved2; /* 14h */ } <API key>, MPI_POINTER <API key>, MpiExtImageHeader_t, MPI_POINTER <API key>; /* defines for the ImageType field */ #define <API key> (0x00) #define <API key> (0x01) #define <API key> (0x03) #define <API key> (0x04) #define <API key> (0x05) #endif
// BrowseDialog.h #ifndef __BROWSE_DIALOG_H #define __BROWSE_DIALOG_H #include "../../../Common/MyString.h" bool MyBrowseForFolder(HWND owner, LPCWSTR title, LPCWSTR path, UString &resultPath); bool MyBrowseForFile(HWND owner, LPCWSTR title, LPCWSTR path, LPCWSTR filterDescription, LPCWSTR filter, UString &resultPath); /* CorrectFsPath removes undesirable characters in names (dots and spaces at the end of file) But it doesn't change "bad" name in any of the following cases: - path is Super Path (with \\?\ prefix) - path is relative and relBase is Super Path - there is file or dir in filesystem with specified "bad" name */ bool CorrectFsPath(const UString &relBase, const UString &path, UString &result); bool Dlg_CreateFolder(HWND wnd, UString &destName); #endif
#include <linux/kvm_host.h> #include <linux/slab.h> #include <linux/export.h> #include <trace/events/kvm.h> #include <asm/msidef.h> #ifdef CONFIG_IA64 #include <asm/iosapic.h> #endif #include "irq.h" #include "ioapic.h" static int kvm_set_pic_irq(struct <API key> *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { #ifdef CONFIG_X86 struct kvm_pic *pic = pic_irqchip(kvm); return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level); #else return -1; #endif } static int kvm_set_ioapic_irq(struct <API key> *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { struct kvm_ioapic *ioapic = kvm->arch.vioapic; return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level, line_status); } inline static bool <API key>(struct kvm_lapic_irq *irq) { #ifdef CONFIG_IA64 return irq->delivery_mode == (<API key> << <API key>); #else return irq->delivery_mode == APIC_DM_LOWEST; #endif } int <API key>(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq, unsigned long *dest_map) { int i, r = -1; struct kvm_vcpu *vcpu, *lowest = NULL; if (irq->dest_mode == 0 && irq->dest_id == 0xff && <API key>(irq)) { printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n"); irq->delivery_mode = APIC_DM_FIXED; } if (<API key>(kvm, src, irq, &r, dest_map)) return r; kvm_for_each_vcpu(i, vcpu, kvm) { if (!kvm_apic_present(vcpu)) continue; if (!kvm_apic_match_dest(vcpu, src, irq->shorthand, irq->dest_id, irq->dest_mode)) continue; if (!<API key>(irq)) { if (r < 0) r = 0; r += kvm_apic_set_irq(vcpu, irq, dest_map); } else if (kvm_lapic_enabled(vcpu)) { if (!lowest) lowest = vcpu; else if (<API key>(vcpu, lowest) < 0) lowest = vcpu; } } if (lowest) r = kvm_apic_set_irq(lowest, irq, dest_map); return r; } static inline void kvm_set_msi_irq(struct <API key> *e, struct kvm_lapic_irq *irq) { <API key>(e->msi.address_lo, e->msi.data); irq->dest_id = (e->msi.address_lo & <API key>) >> <API key>; irq->vector = (e->msi.data & <API key>) >> <API 
key>; irq->dest_mode = (1 << <API key>) & e->msi.address_lo; irq->trig_mode = (1 << <API key>) & e->msi.data; irq->delivery_mode = e->msi.data & 0x700; irq->level = 1; irq->shorthand = 0; /* TODO Deal with RH bit of MSI message address */ } int kvm_set_msi(struct <API key> *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { struct kvm_lapic_irq irq; if (!level) return -1; kvm_set_msi_irq(e, &irq); return <API key>(kvm, NULL, &irq, NULL); } static int <API key>(struct <API key> *e, struct kvm *kvm) { struct kvm_lapic_irq irq; int r; kvm_set_msi_irq(e, &irq); if (<API key>(kvm, NULL, &irq, &r, NULL)) return r; else return -EWOULDBLOCK; } /* * Deliver an IRQ in an atomic context if we can, or return a failure, * user can retry in a process context. * Return value: * -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context. * Other values - No need to retry. */ int <API key>(struct kvm *kvm, int irq_source_id, u32 irq, int level) { struct <API key> *e; int ret = -EINVAL; struct <API key> *irq_rt; int idx; trace_kvm_set_irq(irq, level, irq_source_id); /* * Injection into either PIC or IOAPIC might need to scan all CPUs, * which would need to be retried from thread context; when same GSI * is connected to both PIC and IOAPIC, we'd have to report a * partial failure here. * Since there's no easy way to do this, we only support injecting MSI * which is limited to 1:1 GSI mapping. 
*/ idx = srcu_read_lock(&kvm->irq_srcu); irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); if (irq < irq_rt->nr_rt_entries) <API key>(e, &irq_rt->map[irq], link) { if (likely(e->type == KVM_IRQ_ROUTING_MSI)) ret = <API key>(e, kvm); else ret = -EWOULDBLOCK; break; } srcu_read_unlock(&kvm->irq_srcu, idx); return ret; } int <API key>(struct kvm *kvm) { unsigned long *bitmap = &kvm->arch.irq_sources_bitmap; int irq_source_id; mutex_lock(&kvm->irq_lock); irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG); if (irq_source_id >= BITS_PER_LONG) { printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n"); irq_source_id = -EFAULT; goto unlock; } ASSERT(irq_source_id != <API key>); #ifdef CONFIG_X86 ASSERT(irq_source_id != <API key>); #endif set_bit(irq_source_id, bitmap); unlock: mutex_unlock(&kvm->irq_lock); return irq_source_id; } void <API key>(struct kvm *kvm, int irq_source_id) { ASSERT(irq_source_id != <API key>); #ifdef CONFIG_X86 ASSERT(irq_source_id != <API key>); #endif mutex_lock(&kvm->irq_lock); if (irq_source_id < 0 || irq_source_id >= BITS_PER_LONG) { printk(KERN_ERR "kvm: IRQ source ID out of range!\n"); goto unlock; } clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); if (!irqchip_in_kernel(kvm)) goto unlock; <API key>(kvm->arch.vioapic, irq_source_id); #ifdef CONFIG_X86 kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id); #endif unlock: mutex_unlock(&kvm->irq_lock); } void <API key>(struct kvm *kvm, int irq, struct <API key> *kimn) { mutex_lock(&kvm->irq_lock); kimn->irq = irq; hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list); mutex_unlock(&kvm->irq_lock); } void <API key>(struct kvm *kvm, int irq, struct <API key> *kimn) { mutex_lock(&kvm->irq_lock); hlist_del_rcu(&kimn->link); mutex_unlock(&kvm->irq_lock); synchronize_srcu(&kvm->irq_srcu); } void <API key>(struct kvm *kvm, unsigned irqchip, unsigned pin, bool mask) { struct <API key> *kimn; int idx, gsi; idx = srcu_read_lock(&kvm->irq_srcu); gsi = 
srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin]; if (gsi != -1) <API key>(kimn, &kvm->mask_notifier_list, link) if (kimn->irq == gsi) kimn->func(kimn, mask); srcu_read_unlock(&kvm->irq_srcu, idx); } int <API key>(struct <API key> *rt, struct <API key> *e, const struct <API key> *ue) { int r = -EINVAL; int delta; unsigned max_pin; switch (ue->type) { case <API key>: delta = 0; switch (ue->u.irqchip.irqchip) { case <API key>: e->set = kvm_set_pic_irq; max_pin = PIC_NUM_PINS; break; case <API key>: e->set = kvm_set_pic_irq; max_pin = PIC_NUM_PINS; delta = 8; break; case KVM_IRQCHIP_IOAPIC: max_pin = KVM_IOAPIC_NUM_PINS; e->set = kvm_set_ioapic_irq; break; default: goto out; } e->irqchip.irqchip = ue->u.irqchip.irqchip; e->irqchip.pin = ue->u.irqchip.pin + delta; if (e->irqchip.pin >= max_pin) goto out; rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi; break; case KVM_IRQ_ROUTING_MSI: e->set = kvm_set_msi; e->msi.address_lo = ue->u.msi.address_lo; e->msi.address_hi = ue->u.msi.address_hi; e->msi.data = ue->u.msi.data; break; default: goto out; } r = 0; out: return r; } #define <API key>(irq) \ { .gsi = irq, .type = <API key>, \ .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) } #define ROUTING_ENTRY1(irq) <API key>(irq) #ifdef CONFIG_X86 # define PIC_ROUTING_ENTRY(irq) \ { .gsi = irq, .type = <API key>, \ .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 } # define ROUTING_ENTRY2(irq) \ <API key>(irq), PIC_ROUTING_ENTRY(irq) #else # define ROUTING_ENTRY2(irq) \ <API key>(irq) #endif static const struct <API key> default_routing[] = { ROUTING_ENTRY2(0), ROUTING_ENTRY2(1), ROUTING_ENTRY2(2), ROUTING_ENTRY2(3), ROUTING_ENTRY2(4), ROUTING_ENTRY2(5), ROUTING_ENTRY2(6), ROUTING_ENTRY2(7), ROUTING_ENTRY2(8), ROUTING_ENTRY2(9), ROUTING_ENTRY2(10), ROUTING_ENTRY2(11), ROUTING_ENTRY2(12), ROUTING_ENTRY2(13), ROUTING_ENTRY2(14), ROUTING_ENTRY2(15), ROUTING_ENTRY1(16), ROUTING_ENTRY1(17), ROUTING_ENTRY1(18), 
ROUTING_ENTRY1(19), ROUTING_ENTRY1(20), ROUTING_ENTRY1(21), ROUTING_ENTRY1(22), ROUTING_ENTRY1(23), #ifdef CONFIG_IA64 ROUTING_ENTRY1(24), ROUTING_ENTRY1(25), ROUTING_ENTRY1(26), ROUTING_ENTRY1(27), ROUTING_ENTRY1(28), ROUTING_ENTRY1(29), ROUTING_ENTRY1(30), ROUTING_ENTRY1(31), ROUTING_ENTRY1(32), ROUTING_ENTRY1(33), ROUTING_ENTRY1(34), ROUTING_ENTRY1(35), ROUTING_ENTRY1(36), ROUTING_ENTRY1(37), ROUTING_ENTRY1(38), ROUTING_ENTRY1(39), ROUTING_ENTRY1(40), ROUTING_ENTRY1(41), ROUTING_ENTRY1(42), ROUTING_ENTRY1(43), ROUTING_ENTRY1(44), ROUTING_ENTRY1(45), ROUTING_ENTRY1(46), ROUTING_ENTRY1(47), #endif }; int <API key>(struct kvm *kvm) { return kvm_set_irq_routing(kvm, default_routing, ARRAY_SIZE(default_routing), 0); }
/*
 * Regression test for bug 327765: applying the `new` operator to a
 * Boolean *instance* (as opposed to the Boolean constructor) must throw,
 * because an object wrapper has no [[Construct]] internal method.
 * Relies on the jsref shell harness (startTest, writeHeaderToLog,
 * TestCase, test).
 */
var SECTION = "expression-014.js";
var VERSION = "ECMA_1";
var TITLE = "The new operator";
var BUGNUMBER = "327765";

startTest();
writeHeaderToLog(SECTION + " " + TITLE);

var tc = 0;
var testcases = new Array();

var BOOLEAN = new Boolean();
var expect = "Passed";
var result = "Failed";
var exception = "No exception thrown";

try {
    // A Boolean wrapper object is not constructible; this must throw.
    result = new BOOLEAN();
} catch (e) {
    result = expect;
    exception = e.toString();
}

testcases[tc++] = new TestCase(
    SECTION,
    "BOOLEAN = new Boolean(); result = new BOOLEAN()" + " (threw " + exception + ")",
    expect,
    result);

test();
/*
 * S3C24xx DMA core declarations: per-channel device mapping and board
 * channel-ordering hints.  Identifiers redacted as <API key> are
 * preserved verbatim.
 */
#include <plat/dma-core.h>

extern struct bus_type dma_subsys;
extern struct s3c2410_dma_chan s3c2410_chans[S3C_DMA_CHANNELS];

/* flag bits stored in the per-device channels[] entries below */
#define DMA_CH_VALID (1<<31)
#define DMA_CH_NEVER (1<<30)

/* struct s3c24xx_dma_map
 *
 * this holds the mapping information for the channel selected
 * to be connected to the specified device
 */
struct s3c24xx_dma_map {
	const char		*name;
	unsigned long		 channels[S3C_DMA_CHANNELS];
};

/* per-SoC channel-selection policy: map table plus a hook that programs
 * the hardware mux for a chosen channel/device pair */
struct <API key> {
	struct s3c24xx_dma_map	*map;
	unsigned long		 map_size;
	unsigned long		 dcon_mask;
	void			(*select)(struct s3c2410_dma_chan *chan,
					  struct s3c24xx_dma_map *map);
};

extern int <API key>(struct <API key> *sel);

/* struct <API key>
 *
 * channel map for one of the `enum dma_ch` dma channels. the list
 * entry contains a set of low-level channel numbers, orred with
 * DMA_CH_VALID, which are checked in the order in the array.
 */
struct <API key> {
	unsigned int	 list[S3C_DMA_CHANNELS];	/* list of channels */
	unsigned int	 flags;				/* flags */
};

/* struct s3c24xx_dma_order
 *
 * information provided by either the core or the board to give the
 * dma system a hint on how to allocate channels
 */
struct s3c24xx_dma_order {
	struct <API key>	 channels[DMACH_MAX];
};

extern int <API key>(struct s3c24xx_dma_order *map);

/* DMA init code, called from the cpu support code */
extern int s3c2410_dma_init(void);

extern int s3c24xx_dma_init(unsigned int channels, unsigned int irq,
			    unsigned int stride);
#ifndef <API key>
#define <API key>

#include "base/CCEventListener.h"

/**
 * @addtogroup base
 * @{
 */

NS_CC_BEGIN

class EventCustom;

/** @class EventListenerCustom
 * @brief Custom event listener.
 * @code Usage:
 *        auto dispatcher = Director::getInstance()->getEventDispatcher();
 *     Adds a listener:
 *
 *        auto callback = [](EventCustom* event){ do_some_thing(); };
 *        auto listener = EventListenerCustom::create(callback);
 *        dispatcher-><API key>(listener, one_node);
 *
 *     Dispatchs a custom event:
 *
 *        EventCustom event("your_event_type");
 *        dispatcher->dispatchEvent(&event);
 *
 *     Removes a listener
 *
 *        dispatcher->removeEventListener(listener);
 * \endcode
 * @js cc.<API key>
 */
class CC_DLL EventListenerCustom : public EventListener
{
public:
    /** Creates an event listener with type and callback.
     * @param eventName The type of the event.
     * @param callback The callback function when the specified event was emitted.
     * @return An autoreleased EventListenerCustom object.
     */
    static EventListenerCustom* create(const std::string& eventName, const std::function<void(EventCustom*)>& callback);

    /// Overrides
    virtual bool checkAvailable() override;
    virtual EventListenerCustom* clone() override;

<API key>:
    /** Constructor */
    EventListenerCustom();

    /** Initializes event with type and callback function */
    bool init(const ListenerID& listenerId, const std::function<void(EventCustom*)>& callback);

protected:
    // Invoked by the dispatcher when the matching custom event fires.
    std::function<void(EventCustom*)> _onCustomEvent;

    friend class <API key>;
};

NS_CC_END

// end of base group
/// @}

#endif /* defined(<API key>) */
"use strict";
/* TypeScript-emitted ES5 prototypal-inheritance helper (standard tsc output). */
var __extends = (this && this.__extends) || function (d, b) {
    for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p];
    function __() { this.constructor = d; }
    d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
var AsyncAction_1 = require('./AsyncAction');
var AsyncScheduler_1 = require('./AsyncScheduler');
/*
 * Virtual-time scheduler: actions are queued with a logical "frame"
 * (delay) instead of wall-clock time and are only run when flush() is
 * called.  NOTE: the class name is redacted as <API key> in this chunk;
 * its shape matches RxJS 5's VirtualTimeScheduler — TODO confirm.
 */
var <API key> = (function (_super) {
    __extends(<API key>, _super);
    function <API key>(SchedulerAction, maxFrames) {
        var _this = this;
        if (SchedulerAction === void 0) { SchedulerAction = VirtualAction; }
        if (maxFrames === void 0) { maxFrames = Number.POSITIVE_INFINITY; }
        // The "now" function reports the current virtual frame.
        _super.call(this, SchedulerAction, function () { return _this.frame; });
        this.maxFrames = maxFrames;
        this.frame = 0;
        this.index = -1;
    }
    /**
     * Prompt the Scheduler to execute all of its queued actions, therefore
     * clearing its queue.
     * @return {void}
     */
    <API key>.prototype.flush = function () {
        var _a = this, actions = _a.actions, maxFrames = _a.maxFrames;
        var error, action;
        // Assignments inside the condition are intentional: `this.frame`
        // advances to each dequeued action's delay, stopping past maxFrames.
        while ((action = actions.shift()) && (this.frame = action.delay) <= maxFrames) {
            if (error = action.execute(action.state, action.delay)) {
                break;
            }
        }
        if (error) {
            // On error, unsubscribe everything still queued before rethrowing.
            while (action = actions.shift()) {
                action.unsubscribe();
            }
            throw error;
        }
    };
    <API key>.frameTimeFactor = 10;
    return <API key>;
}(AsyncScheduler_1.AsyncScheduler));
exports.<API key> = <API key>;
/**
 * We need this JSDoc comment for affecting ESDoc.
 * @ignore
 * @extends {Ignored}
 */
var VirtualAction = (function (_super) {
    __extends(VirtualAction, _super);
    function VirtualAction(scheduler, work, index) {
        // Default index: take the scheduler's next sequence number.
        if (index === void 0) { index = scheduler.index += 1; }
        _super.call(this, scheduler, work);
        this.scheduler = scheduler;
        this.work = work;
        this.index = index;
        // NOTE(review): this second assignment makes the line above redundant
        // (same value); presumably leftover from a refactor — confirm upstream.
        this.index = scheduler.index = index;
    }
    VirtualAction.prototype.schedule = function (state, delay) {
        if (delay === void 0) { delay = 0; }
        if (!this.id) {
            return _super.prototype.schedule.call(this, state, delay);
        }
        // If an action is rescheduled, we save allocations by mutating its state,
        // pushing it to the end of the scheduler queue, and recycling the action.
        // But since the <API key> is used for testing, VirtualActions
        // must be immutable so they can be inspected later.
        var action = new VirtualAction(this.scheduler, this.work);
        this.add(action);
        return action.schedule(state, delay);
    };
    VirtualAction.prototype.requestAsyncId = function (scheduler, id, delay) {
        if (delay === void 0) { delay = 0; }
        // Instead of arming a timer, compute the absolute frame and insert
        // into the scheduler's queue in (delay, index) order.
        this.delay = scheduler.frame + delay;
        var actions = scheduler.actions;
        actions.push(this);
        actions.sort(VirtualAction.sortActions);
        // The truthy return stands in for an async id.
        return true;
    };
    VirtualAction.prototype.recycleAsyncId = function (scheduler, id, delay) {
        if (delay === void 0) { delay = 0; }
        // Nothing to cancel in virtual time.
        return undefined;
    };
    // Stable ordering: by frame first, then by creation index.
    VirtualAction.sortActions = function (a, b) {
        if (a.delay === b.delay) {
            if (a.index === b.index) {
                return 0;
            }
            else if (a.index > b.index) {
                return 1;
            }
            else {
                return -1;
            }
        }
        else if (a.delay > b.delay) {
            return 1;
        }
        else {
            return -1;
        }
    };
    return VirtualAction;
}(AsyncAction_1.AsyncAction));
exports.VirtualAction = VirtualAction;
//# sourceMappingURL=<API key>.js.map
#!/bin/sh test_description='git branch assorted tests' . ./test-lib.sh test_expect_success 'prepare a trivial repository' ' echo Hello >A && git update-index --add A && git commit -m "Initial commit." && echo World >>A && git update-index --add A && git commit -m "Second commit." && HEAD=$(git rev-parse --verify HEAD) ' test_expect_success 'git branch --help should not have created a bogus branch' ' test_might_fail git branch --man --help </dev/null >/dev/null 2>&1 && <API key> .git/refs/heads/--help ' test_expect_success 'branch -h in broken repository' ' mkdir broken && ( cd broken && git init && >.git/refs/heads/master && test_expect_code 129 git branch -h >usage 2>&1 ) && test_i18ngrep "[Uu]sage" broken/usage ' test_expect_success 'git branch abc should create a branch' ' git branch abc && test_path_is_file .git/refs/heads/abc ' test_expect_success 'git branch a/b/c should create a branch' ' git branch a/b/c && test_path_is_file .git/refs/heads/a/b/c ' test_expect_success 'git branch HEAD should fail' ' test_must_fail git branch HEAD ' cat >expect <<EOF $_z40 $HEAD $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150200 +0000 branch: Created from master EOF test_expect_success 'git branch -l d/e/f should create a branch and a log' ' GIT_COMMITTER_DATE="2005-05-26 23:30" \ git branch -l d/e/f && test_path_is_file .git/refs/heads/d/e/f && test_path_is_file .git/logs/refs/heads/d/e/f && test_cmp expect .git/logs/refs/heads/d/e/f ' test_expect_success 'git branch -d d/e/f should delete a branch and a log' ' git branch -d d/e/f && <API key> .git/refs/heads/d/e/f && <API key> .git/logs/refs/heads/d/e/f ' test_expect_success 'git branch j/k should work after branch j has been deleted' ' git branch j && git branch -d j && git branch j/k ' test_expect_success 'git branch l should work after branch l/m has been deleted' ' git branch l/m && git branch -d l/m && git branch l ' test_expect_success 'git branch -m dumps usage' ' test_expect_code 128 git branch -m 2>err && 
test_i18ngrep "branch name required" err ' test_expect_success 'git branch -m m m/m should work' ' git branch -l m && git branch -m m m/m && test_path_is_file .git/logs/refs/heads/m/m ' test_expect_success 'git branch -m n/n n should work' ' git branch -l n/n && git branch -m n/n n && test_path_is_file .git/logs/refs/heads/n ' test_expect_success 'git branch -m o/o o should fail when o/p exists' ' git branch o/o && git branch o/p && test_must_fail git branch -m o/o o ' test_expect_success 'git branch -m q r/q should fail when r exists' ' git branch q && git branch r && test_must_fail git branch -m q r/q ' test_expect_success 'git branch -M foo bar should fail when bar is checked out' ' git branch bar && git checkout -b foo && test_must_fail git branch -M bar foo ' test_expect_success 'git branch -M baz bam should succeed when baz is checked out' ' git checkout -b baz && git branch bam && git branch -M baz bam ' test_expect_success 'git branch -M master should work when master is checked out' ' git checkout master && git branch -M master ' test_expect_success 'git branch -M master master should work when master is checked out' ' git checkout master && git branch -M master master ' test_expect_success 'git branch -M master2 master2 should work when master is checked out' ' git checkout master && git branch master2 && git branch -M master2 master2 ' test_expect_success 'git branch -v -d t should work' ' git branch t && test_path_is_file .git/refs/heads/t && git branch -v -d t && <API key> .git/refs/heads/t ' test_expect_success 'git branch -v -m t s should work' ' git branch t && test_path_is_file .git/refs/heads/t && git branch -v -m t s && <API key> .git/refs/heads/t && test_path_is_file .git/refs/heads/s && git branch -d s ' test_expect_success 'git branch -m -d t s should fail' ' git branch t && test_path_is_file .git/refs/heads/t && test_must_fail git branch -m -d t s && git branch -d t && <API key> .git/refs/heads/t ' test_expect_success 'git branch --list -d t 
should fail' ' git branch t && test_path_is_file .git/refs/heads/t && test_must_fail git branch --list -d t && git branch -d t && <API key> .git/refs/heads/t ' test_expect_success 'git branch --column' ' COLUMNS=81 git branch --column=column >actual && cat >expected <<\EOF && a/b/c bam foo l * master n o/p r abc bar j/k m/m master2 o/o q EOF test_cmp expected actual ' test_expect_success 'git branch --column with an extremely long branch name' ' long=this/is/a/part/of/long/branch/name && long=z$long/$long/$long/$long && test_when_finished "git branch -d $long" && git branch $long && COLUMNS=80 git branch --column=column >actual && cat >expected <<EOF && a/b/c abc bam bar foo j/k l m/m * master master2 n o/o o/p q r $long EOF test_cmp expected actual ' test_expect_success 'git branch with column.*' ' git config column.ui column && git config column.branch "dense" && COLUMNS=80 git branch >actual && git config --unset column.branch && git config --unset column.ui && cat >expected <<\EOF && a/b/c bam foo l * master n o/p r abc bar j/k m/m master2 o/o q EOF test_cmp expected actual ' test_expect_success 'git branch --column -v should fail' ' test_must_fail git branch --column -v ' test_expect_success 'git branch -v with column.ui ignored' ' git config column.ui column && COLUMNS=80 git branch -v | cut -c -10 | sed "s/ *$//" >actual && git config --unset column.ui && cat >expected <<\EOF && a/b/c abc bam bar foo j/k l m/m * master master2 n o/o o/p q r EOF test_cmp expected actual ' mv .git/config .git/config-saved test_expect_success 'git branch -m q q2 without config should succeed' ' git branch -m q q2 && git branch -m q2 q ' mv .git/config-saved .git/config git config branch.s/s.dummy Hello test_expect_success 'git branch -m s/s s should work when s/t is deleted' ' git branch -l s/s && test_path_is_file .git/logs/refs/heads/s/s && git branch -l s/t && test_path_is_file .git/logs/refs/heads/s/t && git branch -d s/t && git branch -m s/s s && test_path_is_file 
.git/logs/refs/heads/s ' test_expect_success 'config information was renamed, too' ' test $(git config branch.s.dummy) = Hello && test_must_fail git config branch.s/s/dummy ' test_expect_success 'deleting a symref' ' git branch target && git symbolic-ref refs/heads/symref refs/heads/target && echo "Deleted branch symref (was refs/heads/target)." >expect && git branch -d symref >actual && test_path_is_file .git/refs/heads/target && <API key> .git/refs/heads/symref && test_i18ncmp expect actual ' test_expect_success 'deleting a dangling symref' ' git symbolic-ref refs/heads/dangling-symref nowhere && test_path_is_file .git/refs/heads/dangling-symref && echo "Deleted branch dangling-symref (was nowhere)." >expect && git branch -d dangling-symref >actual && <API key> .git/refs/heads/dangling-symref && test_i18ncmp expect actual ' test_expect_success 'renaming a symref is not allowed' ' git symbolic-ref refs/heads/master2 refs/heads/master && test_must_fail git branch -m master2 master3 && git symbolic-ref refs/heads/master2 && test_path_is_file .git/refs/heads/master && <API key> .git/refs/heads/master3 ' test_expect_success SYMLINKS 'git branch -m u v should fail when the reflog for u is a symlink' ' git branch -l u && mv .git/logs/refs/heads/u real-u && ln -s real-u .git/logs/refs/heads/u && test_must_fail git branch -m u v ' test_expect_success 'test tracking setup via --track' ' git config remote.local.url . &&
/*
 * Tegra baseband (modem) power-management platform driver: sets up
 * BB-specific GPIOs/IRQs via a per-modem callback table, exposes a
 * "load" sysfs attribute, and tracks the modem's USB hot-plug events.
 * Identifiers redacted as <API key> are preserved verbatim.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/usb.h>
#include <linux/uaccess.h>
#include <linux/platform_data/tegra_usb.h>
#include <mach/usb_phy.h>
#include <mach/tegra-bb-power.h>
#include "bb-power.h"

/* Per-modem callback table obtained in probe() via get_cblist[]. */
static struct tegra_bb_callback *callback;
/* Last value accepted through the "load" sysfs attribute. */
static int attr_load_val;
/* Modem USB identity/configuration from the BB init callback. */
static struct <API key> *mdata;
/* Indexed by pdata->bb_id; only the M7400 slot is populated. */
static bb_get_cblist get_cblist[] = {
	NULL,
	NULL,
	NULL,
	M7400_CB,
};

/* Request, configure, optionally export, and wire up IRQs for every GPIO
 * in gdata; returns 0 or the first failing errno. */
static int <API key>(struct <API key> *gdata)
{
	int ret;
	int irq;
	unsigned gpio_id;
	const char *gpio_label;
	unsigned long gpio_flags;
	struct tegra_bb_gpio_data *gpiolist;
	struct <API key> *gpioirq;

	gpiolist = gdata->gpio;
	for (; gpiolist->data.gpio != GPIO_INVALID; ++gpiolist) {
		gpio_id = (gpiolist->data.gpio);
		gpio_label = (gpiolist->data.label);
		gpio_flags = (gpiolist->data.flags);

		/* Request the gpio */
		ret = gpio_request(gpio_id, gpio_label);
		if (ret) {
			pr_err("%s: Error: gpio_request for gpio %d failed.\n",
							__func__, gpio_id);
			return ret;
		}

		/* Set gpio direction, as requested */
		if (gpio_flags == GPIOF_IN)
			<API key>(gpio_id);
		else
			<API key>(gpio_id, (!gpio_flags ? 0 : 1));

		/* Create a sysfs node, if requested */
		if (gpiolist->doexport)
			gpio_export(gpio_id, false);
	}

	gpioirq = gdata->gpioirq;
	for (; gpioirq->id != GPIO_INVALID; ++gpioirq) {
		/* Create interrupt handler, if requested */
		if (gpioirq->handler != NULL) {
			irq = gpio_to_irq(gpioirq->id);
			ret = <API key>(irq, NULL,
				gpioirq->handler, gpioirq->flags,
				gpioirq->name, gpioirq->cookie);
			if (ret < 0) {
				pr_err("%s: Error: threaded_irq req fail.\n",
								__func__);
				return ret;
			}
			if (gpioirq->wake_capable) {
				ret = enable_irq_wake(irq);
				if (ret) {
					pr_err("%s: Error: irqwake req fail.\n",
								__func__);
					return ret;
				}
			}
		}
	}
	return 0;
}

/* Undo the GPIO/IRQ setup above. */
static int <API key>(struct <API key> *gdata)
{
	struct tegra_bb_gpio_data *gpiolist;
	struct <API key> *gpioirq;

	gpiolist = gdata->gpio;
	for (; gpiolist->data.gpio != GPIO_INVALID; ++gpiolist) {
		/* Free the gpio */
		gpio_free(gpiolist->data.gpio);
	}

	gpioirq = gdata->gpioirq;
	for (; gpioirq->id != GPIO_INVALID; ++gpioirq) {
		/* Free the irq */
		free_irq(gpio_to_irq(gpioirq->id), gpioirq->cookie);
	}
	return 0;
}

/* sysfs "load" store: forward the integer to the BB attrib callback and
 * remember it only if the callback accepts it. */
static ssize_t tegra_bb_attr_write(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int val;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (callback && callback->attrib) {
		if (!callback->attrib(dev, val))
			attr_load_val = val;
	}
	return count;
}

/* sysfs "load" show: report the last accepted value. */
static ssize_t tegra_bb_attr_read(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d", attr_load_val);
}

static DEVICE_ATTR(load, S_IRUSR | S_IWUSR | S_IRGRP,
			tegra_bb_attr_read, tegra_bb_attr_write);

/* USB hot-plug: configure wake/autosuspend and notify the BB layer when
 * the modem (matched by vid/pid) enumerates. */
static void <API key>(struct usb_device *udev)
{
	const struct <API key> *desc = &udev->descriptor;

	if (desc->idVendor == mdata->vid &&
	    desc->idProduct == mdata->pid) {
		/* NOTE(review): arguments look swapped relative to the format
		 * ("%s: Device %s added.") — presumably __func__ should come
		 * first; confirm before relying on this log text. */
		pr_debug("%s: Device %s added.\n", udev->product, __func__);
		if (mdata->wake_capable)
			<API key>(&udev->dev, true);
		if (mdata->autosuspend_ready)
			<API key>(udev);
		if (mdata->reg_cb)
			mdata->reg_cb(udev);
	}
}

static void <API key>(struct usb_device *udev)
{
	const struct <API key> *desc = &udev->descriptor;

	if (desc->idVendor == mdata->vid &&
	    desc->idProduct == mdata->pid) {
		/* NOTE(review): same apparent argument swap as above. */
		pr_debug("%s: Device %s removed.\n", udev->product, __func__);
	}
}

/* usb core notifier: dispatch add/remove to the handlers above. */
static int tegra_usb_notify(struct notifier_block *self,
			unsigned long action, void *dev)
{
	switch (action) {
	case USB_DEVICE_ADD:
		<API key>((struct usb_device *)dev);
		break;
	case USB_DEVICE_REMOVE:
		<API key>((struct usb_device *)dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block tegra_usb_nb = {
	.notifier_call = tegra_usb_notify,
};

/* Probe: resolve the BB callback table from pdata->bb_id, run its init,
 * set up GPIOs, register the USB notifier, and create the sysfs node. */
static int <API key>(struct platform_device *device)
{
	struct device *dev = &device->dev;
	struct tegra_bb_pdata *pdata;
	struct tegra_bb_power_data *data;
	struct <API key> *gdata;
	int err;
	unsigned int bb_id;

	pdata = (struct tegra_bb_pdata *) dev->platform_data;
	if (!pdata) {
		pr_err("%s - Error: platform data is empty.\n", __func__);
		return -ENODEV;
	}

	/* Obtain BB specific callback list */
	bb_id = pdata->bb_id;
	if (get_cblist[bb_id] != NULL) {
		callback = (struct tegra_bb_callback *) get_cblist[bb_id]();
		if (callback && callback->init) {
			data = (struct tegra_bb_power_data *)
					callback->init((void *)pdata);

			gdata = data->gpio_data;
			if (!gdata) {
				pr_err("%s - Error: Gpio data is empty.\n",
								__func__);
				return -ENODEV;
			}

			/* Initialize gpio as required */
			<API key>(gdata);

			mdata = data->modem_data;
			if (mdata && mdata->vid && mdata->pid)
				/* Register to notifications from usb core */
				usb_register_notify(&tegra_usb_nb);
		} else {
			pr_err("%s - Error: init callback is empty.\n",
								__func__);
			return -ENODEV;
		}
	} else {
		pr_err("%s - Error: callback data is empty.\n", __func__);
		return -ENODEV;
	}

	/* Create the control sysfs node */
	err = device_create_file(dev, &dev_attr_load);
	if (err < 0) {
		pr_err("%s - Error: device_create_file failed.\n", __func__);
		return -ENODEV;
	}
	attr_load_val = 0;

	return 0;
}

/* Remove: run the BB deinit callback, tear down GPIOs and the notifier,
 * and delete the sysfs node. */
static int <API key>(struct platform_device *device)
{
	struct device *dev = &device->dev;
	struct tegra_bb_power_data *data;
	struct <API key> *gdata;

	/* BB specific callback */
	if (callback && callback->deinit) {
		data = (struct tegra_bb_power_data *) callback->deinit();

		/* Deinitialize gpios */
		gdata = data->gpio_data;
		if (gdata)
			<API key>(gdata);
		else {
			pr_err("%s - Error: Gpio data is empty.\n", __func__);
			return -ENODEV;
		}

		mdata = data->modem_data;
		if (mdata && mdata->vid && mdata->pid)
			/* Register to notifications from usb core */
			<API key>(&tegra_usb_nb);
	}

	/* Remove the control sysfs node */
	device_remove_file(dev, &dev_attr_load);
	return 0;
}

#ifdef CONFIG_PM
/* Forward system suspend/resume to the BB power callback. */
static int <API key>(struct platform_device *device,
	pm_message_t state)
{
	/* BB specific callback */
	if (callback && callback->power)
		callback->power(PWRSTATE_L2L3);
	return 0;
}

static int <API key>(struct platform_device *device)
{
	/* BB specific callback */
	if (callback && callback->power)
		callback->power(PWRSTATE_L3L0);
	return 0;
}
#endif

static struct platform_driver <API key> = {
	.probe = <API key>,
	.remove = <API key>,
#ifdef CONFIG_PM
	.suspend = <API key>,
	.resume = <API key>,
#endif
	.driver = {
		.name = "<API key>",
	},
};

static int __init <API key>(void)
{
	pr_debug("%s\n", __func__);
	return <API key>(&<API key>);
}

static void __exit <API key>(void)
{
	pr_debug("%s\n", __func__);
	<API key>(&<API key>);
}

module_init(<API key>)
module_exit(<API key>)
MODULE_AUTHOR("NVIDIA Corporation");
MODULE_DESCRIPTION("Tegra modem power management driver");
MODULE_LICENSE("GPL");
/*
 * i915 GEM mmap selftests: verify partial GGTT mappings of a huge object
 * under each tiling/swizzle mode, and exhaustion of the mmap-offset space.
 * Identifiers redacted as <API key> are preserved verbatim; the chunk is
 * truncated inside gtt_check() at the end.
 */
#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

/* One tiling configuration under test. */
struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

/* Move the given address bit down to bit 6 (the swizzled cacheline bit). */
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

/* Reference (software) tiled-address computation used to predict where a
 * linear offset v lands for the given tile geometry and swizzle mode. */
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case <API key>:
		v ^= swizzle_bit(9, v);
		break;
	case <API key>:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case <API key>:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case <API key>:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

/* Write through one randomly chosen partial GGTT view and verify the
 * write lands at the page predicted by tiled_offset(). */
static int <API key>(struct drm_i915_gem_object *obj,
		     const struct tile *tile,
		     struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = <API key>(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}
	GEM_BUG_ON(<API key>(obj) != tile->tiling);
	GEM_BUG_ON(<API key>(obj) != tile->stride);

	<API key>(obj, NULL);
	err = <API key>(obj, true);
	<API key>(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = <API key>(npages, prng);
	view = <API key>(obj, page, MIN_CHUNK_PAGES);

	vma = <API key>(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	/* Write the page's own index through the GTT view... */
	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	<API key>(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	<API key>(&to_i915(obj->base.dev)->gt);

	/* ...and read it back through the CPU at the predicted offset. */
	p = <API key>(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	<API key>(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	<API key>(cpu, sizeof(*cpu));
	kunmap(p);

out:
	__i915_vma_put(vma);
	return err;
}

/* Exhaustive variant: walk every prime page index up to npages, checking
 * each partial view, until end_time expires. */
static int <API key>(struct drm_i915_gem_object *obj,
		     const struct tile *tile,
		     unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = <API key>(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(<API key>(obj) != tile->tiling);
	GEM_BUG_ON(<API key>(obj) != tile->stride);

	<API key>(obj, NULL);
	err = <API key>(obj, true);
	<API key>(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	<API key>(page, 1, npages) {
		struct i915_ggtt_view view =
			<API key>(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = <API key>(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		<API key>(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		<API key>(&to_i915(obj->base.dev)->gt);

		p = <API key>(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		<API key>(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		<API key>(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		__i915_vma_put(vma);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

/* Fill in tile geometry for the platform; returns the maximum pitch (in
 * tile widths) supported by the fence hardware. */
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   <API key>(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * <API key> / tile->width;
	else
		return 128 * <API key> / tile->width;
}

/* Exhaustively sweep tilings and pitches with the timed checker above. */
static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!<API key>(&i915->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmaped as a whole, and so we must use partial GGTT vma.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = <API key>(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = <API key>(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = <API key>;
		tile.tiling = I915_TILING_NONE;

		err = <API key>(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & <API key>)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See <API key>().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == <API key>);
		if (tile.swizzle == <API key> ||
		    tile.swizzle == <API key>)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		/* Test each power-of-two pitch, and its off-by-one neighbours. */
		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = <API key>(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = <API key>(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = <API key>(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (GRAPHICS_VER(i915) >= 4) {
			<API key>(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = <API key>(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling:
		;
	}

out_unlock:
	<API key>(&i915->runtime_pm, wakeref);
	<API key>(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/* Randomised variant of the sweep above, bounded to ~1s. */
static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!<API key>(&i915->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhastive check of partial tiling
	 * chunking, but will undoubtably run out of time. Here, we do a
	 * randomised search and hope over many runs of 1s with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->quirks & <API key>)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = <API key>(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = <API key>(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling = <API key>(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = <API key>;
			break;

		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == <API key> ||
		    tile.swizzle == <API key>)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride = <API key>(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (GRAPHICS_VER(i915) < 4)
				tile.stride = <API key>(tile.stride);
		}

		err = <API key>(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	<API key>(&i915->runtime_pm, wakeref);
	<API key>(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/* Submit a write from every engine so the object stays busy, then drop
 * our reference, leaving it alive only via its active GPU references. */
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	<API key>(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		<API key>(&ww, false);
retry:
		err = <API key>(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto err;

		rq = <API key>(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = <API key>(rq, vma->obj, true);
		if (err == 0)
			err = <API key>(vma, rq, EXEC_OBJECT_WRITE);

		i915_request_add(rq);
err_unpin:
		i915_vma_unpin(vma);
err:
		if (err == -EDEADLK) {
			err = <API key>(&ww);
			if (!err)
				goto retry;
		}
		<API key>(&ww);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

/* Pick the mmap type appropriate for the platform (local vs GTT memory). */
static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
{
	if (HAS_LMEM(i915))
		return <API key>;

	return I915_MMAP_TYPE_GTT;
}

static struct drm_i915_gem_object *
<API key>(struct drm_i915_private *i915, unsigned long size)
{
	if (HAS_LMEM(i915)) {
		struct intel_memory_region *sys_region =
			i915->mm.regions[INTEL_REGION_SMEM];

		return <API key>(i915, size, &sys_region, 1);
	}

	return <API key>(i915, size);
}

/* Create an object of the given size and check that acquiring an mmap
 * offset for it yields the expected errno (0 on success). */
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	u64 offset;
	int ret;

	obj = <API key>(i915, size);
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);

	ret = <API key>(obj, default_mapping(i915), &offset, NULL);
	i915_gem_object_put(obj);

	return ret == expected;
}

/* Park/unpark the background retire worker around the exhaustion test. */
static void <API key>(struct drm_i915_private *i915)
{
	<API key>(i915);
	intel_gt_pm_get(&i915->gt);
	<API key>(&i915->gt.requests.retire_work);
}

static void <API key>(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(&i915->gt);
	<API key>(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

/* Shrink the mmap-offset address space to a single page, then verify
 * allocation succeeds exactly once and fails thereafter, including while
 * busy objects are awaiting reaping. */
static int <API key>(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	int loop, err = 0;
	u64 offset;
	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;

	/* Disable background reaper */
	<API key>(i915);
	GEM_BUG_ON(!i915->gt.awake);
	<API key>(&i915->gt);
	<API key>(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	<API key>(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = <API key>(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = <API key>(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("Unable to create object for reclaimed hole\n");
		goto out;
	}

	err = <API key>(obj, default_mapping(i915), &offset, NULL);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(&i915->gt))
			break;

		obj = <API key>(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	/* Remove the reserved nodes (color == -1ul) we inserted above. */
	<API key>(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	<API key>(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

/* Poison the object's backing store through a GTT mapping. */
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = <API key>(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	<API key>(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

/* NOTE: SOURCE is truncated inside this function; the fragment below is
 * preserved exactly as found. */
static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = <API
key>(obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) return PTR_ERR(vma); intel_gt_pm_get(vma->vm->gt); map = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); if (IS_ERR(map)) { err = PTR_ERR(map); goto out; } if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) { pr_err("%s: Write via mmap did not land in backing store (GTT)\n", obj->mm.region->name); err = -EINVAL; } <API key>(vma); out: intel_gt_pm_put(vma->vm->gt); return err; } static int wc_set(struct drm_i915_gem_object *obj) { void *vaddr; vaddr = <API key>(obj, I915_MAP_WC); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); memset(vaddr, POISON_INUSE, obj->base.size); <API key>(obj); <API key>(obj); return 0; } static int wc_check(struct drm_i915_gem_object *obj) { void *vaddr; int err = 0; vaddr = <API key>(obj, I915_MAP_WC); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) { pr_err("%s: Write via mmap did not land in backing store (WC)\n", obj->mm.region->name); err = -EINVAL; } <API key>(obj); return err; } static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) { bool no_map; if (obj->ops->mmap_offset) return type == <API key>; else if (type == <API key>) return false; if (type == I915_MMAP_TYPE_GTT && !<API key>(&to_i915(obj->base.dev)->ggtt)) return false; <API key>(obj, NULL); no_map = (type != I915_MMAP_TYPE_GTT && !<API key>(obj) && !<API key>(obj)); <API key>(obj); return !no_map; } #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24)) static int __igt_mmap(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, enum i915_mmap_type type) { struct vm_area_struct *area; unsigned long addr; int err, i; u64 offset; if (!can_mmap(obj, type)) return 0; err = wc_set(obj); if (err == -ENXIO) err = gtt_set(obj); if (err) return err; err = <API key>(obj, type, &offset, NULL); if (err) return err; addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return 
addr; pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr); mmap_read_lock(current->mm); area = vma_lookup(current->mm, addr); mmap_read_unlock(current->mm); if (!area) { pr_err("%s: Did not create a vm_area_struct for the mmap\n", obj->mm.region->name); err = -EINVAL; goto out_unmap; } for (i = 0; i < obj->base.size / sizeof(u32); i++) { u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux))); u32 x; if (get_user(x, ux)) { pr_err("%s: Unable to read from mmap, offset:%zd\n", obj->mm.region->name, i * sizeof(x)); err = -EFAULT; goto out_unmap; } if (x != expand32(POISON_INUSE)) { pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n", obj->mm.region->name, i * sizeof(x), x, expand32(POISON_INUSE)); err = -EINVAL; goto out_unmap; } x = expand32(POISON_FREE); if (put_user(x, ux)) { pr_err("%s: Unable to write to mmap, offset:%zd\n", obj->mm.region->name, i * sizeof(x)); err = -EFAULT; goto out_unmap; } } if (type == I915_MMAP_TYPE_GTT) <API key>(&i915->gt); err = wc_check(obj); if (err == -ENXIO) err = gtt_check(obj); out_unmap: vm_munmap(addr, obj->base.size); return err; } static int igt_mmap(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *mr; enum intel_region_id id; <API key>(mr, i915, id) { unsigned long sizes[] = { PAGE_SIZE, mr->min_page_size, SZ_4M, }; int i; for (i = 0; i < ARRAY_SIZE(sizes); i++) { struct drm_i915_gem_object *obj; int err; obj = <API key>(i915, sizes[i], &mr, 1); if (obj == ERR_PTR(-ENODEV)) continue; if (IS_ERR(obj)) return PTR_ERR(obj); err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC); if (err == 0) err = __igt_mmap(i915, obj, <API key>); i915_gem_object_put(obj); if (err) return err; } } return 0; } static const char *repr_mmap_type(enum i915_mmap_type type) { switch (type) { case I915_MMAP_TYPE_GTT: return "gtt"; case I915_MMAP_TYPE_WB: return "wb"; case I915_MMAP_TYPE_WC: return "wc"; case 
I915_MMAP_TYPE_UC: return "uc"; case <API key>: return "fixed"; default: return "unknown"; } } static bool can_access(struct drm_i915_gem_object *obj) { bool access; <API key>(obj, NULL); access = <API key>(obj) || <API key>(obj); <API key>(obj); return access; } static int __igt_mmap_access(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, enum i915_mmap_type type) { unsigned long __user *ptr; unsigned long A, B; unsigned long x, y; unsigned long addr; int err; u64 offset; memset(&A, 0xAA, sizeof(A)); memset(&B, 0xBB, sizeof(B)); if (!can_mmap(obj, type) || !can_access(obj)) return 0; err = <API key>(obj, type, &offset, NULL); if (err) return err; addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return addr; ptr = (unsigned long __user *)addr; err = __put_user(A, ptr); if (err) { pr_err("%s(%s): failed to write into user mmap\n", obj->mm.region->name, repr_mmap_type(type)); goto out_unmap; } <API key>(&i915->gt); err = access_process_vm(current, addr, &x, sizeof(x), 0); if (err != sizeof(x)) { pr_err("%s(%s): access_process_vm() read failed\n", obj->mm.region->name, repr_mmap_type(type)); goto out_unmap; } err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE); if (err != sizeof(B)) { pr_err("%s(%s): access_process_vm() write failed\n", obj->mm.region->name, repr_mmap_type(type)); goto out_unmap; } <API key>(&i915->gt); err = __get_user(y, ptr); if (err) { pr_err("%s(%s): failed to read from user mmap\n", obj->mm.region->name, repr_mmap_type(type)); goto out_unmap; } if (x != A || y != B) { pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n", obj->mm.region->name, repr_mmap_type(type), x, y); err = -EINVAL; goto out_unmap; } out_unmap: vm_munmap(addr, obj->base.size); return err; } static int igt_mmap_access(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *mr; enum intel_region_id id; <API key>(mr, i915, id) { struct drm_i915_gem_object *obj; int 
err; obj = <API key>(i915, PAGE_SIZE, &mr, 1); if (obj == ERR_PTR(-ENODEV)) continue; if (IS_ERR(obj)) return PTR_ERR(obj); err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB); if (err == 0) err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC); if (err == 0) err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC); if (err == 0) err = __igt_mmap_access(i915, obj, <API key>); i915_gem_object_put(obj); if (err) return err; } return 0; } static int __igt_mmap_gpu(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, enum i915_mmap_type type) { struct intel_engine_cs *engine; unsigned long addr; u32 __user *ux; u32 bbe; int err; u64 offset; /* * Verify that the mmap access into the backing store aligns with * that of the GPU, i.e. that mmap is indeed writing into the same * page as being read by the GPU. */ if (!can_mmap(obj, type)) return 0; err = wc_set(obj); if (err == -ENXIO) err = gtt_set(obj); if (err) return err; err = <API key>(obj, type, &offset, NULL); if (err) return err; addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return addr; ux = u64_to_user_ptr((u64)addr); bbe = MI_BATCH_BUFFER_END; if (put_user(bbe, ux)) { pr_err("%s: Unable to write to mmap\n", obj->mm.region->name); err = -EFAULT; goto out_unmap; } if (type == I915_MMAP_TYPE_GTT) <API key>(&i915->gt); <API key>(engine, i915) { struct i915_request *rq; struct i915_vma *vma; struct i915_gem_ww_ctx ww; vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_unmap; } <API key>(&ww, false); retry: err = <API key>(obj, &ww); if (!err) err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER); if (err) goto out_ww; rq = i915_request_create(engine->kernel_context); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_unpin; } err = <API key>(rq, vma->obj, false); if (err == 0) err = <API key>(vma, rq, 0); err = 
engine->emit_bb_start(rq, vma->node.start, 0, 0); i915_request_get(rq); i915_request_add(rq); if (i915_request_wait(rq, 0, HZ / 5) < 0) { struct drm_printer p = drm_info_printer(engine->i915->drm.dev); pr_err("%s(%s, %s): Failed to execute batch\n", __func__, engine->name, obj->mm.region->name); intel_engine_dump(engine, &p, "%s\n", engine->name); intel_gt_set_wedged(engine->gt); err = -EIO; } i915_request_put(rq); out_unpin: i915_vma_unpin(vma); out_ww: if (err == -EDEADLK) { err = <API key>(&ww); if (!err) goto retry; } <API key>(&ww); if (err) goto out_unmap; } out_unmap: vm_munmap(addr, obj->base.size); return err; } static int igt_mmap_gpu(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *mr; enum intel_region_id id; <API key>(mr, i915, id) { struct drm_i915_gem_object *obj; int err; obj = <API key>(i915, PAGE_SIZE, &mr, 1); if (obj == ERR_PTR(-ENODEV)) continue; if (IS_ERR(obj)) return PTR_ERR(obj); err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC); if (err == 0) err = __igt_mmap_gpu(i915, obj, <API key>); i915_gem_object_put(obj); if (err) return err; } return 0; } static int check_present_pte(pte_t *pte, unsigned long addr, void *data) { if (!pte_present(*pte) || pte_none(*pte)) { pr_err("missing PTE:%lx\n", (addr - (unsigned long)data) >> PAGE_SHIFT); return -EINVAL; } return 0; } static int check_absent_pte(pte_t *pte, unsigned long addr, void *data) { if (pte_present(*pte) && !pte_none(*pte)) { pr_err("present PTE:%lx; expected to be revoked\n", (addr - (unsigned long)data) >> PAGE_SHIFT); return -EINVAL; } return 0; } static int check_present(unsigned long addr, unsigned long len) { return apply_to_page_range(current->mm, addr, len, check_present_pte, (void *)addr); } static int check_absent(unsigned long addr, unsigned long len) { return apply_to_page_range(current->mm, addr, len, check_absent_pte, (void *)addr); } static int prefault_range(u64 start, u64 
len) { const char __user *addr, *end; char __maybe_unused c; int err; addr = u64_to_user_ptr(start); end = addr + len; for (; addr < end; addr += PAGE_SIZE) { err = __get_user(c, addr); if (err) return err; } return __get_user(c, end - 1); } static int __igt_mmap_revoke(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, enum i915_mmap_type type) { unsigned long addr; int err; u64 offset; if (!can_mmap(obj, type)) return 0; err = <API key>(obj, type, &offset, NULL); if (err) return err; addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) return addr; err = prefault_range(addr, obj->base.size); if (err) goto out_unmap; err = check_present(addr, obj->base.size); if (err) { pr_err("%s: was not present\n", obj->mm.region->name); goto out_unmap; } /* * After unbinding the object from the GGTT, its address may be reused * for other objects. Ergo we have to revoke the previous mmap PTE * access as it no longer points to the same object. 
*/ err = <API key>(obj, <API key>); if (err) { pr_err("Failed to unbind object!\n"); goto out_unmap; } if (type != I915_MMAP_TYPE_GTT) { <API key>(obj, NULL); <API key>(obj); <API key>(obj); if (<API key>(obj)) { pr_err("Failed to put-pages object!\n"); err = -EINVAL; goto out_unmap; } } if (!obj->ops->mmap_ops) { err = check_absent(addr, obj->base.size); if (err) { pr_err("%s: was not absent\n", obj->mm.region->name); goto out_unmap; } } else { /* ttm allows access to evicted regions by design */ err = check_present(addr, obj->base.size); if (err) { pr_err("%s: was not present\n", obj->mm.region->name); goto out_unmap; } } out_unmap: vm_munmap(addr, obj->base.size); return err; } static int igt_mmap_revoke(void *arg) { struct drm_i915_private *i915 = arg; struct intel_memory_region *mr; enum intel_region_id id; <API key>(mr, i915, id) { struct drm_i915_gem_object *obj; int err; obj = <API key>(i915, PAGE_SIZE, &mr, 1); if (obj == ERR_PTR(-ENODEV)) continue; if (IS_ERR(obj)) return PTR_ERR(obj); err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT); if (err == 0) err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC); if (err == 0) err = __igt_mmap_revoke(i915, obj, <API key>); i915_gem_object_put(obj); if (err) return err; } return 0; } int <API key>(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_partial_tiling), SUBTEST(igt_smoke_tiling), SUBTEST(<API key>), SUBTEST(igt_mmap), SUBTEST(igt_mmap_access), SUBTEST(igt_mmap_revoke), SUBTEST(igt_mmap_gpu), }; return i915_subtests(tests, i915); }
# Makefile for the Linux kernel device drivers. # 15 Sep 2000, Christoph Hellwig <hch@infradead.org> # Rewritten to use lists instead of if-statements. obj-y += gpio/ obj-$(CONFIG_PCI) += pci/ obj-$(CONFIG_PARISC) += parisc/ obj-$(CONFIG_RAPIDIO) += rapidio/ obj-y += video/ obj-y += idle/ obj-$(CONFIG_ACPI) += acpi/ obj-$(CONFIG_SFI) += sfi/ # PnP must come after ACPI since it will eventually need to check if acpi # was used and do nothing if so obj-$(CONFIG_PNP) += pnp/ obj-$(CONFIG_ARM_AMBA) += amba/ obj-$(CONFIG_VIRTIO) += virtio/ obj-$(CONFIG_XEN) += xen/ # regulators early, since some subsystems rely on them to initialize obj-$(CONFIG_REGULATOR) += regulator/ # char/ comes before serial/ etc so that the VT console is the boot-time # default. obj-y += char/ # gpu/ comes after char for AGP vs DRM startup obj-y += gpu/ obj-$(CONFIG_CONNECTOR) += connector/ # i810fb and intelfb depend on char/agp/ obj-$(CONFIG_FB_I810) += video/i810/ obj-$(CONFIG_FB_INTEL) += video/intelfb/ obj-y += serial/ obj-$(CONFIG_PARPORT) += parport/ obj-y += base/ block/ misc/ mfd/ obj-$(CONFIG_NUBUS) += nubus/ obj-y += macintosh/ obj-$(CONFIG_IDE) += ide/ obj-$(CONFIG_SCSI) += scsi/ obj-$(CONFIG_ATA) += ata/ obj-$(CONFIG_MTD) += mtd/ obj-$(CONFIG_SPI) += spi/ obj-y += net/ obj-$(CONFIG_ATM) += atm/ obj-$(CONFIG_FUSION) += message/ obj-y += firewire/ obj-y += ieee1394/ obj-$(CONFIG_UIO) += uio/ obj-y += cdrom/ obj-y += auxdisplay/ obj-$(CONFIG_PCCARD) += pcmcia/ obj-$(CONFIG_DIO) += dio/ obj-$(CONFIG_SBUS) += sbus/ obj-$(CONFIG_ZORRO) += zorro/ obj-$(CONFIG_MAC) += macintosh/ obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/ obj-$(CONFIG_PARIDE) += block/paride/ obj-$(CONFIG_TC) += tc/ obj-$(CONFIG_UWB) += uwb/ obj-$(<API key>) += usb/otg/ obj-$(CONFIG_USB) += usb/ obj-$(<API key>) += usb/musb/ obj-$(CONFIG_PCI) += usb/ obj-$(CONFIG_USB_GADGET) += usb/gadget/ obj-$(CONFIG_SERIO) += input/serio/ obj-$(CONFIG_GAMEPORT) += input/gameport/ obj-$(CONFIG_INPUT) += input/ obj-$(CONFIG_I2O) += message/ 
obj-$(CONFIG_RTC_LIB) += rtc/ obj-y += i2c/ media/ obj-$(CONFIG_PPS) += pps/ obj-$(CONFIG_W1) += w1/ obj-$(CONFIG_POWER_SUPPLY) += power/ obj-$(CONFIG_HWMON) += hwmon/ obj-$(CONFIG_THERMAL) += thermal/ obj-$(CONFIG_WATCHDOG) += watchdog/ obj-$(CONFIG_PHONE) += telephony/ obj-$(CONFIG_MD) += md/ obj-$(CONFIG_BT) += bluetooth/ obj-$(<API key>) += accessibility/ obj-$(CONFIG_ISDN) += isdn/ obj-$(CONFIG_EDAC) += edac/ obj-$(CONFIG_MCA) += mca/ obj-$(CONFIG_EISA) += eisa/ obj-y += lguest/ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_CPU_IDLE) += cpuidle/ obj-$(CONFIG_MMC) += mmc/ obj-$(CONFIG_MEMSTICK) += memstick/ obj-$(CONFIG_NEW_LEDS) += leds/ obj-$(CONFIG_INFINIBAND) += infiniband/ obj-$(CONFIG_SGI_SN) += sn/ obj-y += firmware/ obj-$(CONFIG_CRYPTO) += crypto/ obj-$(CONFIG_SUPERH) += sh/ obj-$(<API key>) += sh/ ifndef <API key> obj-y += clocksource/ endif obj-$(CONFIG_DMA_ENGINE) += dma/ obj-$(CONFIG_DCA) += dca/ obj-$(CONFIG_HID) += hid/ obj-$(CONFIG_PPC_PS3) += ps3/ obj-$(CONFIG_OF) += of/ obj-$(CONFIG_SSB) += ssb/ obj-$(CONFIG_VHOST_NET) += vhost/ obj-$(CONFIG_VLYNQ) += vlynq/ obj-$(CONFIG_STAGING) += staging/ obj-y += platform/ obj-y += ieee802154/
#ifndef <API key> #define <API key> #include <gnuradio/filter/api.h> #include <gnuradio/filter/firdes.h> #include <gnuradio/sync_block.h> #include <gnuradio/types.h> namespace gr { namespace filter { /*! * \brief Hilbert transformer. * \ingroup filter_blk * * \details * real output is input appropriately delayed. * imaginary output is hilbert filtered (90 degree phase shift) * version of input. */ class FILTER_API hilbert_fc : virtual public sync_block { public: // gr::filter::hilbert_fc::sptr typedef boost::shared_ptr<hilbert_fc> sptr; /*! * Build a Hilbert transformer filter block. * * \param ntaps The number of taps for the filter. * \param window Window type (see firdes::win_type) to use. * \param beta Beta value for a Kaiser window. */ static sptr make(unsigned int ntaps, firdes::win_type window=firdes::WIN_HAMMING, double beta=6.76); }; } /* namespace filter */ } /* namespace gr */ #endif /* <API key> */
#include "chrome/browser/extensions/<API key>.h" #include "chrome/browser/extensions/menu_manager.h" #include "chrome/browser/profiles/profile.h" #include "components/keyed_service/content/<API key>.h" #include "extensions/browser/extension_system.h" #include "extensions/browser/<API key>.h" #include "extensions/browser/<API key>.h" namespace extensions { // static MenuManager* MenuManagerFactory::<API key>( content::BrowserContext* context) { return static_cast<MenuManager*>( GetInstance()-><API key>(context, true)); } // static MenuManagerFactory* MenuManagerFactory::GetInstance() { return Singleton<MenuManagerFactory>::get(); } // static KeyedService* MenuManagerFactory::<API key>( content::BrowserContext* context) { return GetInstance()-><API key>(context); } MenuManagerFactory::MenuManagerFactory() : <API key>( "MenuManager", <API key>::GetInstance()) { DependsOn(<API key>::Get()-><API key>()); } MenuManagerFactory::~MenuManagerFactory() {} KeyedService* MenuManagerFactory::<API key>( content::BrowserContext* context) const { Profile* profile = Profile::FromBrowserContext(context); return new MenuManager(profile, ExtensionSystem::Get(profile)->state_store()); } content::BrowserContext* MenuManagerFactory::<API key>( content::BrowserContext* context) const { return <API key>::Get()->GetOriginalContext(context); } bool MenuManagerFactory::<API key>() const { return true; } bool MenuManagerFactory::<API key>() const { return true; } } // namespace extensions
// <API key>: GPL-2.0 /* * Some Intel Ibex Peak based platforms support so-called "intelligent * power sharing", which allows the CPU and GPU to cooperate to maximize * performance within a given TDP (thermal design point). This driver * performs the coordination between the CPU and GPU, monitors thermal and * power statistics in the platform, and initializes power monitoring * hardware. It also provides a few tunables to control behavior. Its * primary purpose is to safely allow CPU and GPU turbo modes to be enabled * by tracking power and thermal budget; secondarily it can boost turbo * performance by allocating more power or thermal budget to the CPU or GPU * based on available headroom and activity. * * The basic algorithm is driven by a 5s moving average of temperature. If * thermal headroom is available, the CPU and/or GPU power clamps may be * adjusted upwards. If we hit the thermal ceiling or a thermal trigger, * we scale back the clamp. Aside from trigger events (when we're critically * close or over our TDP) we don't adjust the clamps more than once every * five seconds. * * The thermal device (device 31, function 6) has a set of registers that * are updated by the ME firmware. The ME should also take the clamp values * written to those registers and write them to the CPU, but we currently * bypass that functionality and write the CPU MSR directly. 
* * UNSUPPORTED: * - dual MCP configs * * TODO: * - handle CPU hotplug * - provide turbo enable/disable api * * Related documents: * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2 * - CDI 401376 - Ibex Peak EDS * - ref 26037, 26641 - IPS BIOS spec * - ref 26489 - Nehalem BIOS writer's guide * - ref 26921 - Ibex Peak BIOS Specification */ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/sched/loadavg.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/tick.h> #include <linux/timer.h> #include <linux/dmi.h> #include <drm/i915_drm.h> #include <asm/msr.h> #include <asm/processor.h> #include "intel_ips.h" #include <linux/<API key>.h> #define <API key> 0x3b32 /* * Package level MSRs for monitor/control */ #define PLATFORM_INFO 0xce #define PLATFORM_TDP (1<<29) #define PLATFORM_RATIO (1<<28) #define IA32_MISC_ENABLE 0x1a0 #define IA32_MISC_TURBO_EN (1ULL<<38) #define <API key> 0x1ac #define TURBO_TDC_OVR_EN (1UL<<31) #define TURBO_TDC_MASK (<API key>) #define TURBO_TDC_SHIFT (16) #define TURBO_TDP_OVR_EN (1UL<<15) #define TURBO_TDP_MASK (<API key>) /* * Core/thread MSRs for monitoring */ #define IA32_PERF_CTL 0x199 #define IA32_PERF_TURBO_DIS (1ULL<<32) /* * Thermal PCI device regs */ #define THM_CFG_TBAR 0x10 #define THM_CFG_TBAR_HI 0x14 #define THM_TSIU 0x00 #define THM_TSE 0x01 #define TSE_EN 0xb8 #define THM_TSS 0x02 #define THM_TSTR 0x03 #define THM_TSTTP 0x04 #define THM_TSCO 0x08 #define THM_TSES 0x0c #define THM_TSGPEN 0x0d #define TSGPEN_HOT_LOHI (1<<1) #define TSGPEN_CRIT_LOHI (1<<2) #define THM_TSPC 0x0e #define THM_PPEC 0x10 #define THM_CTA 0x12 #define THM_PTA 0x14 #define PTA_SLOPE_MASK (0xff00) #define PTA_SLOPE_SHIFT 8 #define PTA_OFFSET_MASK (0x00ff) #define THM_MGTA 0x16 #define MGTA_SLOPE_MASK (0xff00) #define MGTA_SLOPE_SHIFT 8 #define MGTA_OFFSET_MASK 
(0x00ff) #define THM_TRC 0x1a #define TRC_CORE2_EN (1<<15) #define TRC_THM_EN (1<<12) #define TRC_C6_WAR (1<<8) #define TRC_CORE1_EN (1<<7) #define TRC_CORE_PWR (1<<6) #define TRC_PCH_EN (1<<5) #define TRC_MCH_EN (1<<4) #define TRC_DIMM4 (1<<3) #define TRC_DIMM3 (1<<2) #define TRC_DIMM2 (1<<1) #define TRC_DIMM1 (1<<0) #define THM_TES 0x20 #define THM_TEN 0x21 #define TEN_UPDATE_EN 1 #define THM_PSC 0x24 #define PSC_NTG (1<<0) /* No GFX turbo support */ #define PSC_NTPC (1<<1) /* No CPU turbo support */ #define PSC_PP_DEF (0<<2) /* Perf policy up to driver */ #define PSP_PP_PC (1<<2) /* BIOS prefers CPU perf */ #define PSP_PP_BAL (2<<2) /* BIOS wants balanced perf */ #define PSP_PP_GFX (3<<2) /* BIOS prefers GFX perf */ #define PSP_PBRT (1<<4) /* BIOS run time support */ #define THM_CTV1 0x30 #define CTV_TEMP_ERROR (1<<15) #define CTV_TEMP_MASK 0x3f #define CTV_ #define THM_CTV2 0x32 #define THM_CEC 0x34 /* undocumented power accumulator in joules */ #define THM_AE 0x3f #define THM_HTS 0x50 /* 32 bits */ #define HTS_PCPL_MASK (0x7fe00000) #define HTS_PCPL_SHIFT 21 #define HTS_GPL_MASK (0x001ff000) #define HTS_GPL_SHIFT 12 #define HTS_PP_MASK (0x00000c00) #define HTS_PP_SHIFT 10 #define HTS_PP_DEF 0 #define HTS_PP_PROC 1 #define HTS_PP_BAL 2 #define HTS_PP_GFX 3 #define HTS_PCTD_DIS (1<<9) #define HTS_GTD_DIS (1<<8) #define HTS_PTL_MASK (0x000000fe) #define HTS_PTL_SHIFT 1 #define HTS_NVV (1<<0) #define THM_HTSHI 0x54 /* 16 bits */ #define HTS2_PPL_MASK (0x03ff) #define HTS2_PRST_MASK (0x3c00) #define HTS2_PRST_SHIFT 10 #define HTS2_PRST_UNLOADED 0 #define HTS2_PRST_RUNNING 1 #define HTS2_PRST_TDISOP 2 /* turbo disabled due to power */ #define HTS2_PRST_TDISHT 3 /* turbo disabled due to high temp */ #define HTS2_PRST_TDISUSR 4 /* user disabled turbo */ #define HTS2_PRST_TDISPLAT 5 /* platform disabled turbo */ #define HTS2_PRST_TDISPM 6 /* power management disabled turbo */ #define HTS2_PRST_TDISERR 7 /* some kind of error disabled turbo */ #define THM_PTL 0x56 
#define THM_MGTV 0x58 #define TV_MASK 0x000000000000ff00 #define TV_SHIFT 8 #define THM_PTV 0x60 #define PTV_MASK 0x00ff #define THM_MMGPC 0x64 #define THM_MPPC 0x66 #define THM_MPCPC 0x68 #define THM_TSPIEN 0x82 #define TSPIEN_AUX_LOHI (1<<0) #define TSPIEN_HOT_LOHI (1<<1) #define TSPIEN_CRIT_LOHI (1<<2) #define TSPIEN_AUX2_LOHI (1<<3) #define THM_TSLOCK 0x83 #define THM_ATR 0x84 #define THM_TOF 0x87 #define THM_STS 0x98 #define STS_PCPL_MASK (0x7fe00000) #define STS_PCPL_SHIFT 21 #define STS_GPL_MASK (0x001ff000) #define STS_GPL_SHIFT 12 #define STS_PP_MASK (0x00000c00) #define STS_PP_SHIFT 10 #define STS_PP_DEF 0 #define STS_PP_PROC 1 #define STS_PP_BAL 2 #define STS_PP_GFX 3 #define STS_PCTD_DIS (1<<9) #define STS_GTD_DIS (1<<8) #define STS_PTL_MASK (0x000000fe) #define STS_PTL_SHIFT 1 #define STS_NVV (1<<0) #define THM_SEC 0x9c #define SEC_ACK (1<<0) #define THM_TC3 0xa4 #define THM_TC1 0xa8 #define STS_PPL_MASK (0x0003ff00) #define STS_PPL_SHIFT 16 #define THM_TC2 0xac #define THM_DTV 0xb0 #define THM_ITV 0xd8 #define ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */ #define ITV_ME_SEQNO_SHIFT (16) #define ITV_MCH_TEMP_MASK 0x0000ff00 #define ITV_MCH_TEMP_SHIFT (8) #define ITV_PCH_TEMP_MASK 0x000000ff #define thm_readb(off) readb(ips->regmap + (off)) #define thm_readw(off) readw(ips->regmap + (off)) #define thm_readl(off) readl(ips->regmap + (off)) #define thm_readq(off) readq(ips->regmap + (off)) #define thm_writeb(off, val) writeb((val), ips->regmap + (off)) #define thm_writew(off, val) writew((val), ips->regmap + (off)) #define thm_writel(off, val) writel((val), ips->regmap + (off)) static const int IPS_ADJUST_PERIOD = 5000; static bool late_i915_load = false; /* For initial average collection */ static const int IPS_SAMPLE_PERIOD = 200; static const int IPS_SAMPLE_WINDOW = 5000; /* 5s moving window of samples */ #define IPS_SAMPLE_COUNT (IPS_SAMPLE_WINDOW / IPS_SAMPLE_PERIOD) /* Per-SKU limits */ struct ips_mcp_limits { int mcp_power_limit; 
/* mW units */ int core_power_limit; int mch_power_limit; int core_temp_limit; /* degrees C */ int mch_temp_limit; }; /* Max temps are -10 degrees C to avoid PROCHOT# */ static struct ips_mcp_limits ips_sv_limits = { .mcp_power_limit = 35000, .core_power_limit = 29000, .mch_power_limit = 20000, .core_temp_limit = 95, .mch_temp_limit = 90 }; static struct ips_mcp_limits ips_lv_limits = { .mcp_power_limit = 25000, .core_power_limit = 21000, .mch_power_limit = 13000, .core_temp_limit = 95, .mch_temp_limit = 90 }; static struct ips_mcp_limits ips_ulv_limits = { .mcp_power_limit = 18000, .core_power_limit = 14000, .mch_power_limit = 11000, .core_temp_limit = 95, .mch_temp_limit = 90 }; struct ips_driver { struct device *dev; void __iomem *regmap; int irq; struct task_struct *monitor; struct task_struct *adjust; struct dentry *debug_root; struct timer_list timer; /* Average CPU core temps (all averages in .01 degrees C for precision) */ u16 ctv1_avg_temp; u16 ctv2_avg_temp; /* GMCH average */ u16 mch_avg_temp; /* Average for the CPU (both cores?) */ u16 mcp_avg_temp; /* Average power consumption (in mW) */ u32 cpu_avg_power; u32 mch_avg_power; /* Offset values */ u16 cta_val; u16 pta_val; u16 mgta_val; /* Maximums & prefs, protected by turbo status lock */ spinlock_t turbo_status_lock; u16 mcp_temp_limit; u16 mcp_power_limit; u16 core_power_limit; u16 mch_power_limit; bool cpu_turbo_enabled; bool __cpu_turbo_on; bool gpu_turbo_enabled; bool __gpu_turbo_on; bool gpu_preferred; bool poll_turbo_status; bool second_cpu; bool <API key>; struct ips_mcp_limits *limits; /* Optional MCH interfaces for if i915 is in use */ unsigned long (*read_mch_val)(void); bool (*gpu_raise)(void); bool (*gpu_lower)(void); bool (*gpu_busy)(void); bool (*gpu_turbo_disable)(void); /* For restoration at unload */ u64 orig_turbo_limit; u64 orig_turbo_ratios; }; static bool <API key>(struct ips_driver *ips); /** * ips_cpu_busy - is CPU busy? 
* @ips: IPS driver struct
 *
 * Check CPU for load to see whether we should increase its thermal budget.
 *
 * RETURNS:
 * True if the CPU could use more power, false otherwise.
 */
static bool ips_cpu_busy(struct ips_driver *ips)
{
	/*
	 * Use the 1 minute load average (fixed point, FSHIFT fractional
	 * bits): a value above 1 means runnable tasks are queuing up.
	 */
	if ((avenrun[0] >> FSHIFT) > 1)
		return true;

	return false;
}

/**
 * ips_cpu_raise - raise CPU power clamp
 * @ips: IPS driver struct
 *
 * Raise the CPU power clamp by %IPS_CPU_STEP, in accordance with TDP for
 * this platform.
 *
 * We do this by adjusting the <API key> MSR upwards (as
 * long as we haven't hit the TDP limit for the SKU).
 *
 * NOTE(review): the MSR macro name was lost to redaction ("<API key>");
 * from the TURBO_TDP_*\/TURBO_TDC_* masks used with it, it appears to be
 * the 0x1ac turbo power/current limit register defined near the top of
 * this file -- confirm against the original source.
 */
static void ips_cpu_raise(struct ips_driver *ips)
{
	u64 turbo_override;
	u16 cur_tdp_limit, new_tdp_limit;

	/* Nothing to do if CPU turbo is administratively off */
	if (!ips->cpu_turbo_enabled)
		return;

	rdmsrl(<API key>, turbo_override);

	cur_tdp_limit = turbo_override & TURBO_TDP_MASK;
	new_tdp_limit = cur_tdp_limit + 8; /* 1W increase (MSR units of 1/8 W) */

	/* Clamp to SKU TDP limit (core_power_limit is in mW-derived units) */
	if (((new_tdp_limit * 10) / 8) > ips->core_power_limit)
		new_tdp_limit = cur_tdp_limit;

	/* Mirror the new limit into the thermal device (0.1 W units) */
	thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8);

	/* Enable the TDC/TDP override before programming the new TDP value */
	turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
	wrmsrl(<API key>, turbo_override);

	turbo_override &= ~TURBO_TDP_MASK;
	turbo_override |= new_tdp_limit;
	wrmsrl(<API key>, turbo_override);
}

/**
 * ips_cpu_lower - lower CPU power clamp
 * @ips: IPS driver struct
 *
 * Lower CPU power clamp by %IPS_CPU_STEP if possible.
 *
 * We do this by adjusting the <API key> MSR down, going
 * as low as the platform limits will allow (though we could go lower there
 * wouldn't be much point).
*/
static void ips_cpu_lower(struct ips_driver *ips)
{
	u64 turbo_override;
	u16 cur_limit, new_limit;

	rdmsrl(<API key>, turbo_override);

	cur_limit = turbo_override & TURBO_TDP_MASK;
	new_limit = cur_limit - 8; /* 1W decrease (MSR units of 1/8 W) */

	/* Clamp to SKU TDP limit: never go below the limit saved at probe */
	if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK))
		new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK;

	/* Mirror the new limit into the thermal device (0.1 W units) */
	thm_writew(THM_MPCPC, (new_limit * 10) / 8);

	/* Enable the TDC/TDP override before programming the new TDP value */
	turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
	wrmsrl(<API key>, turbo_override);

	turbo_override &= ~TURBO_TDP_MASK;
	turbo_override |= new_limit;
	wrmsrl(<API key>, turbo_override);
}

/**
 * do_enable_cpu_turbo - internal turbo enable function
 * @data: unused
 *
 * Internal function for actually updating MSRs.  When we enable/disable
 * turbo, we need to do it on each CPU; this function is the one called
 * by on_each_cpu() when needed.
 */
static void do_enable_cpu_turbo(void *data)
{
	u64 perf_ctl;

	/* Clear the per-thread turbo disable bit only if currently set */
	rdmsrl(IA32_PERF_CTL, perf_ctl);
	if (perf_ctl & IA32_PERF_TURBO_DIS) {
		perf_ctl &= ~IA32_PERF_TURBO_DIS;
		wrmsrl(IA32_PERF_CTL, perf_ctl);
	}
}

/**
 * <API key> - enable turbo mode on all CPUs
 * @ips: IPS driver struct
 *
 * Enable turbo mode by clearing the disable bit in IA32_PERF_CTL on
 * all logical threads.
 *
 * NOTE(review): the function name and the gating flag were lost to
 * redaction ("<API key>"); per the kerneldoc above this is the
 * enable-cpu-turbo helper, and the flag presumably records whether the
 * BIOS allows turbo toggling -- confirm against the original source.
 */
static void <API key>(struct ips_driver *ips)
{
	/* Already on, no need to mess with MSRs */
	if (ips->__cpu_turbo_on)
		return;

	if (ips-><API key>)
		on_each_cpu(do_enable_cpu_turbo, ips, 1);

	ips->__cpu_turbo_on = true;
}

/**
 * <API key> - internal turbo disable function
 * @data: unused
 *
 * Internal function for actually updating MSRs.  When we enable/disable
 * turbo, we need to do it on each CPU; this function is the one called
 * by on_each_cpu() when needed.
*/
static void <API key>(void *data)
{
	u64 perf_ctl;

	/* Set the per-thread turbo disable bit only if not already set */
	rdmsrl(IA32_PERF_CTL, perf_ctl);
	if (!(perf_ctl & IA32_PERF_TURBO_DIS)) {
		perf_ctl |= IA32_PERF_TURBO_DIS;
		wrmsrl(IA32_PERF_CTL, perf_ctl);
	}
}

/**
 * <API key> - disable turbo mode on all CPUs
 * @ips: IPS driver struct
 *
 * Disable turbo mode by setting the disable bit in IA32_PERF_CTL on
 * all logical threads.
 */
static void <API key>(struct ips_driver *ips)
{
	/* Already off, leave it */
	if (!ips->__cpu_turbo_on)
		return;

	if (ips-><API key>)
		on_each_cpu(<API key>, ips, 1);

	ips->__cpu_turbo_on = false;
}

/**
 * ips_gpu_busy - is GPU busy?
 * @ips: IPS driver struct
 *
 * Check GPU for load to see whether we should increase its thermal budget.
 * We need to call into the i915 driver in this case.
 *
 * RETURNS:
 * True if the GPU could use more power, false otherwise.
 */
static bool ips_gpu_busy(struct ips_driver *ips)
{
	/* Redacted helper ("<API key>") gates on GPU turbo being usable */
	if (!<API key>(ips))
		return false;

	return ips->gpu_busy();
}

/**
 * ips_gpu_raise - raise GPU power clamp
 * @ips: IPS driver struct
 *
 * Raise the GPU frequency/power if possible.  We need to call into the
 * i915 driver in this case.
 */
static void ips_gpu_raise(struct ips_driver *ips)
{
	if (!<API key>(ips))
		return;

	/* A failed raise means i915 gave up on turbo; remember that */
	if (!ips->gpu_raise())
		ips->gpu_turbo_enabled = false;

	return;
}

/**
 * ips_gpu_lower - lower GPU power clamp
 * @ips: IPS driver struct
 *
 * Lower GPU frequency/power if possible.  Need to call i915.
 */
static void ips_gpu_lower(struct ips_driver *ips)
{
	if (!<API key>(ips))
		return;

	/* A failed lower also means turbo is no longer usable */
	if (!ips->gpu_lower())
		ips->gpu_turbo_enabled = false;

	return;
}

/**
 * <API key> - notify the gfx driver turbo is available
 * @ips: IPS driver struct
 *
 * Call into the graphics driver indicating that it can safely use
 * turbo mode.
 */
static void <API key>(struct ips_driver *ips)
{
	/* Only flip the bookkeeping flag; no hardware access here */
	if (ips->__gpu_turbo_on)
		return;
	ips->__gpu_turbo_on = true;
}

/**
 * <API key> - notify the gfx driver to disable turbo mode
 * @ips: IPS driver struct
 *
 * Request that the graphics driver disable turbo mode.
*/
static void <API key>(struct ips_driver *ips)
{
	/* Avoid calling i915 if turbo is already disabled */
	if (!ips->__gpu_turbo_on)
		return;

	/* Only clear our flag once i915 confirms the disable succeeded */
	if (!ips->gpu_turbo_disable())
		dev_err(ips->dev, "failed to disable graphics turbo\n");
	else
		ips->__gpu_turbo_on = false;
}

/**
 * mcp_exceeded - check whether we're outside our thermal & power limits
 * @ips: IPS driver struct
 *
 * Check whether the MCP is over its thermal or power budget.
 *
 * Reads of the averaged fields are done under turbo_status_lock so the
 * comparison sees a consistent snapshot.
 */
static bool mcp_exceeded(struct ips_driver *ips)
{
	unsigned long flags;
	bool ret = false;
	u32 temp_limit;
	u32 avg_power;

	spin_lock_irqsave(&ips->turbo_status_lock, flags);

	/* Averages are kept in .01 degrees C; limits in whole degrees */
	temp_limit = ips->mcp_temp_limit * 100;
	if (ips->mcp_avg_temp > temp_limit)
		ret = true;

	/* Package budget covers CPU plus MCH power (mW) */
	avg_power = ips->cpu_avg_power + ips->mch_avg_power;
	if (avg_power > ips->mcp_power_limit)
		ret = true;

	/* NOTE(review): redacted call ("<API key>") pairs with the
	 * spin_lock_irqsave above -- presumably spin_unlock_irqrestore */
	<API key>(&ips->turbo_status_lock, flags);

	return ret;
}

/**
 * cpu_exceeded - check whether a CPU core is outside its limits
 * @ips: IPS driver struct
 * @cpu: CPU number to check
 *
 * Check a given CPU's average temp or power is over its limit.
 */
static bool cpu_exceeded(struct ips_driver *ips, int cpu)
{
	unsigned long flags;
	int avg;
	bool ret = false;

	spin_lock_irqsave(&ips->turbo_status_lock, flags);
	/* Core 0 and core 1 temps live in separate averaged fields */
	avg = cpu ? ips->ctv2_avg_temp : ips->ctv1_avg_temp;
	if (avg > (ips->limits->core_temp_limit * 100))
		ret = true;
	if (ips->cpu_avg_power > ips->core_power_limit * 100)
		ret = true;
	<API key>(&ips->turbo_status_lock, flags);

	if (ret)
		dev_info(ips->dev, "CPU power or thermal limit exceeded\n");

	return ret;
}

/**
 * mch_exceeded - check whether the GPU is over budget
 * @ips: IPS driver struct
 *
 * Check the MCH temp & power against their maximums.
*/
static bool mch_exceeded(struct ips_driver *ips)
{
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&ips->turbo_status_lock, flags);
	/* Averages are in .01 degrees C; the limit is in whole degrees */
	if (ips->mch_avg_temp > (ips->limits->mch_temp_limit * 100))
		ret = true;
	if (ips->mch_avg_power > ips->mch_power_limit)
		ret = true;
	/* NOTE(review): redacted call ("<API key>") pairs with the
	 * spin_lock_irqsave above -- presumably spin_unlock_irqrestore */
	<API key>(&ips->turbo_status_lock, flags);

	return ret;
}

/**
 * verify_limits - verify BIOS provided limits
 * @ips: IPS structure
 *
 * BIOS can optionally provide non-default limits for power and temp.  Check
 * them here and use the defaults if the BIOS values are not provided or
 * are otherwise unusable.
 */
static void verify_limits(struct ips_driver *ips)
{
	/* Reject power limits below the SKU default or above 35 W */
	if (ips->mcp_power_limit < ips->limits->mcp_power_limit ||
	    ips->mcp_power_limit > 35000)
		ips->mcp_power_limit = ips->limits->mcp_power_limit;

	/* Temp limit must cover both core and MCH and stay below 150 C */
	if (ips->mcp_temp_limit < ips->limits->core_temp_limit ||
	    ips->mcp_temp_limit < ips->limits->mch_temp_limit ||
	    ips->mcp_temp_limit > 150)
		ips->mcp_temp_limit = min(ips->limits->core_temp_limit,
					  ips->limits->mch_temp_limit);
}

/**
 * update_turbo_limits - get various limits & settings from regs
 * @ips: IPS driver struct
 *
 * Update the IPS power & temp limits, along with turbo enable flags,
 * based on latest register contents.
 *
 * Used at init time and for runtime BIOS support, which requires polling
 * the regs for updates (as a result of AC->DC transition for example).
* * LOCKING: * Caller must hold turbo_status_lock (outside of init) */ static void update_turbo_limits(struct ips_driver *ips) { u32 hts = thm_readl(THM_HTS); ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS); /* * Disable turbo for now, until we can figure out why the power figures * are wrong */ ips->cpu_turbo_enabled = false; if (ips->gpu_busy) ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS); ips->core_power_limit = thm_readw(THM_MPCPC); ips->mch_power_limit = thm_readw(THM_MMGPC); ips->mcp_temp_limit = thm_readw(THM_PTL); ips->mcp_power_limit = thm_readw(THM_MPPC); verify_limits(ips); /* Ignore BIOS CPU vs GPU pref */ } /** * ips_adjust - adjust power clamp based on thermal state * @data: ips driver structure * * Wake up every 5s or so and check whether we should adjust the power clamp. * Check CPU and GPU load to determine which needs adjustment. There are * several things to consider here: * - do we need to adjust up or down? * - is CPU busy? * - is GPU busy? * - is CPU in turbo? * - is GPU in turbo? * - is CPU or GPU preferred? (CPU is default) * * So, given the above, we do the following: * - up (TDP available) * - CPU not busy, GPU not busy - nothing * - CPU busy, GPU not busy - adjust CPU up * - CPU not busy, GPU busy - adjust GPU up * - CPU busy, GPU busy - adjust preferred unit up, taking headroom from * non-preferred unit if necessary * - down (at TDP limit) * - adjust both CPU and GPU down if possible * cpu+ gpu+ cpu+gpu- cpu-gpu+ cpu-gpu- cpu < gpu < cpu+gpu+ cpu+ gpu+ nothing cpu < gpu >= cpu+gpu-(mcp<) cpu+gpu-(mcp<) gpu- gpu- cpu >= gpu < cpu-gpu+(mcp<) cpu- cpu-gpu+(mcp<) cpu- cpu >= gpu >= cpu-gpu- cpu-gpu- cpu-gpu- cpu-gpu- * */ static int ips_adjust(void *data) { struct ips_driver *ips = data; unsigned long flags; dev_dbg(ips->dev, "starting ips-adjust thread\n"); /* * Adjust CPU and GPU clamps every 5s if needed. Doing it more * often isn't recommended due to ME interaction. 
*/ do { bool cpu_busy = ips_cpu_busy(ips); bool gpu_busy = ips_gpu_busy(ips); spin_lock_irqsave(&ips->turbo_status_lock, flags); if (ips->poll_turbo_status) update_turbo_limits(ips); <API key>(&ips->turbo_status_lock, flags); /* Update turbo status if necessary */ if (ips->cpu_turbo_enabled) <API key>(ips); else <API key>(ips); if (ips->gpu_turbo_enabled) <API key>(ips); else <API key>(ips); /* We're outside our comfort zone, crank them down */ if (mcp_exceeded(ips)) { ips_cpu_lower(ips); ips_gpu_lower(ips); goto sleep; } if (!cpu_exceeded(ips, 0) && cpu_busy) ips_cpu_raise(ips); else ips_cpu_lower(ips); if (!mch_exceeded(ips) && gpu_busy) ips_gpu_raise(ips); else ips_gpu_lower(ips); sleep: <API key>(msecs_to_jiffies(IPS_ADJUST_PERIOD)); } while (!kthread_should_stop()); dev_dbg(ips->dev, "ips-adjust thread stopped\n"); return 0; } /* * Helpers for reading out temp/power values and calculating their * averages for the decision making and monitoring functions. */ static u16 calc_avg_temp(struct ips_driver *ips, u16 *array) { u64 total = 0; int i; u16 avg; for (i = 0; i < IPS_SAMPLE_COUNT; i++) total += (u64)(array[i] * 100); do_div(total, IPS_SAMPLE_COUNT); avg = (u16)total; return avg; } static u16 read_mgtv(struct ips_driver *ips) { u16 ret; u64 slope, offset; u64 val; val = thm_readq(THM_MGTV); val = (val & TV_MASK) >> TV_SHIFT; slope = offset = thm_readw(THM_MGTA); slope = (slope & MGTA_SLOPE_MASK) >> MGTA_SLOPE_SHIFT; offset = offset & MGTA_OFFSET_MASK; ret = ((val * slope + 0x40) >> 7) + offset; return 0; /* MCH temp reporting buggy */ } static u16 read_ptv(struct ips_driver *ips) { u16 val; val = thm_readw(THM_PTV) & PTV_MASK; return val; } static u16 read_ctv(struct ips_driver *ips, int cpu) { int reg = cpu ? 
THM_CTV2 : THM_CTV1; u16 val; val = thm_readw(reg); if (!(val & CTV_TEMP_ERROR)) val = (val) >> 6; /* discard fractional component */ else val = 0; return val; } static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period) { u32 val; u32 ret; /* * CEC is in joules/65535. Take difference over time to * get watts. */ val = thm_readl(THM_CEC); /* period is in ms and we want mW */ ret = (((val - *last) * 1000) / period); ret = (ret * 1000) / 65535; *last = val; return 0; } static const u16 temp_decay_factor = 2; static u16 update_average_temp(u16 avg, u16 val) { u16 ret; /* Multiply by 100 for extra precision */ ret = (val * 100 / temp_decay_factor) + (((temp_decay_factor - 1) * avg) / temp_decay_factor); return ret; } static const u16 power_decay_factor = 2; static u16 <API key>(u32 avg, u32 val) { u32 ret; ret = (val / power_decay_factor) + (((power_decay_factor - 1) * avg) / power_decay_factor); return ret; } static u32 calc_avg_power(struct ips_driver *ips, u32 *array) { u64 total = 0; u32 avg; int i; for (i = 0; i < IPS_SAMPLE_COUNT; i++) total += array[i]; do_div(total, IPS_SAMPLE_COUNT); avg = (u32)total; return avg; } static void monitor_timeout(struct timer_list *t) { struct ips_driver *ips = from_timer(ips, t, timer); wake_up_process(ips->monitor); } /** * ips_monitor - temp/power monitoring thread * @data: ips driver structure * * This is the main function for the IPS driver. It monitors power and * tempurature in the MCP and adjusts CPU and GPU power clams accordingly. * * We keep a 5s moving average of power consumption and tempurature. Using * that data, along with CPU vs GPU preference, we adjust the power clamps * up or down. 
*/ static int ips_monitor(void *data) { struct ips_driver *ips = data; unsigned long seqno_timestamp, expire, last_msecs, last_sample_period; int i; u32 *cpu_samples, *mchp_samples, old_cpu_power; u16 *mcp_samples, *ctv1_samples, *ctv2_samples, *mch_samples; u8 cur_seqno, last_seqno; mcp_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL); ctv1_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL); ctv2_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL); mch_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL); cpu_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u32), GFP_KERNEL); mchp_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u32), GFP_KERNEL); if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples || !cpu_samples || !mchp_samples) { dev_err(ips->dev, "failed to allocate sample array, ips disabled\n"); kfree(mcp_samples); kfree(ctv1_samples); kfree(ctv2_samples); kfree(mch_samples); kfree(cpu_samples); kfree(mchp_samples); return -ENOMEM; } last_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >> ITV_ME_SEQNO_SHIFT; seqno_timestamp = get_jiffies_64(); old_cpu_power = thm_readl(THM_CEC); <API key>(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); /* Collect an initial average */ for (i = 0; i < IPS_SAMPLE_COUNT; i++) { u32 mchp, cpu_power; u16 val; mcp_samples[i] = read_ptv(ips); val = read_ctv(ips, 0); ctv1_samples[i] = val; val = read_ctv(ips, 1); ctv2_samples[i] = val; val = read_mgtv(ips); mch_samples[i] = val; cpu_power = get_cpu_power(ips, &old_cpu_power, IPS_SAMPLE_PERIOD); cpu_samples[i] = cpu_power; if (ips->read_mch_val) { mchp = ips->read_mch_val(); mchp_samples[i] = mchp; } <API key>(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); if (kthread_should_stop()) break; } ips->mcp_avg_temp = calc_avg_temp(ips, mcp_samples); ips->ctv1_avg_temp = calc_avg_temp(ips, ctv1_samples); ips->ctv2_avg_temp = calc_avg_temp(ips, ctv2_samples); ips->mch_avg_temp = calc_avg_temp(ips, mch_samples); ips->cpu_avg_power = calc_avg_power(ips, cpu_samples); 
ips->mch_avg_power = calc_avg_power(ips, mchp_samples); kfree(mcp_samples); kfree(ctv1_samples); kfree(ctv2_samples); kfree(mch_samples); kfree(cpu_samples); kfree(mchp_samples); /* Start the adjustment thread now that we have data */ wake_up_process(ips->adjust); /* * Ok, now we have an initial avg. From here on out, we track the * running avg using a decaying average calculation. This allows * us to reduce the sample frequency if the CPU and GPU are idle. */ old_cpu_power = thm_readl(THM_CEC); <API key>(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); last_sample_period = IPS_SAMPLE_PERIOD; timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE); do { u32 cpu_val, mch_val; u16 val; /* MCP itself */ val = read_ptv(ips); ips->mcp_avg_temp = update_average_temp(ips->mcp_avg_temp, val); /* Processor 0 */ val = read_ctv(ips, 0); ips->ctv1_avg_temp = update_average_temp(ips->ctv1_avg_temp, val); /* Power */ cpu_val = get_cpu_power(ips, &old_cpu_power, last_sample_period); ips->cpu_avg_power = <API key>(ips->cpu_avg_power, cpu_val); if (ips->second_cpu) { /* Processor 1 */ val = read_ctv(ips, 1); ips->ctv2_avg_temp = update_average_temp(ips->ctv2_avg_temp, val); } /* MCH */ val = read_mgtv(ips); ips->mch_avg_temp = update_average_temp(ips->mch_avg_temp, val); /* Power */ if (ips->read_mch_val) { mch_val = ips->read_mch_val(); ips->mch_avg_power = <API key>(ips->mch_avg_power, mch_val); } /* * Make sure ME is updating thermal regs. * Note: * If it's been more than a second since the last update, * the ME is probably hung. 
*/ cur_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >> ITV_ME_SEQNO_SHIFT; if (cur_seqno == last_seqno && time_after(jiffies, seqno_timestamp + HZ)) { dev_warn(ips->dev, "ME failed to update for more than 1s, likely hung\n"); } else { seqno_timestamp = get_jiffies_64(); last_seqno = cur_seqno; } last_msecs = jiffies_to_msecs(jiffies); expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD); __set_current_state(TASK_INTERRUPTIBLE); mod_timer(&ips->timer, expire); schedule(); /* Calculate actual sample period for power averaging */ last_sample_period = jiffies_to_msecs(jiffies) - last_msecs; if (!last_sample_period) last_sample_period = 1; } while (!kthread_should_stop()); del_timer_sync(&ips->timer); dev_dbg(ips->dev, "ips-monitor thread stopped\n"); return 0; } #if 0 #define THM_DUMPW(reg) \ { \ u16 val = thm_readw(reg); \ dev_dbg(ips->dev, #reg ": 0x%04x\n", val); \ } #define THM_DUMPL(reg) \ { \ u32 val = thm_readl(reg); \ dev_dbg(ips->dev, #reg ": 0x%08x\n", val); \ } #define THM_DUMPQ(reg) \ { \ u64 val = thm_readq(reg); \ dev_dbg(ips->dev, #reg ": 0x%016x\n", val); \ } static void dump_thermal_info(struct ips_driver *ips) { u16 ptl; ptl = thm_readw(THM_PTL); dev_dbg(ips->dev, "Processor temp limit: %d\n", ptl); THM_DUMPW(THM_CTA); THM_DUMPW(THM_TRC); THM_DUMPW(THM_CTV1); THM_DUMPL(THM_STS); THM_DUMPW(THM_PTV); THM_DUMPQ(THM_MGTV); } #endif /** * ips_irq_handler - handle temperature triggers and other IPS events * @irq: irq number * @arg: unused * * Handle temperature limit trigger events, generally by lowering the clamps. * If we're at a critical limit, we clamp back to the lowest possible value * to prevent emergency shutdown. */ static irqreturn_t ips_irq_handler(int irq, void *arg) { struct ips_driver *ips = arg; u8 tses = thm_readb(THM_TSES); u8 tes = thm_readb(THM_TES); if (!tses && !tes) return IRQ_NONE; dev_info(ips->dev, "TSES: 0x%02x\n", tses); dev_info(ips->dev, "TES: 0x%02x\n", tes); /* STS update from EC? 
*/ if (tes & 1) { u32 sts, tc1; sts = thm_readl(THM_STS); tc1 = thm_readl(THM_TC1); if (sts & STS_NVV) { spin_lock(&ips->turbo_status_lock); ips->core_power_limit = (sts & STS_PCPL_MASK) >> STS_PCPL_SHIFT; ips->mch_power_limit = (sts & STS_GPL_MASK) >> STS_GPL_SHIFT; /* ignore EC CPU vs GPU pref */ ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS); /* * Disable turbo for now, until we can figure * out why the power figures are wrong */ ips->cpu_turbo_enabled = false; if (ips->gpu_busy) ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS); ips->mcp_temp_limit = (sts & STS_PTL_MASK) >> STS_PTL_SHIFT; ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >> STS_PPL_SHIFT; verify_limits(ips); spin_unlock(&ips->turbo_status_lock); thm_writeb(THM_SEC, SEC_ACK); } thm_writeb(THM_TES, tes); } /* Thermal trip */ if (tses) { dev_warn(ips->dev, "thermal trip occurred, tses: 0x%04x\n", tses); thm_writeb(THM_TSES, tses); } return IRQ_HANDLED; } #ifndef CONFIG_DEBUG_FS static void ips_debugfs_init(struct ips_driver *ips) { return; } static void ips_debugfs_cleanup(struct ips_driver *ips) { return; } #else /* Expose current state and limits in debugfs if possible */ static int cpu_temp_show(struct seq_file *m, void *data) { struct ips_driver *ips = m->private; seq_printf(m, "%d.%02d\n", ips->ctv1_avg_temp / 100, ips->ctv1_avg_temp % 100); return 0; } <API key>(cpu_temp); static int cpu_power_show(struct seq_file *m, void *data) { struct ips_driver *ips = m->private; seq_printf(m, "%dmW\n", ips->cpu_avg_power); return 0; } <API key>(cpu_power); static int cpu_clamp_show(struct seq_file *m, void *data) { u64 turbo_override; int tdp, tdc; rdmsrl(<API key>, turbo_override); tdp = (int)(turbo_override & TURBO_TDP_MASK); tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT); /* Convert to .1W/A units */ tdp = tdp * 10 / 8; tdc = tdc * 10 / 8; /* Watts Amperes */ seq_printf(m, "%d.%dW %d.%dA\n", tdp / 10, tdp % 10, tdc / 10, tdc % 10); return 0; } <API key>(cpu_clamp); static int 
mch_temp_show(struct seq_file *m, void *data) { struct ips_driver *ips = m->private; seq_printf(m, "%d.%02d\n", ips->mch_avg_temp / 100, ips->mch_avg_temp % 100); return 0; } <API key>(mch_temp); static int mch_power_show(struct seq_file *m, void *data) { struct ips_driver *ips = m->private; seq_printf(m, "%dmW\n", ips->mch_avg_power); return 0; } <API key>(mch_power); static void ips_debugfs_cleanup(struct ips_driver *ips) { <API key>(ips->debug_root); } static void ips_debugfs_init(struct ips_driver *ips) { ips->debug_root = debugfs_create_dir("ips", NULL); debugfs_create_file("cpu_temp", 0444, ips->debug_root, ips, &cpu_temp_fops); debugfs_create_file("cpu_power", 0444, ips->debug_root, ips, &cpu_power_fops); debugfs_create_file("cpu_clamp", 0444, ips->debug_root, ips, &cpu_clamp_fops); debugfs_create_file("mch_temp", 0444, ips->debug_root, ips, &mch_temp_fops); debugfs_create_file("mch_power", 0444, ips->debug_root, ips, &mch_power_fops); } #endif /* CONFIG_DEBUG_FS */ /** * ips_detect_cpu - detect whether CPU supports IPS * * Walk our list and see if we're on a supported CPU. If we find one, * return the limits for it. 
*/
static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
{
	u64 turbo_power, misc_en;
	struct ips_mcp_limits *limits = NULL;
	u16 tdp;

	/* IPS is only supported on family 6, model 37 parts */
	if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) {
		dev_info(ips->dev, "Non-IPS CPU detected.\n");
		return NULL;
	}

	/* Record whether the BIOS left turbo enabled in IA32_MISC_ENABLE */
	rdmsrl(IA32_MISC_ENABLE, misc_en);
	if (misc_en & IA32_MISC_TURBO_EN)
		ips-><API key> = true;
	else
		ips-><API key> = false;

	/*
	 * Pick per-SKU limits from the brand string segment letter.
	 * NOTE(review): whitespace inside these string literals looks
	 * collapsed by reformatting -- the brand string normally pads the
	 * segment letter with spaces; verify against the original source.
	 */
	if (strstr(boot_cpu_data.x86_model_id, "CPU M"))
		limits = &ips_sv_limits;
	else if (strstr(boot_cpu_data.x86_model_id, "CPU L"))
		limits = &ips_lv_limits;
	else if (strstr(boot_cpu_data.x86_model_id, "CPU U"))
		limits = &ips_ulv_limits;
	else {
		dev_info(ips->dev, "No CPUID match found.\n");
		return NULL;
	}

	/* MSR TDP field is in 1/8 W units (redacted macro = turbo limit MSR) */
	rdmsrl(<API key>, turbo_power);
	tdp = turbo_power & TURBO_TDP_MASK;

	/* Sanity check TDP against CPU */
	if (limits->core_power_limit != (tdp / 8) * 1000) {
		dev_info(ips->dev,
			 "CPU TDP doesn't match expected value (found %d, expected %d)\n",
			 tdp / 8, limits->core_power_limit / 1000);
		limits->core_power_limit = (tdp / 8) * 1000;
	}

	return limits;
}

/**
 * ips_get_i915_syms - try to get GPU control methods from i915 driver
 * @ips: IPS driver
 *
 * The i915 driver exports several interfaces to allow the IPS driver to
 * monitor and control graphics turbo mode.  If we can find them, we can
 * enable graphics turbo, otherwise we must disable it to avoid exceeding
 * thermal and power limits in the MCP.
*/
static bool ips_get_i915_syms(struct ips_driver *ips)
{
	/*
	 * Grab each i915 symbol in turn; on any failure, drop the refs
	 * already taken (in reverse order) and report failure.
	 */
	ips->read_mch_val = symbol_get(i915_read_mch_val);
	if (!ips->read_mch_val)
		goto out_err;
	ips->gpu_raise = symbol_get(i915_gpu_raise);
	if (!ips->gpu_raise)
		goto out_put_mch;
	ips->gpu_lower = symbol_get(i915_gpu_lower);
	if (!ips->gpu_lower)
		goto out_put_raise;
	ips->gpu_busy = symbol_get(i915_gpu_busy);
	if (!ips->gpu_busy)
		goto out_put_lower;
	/* NOTE(review): symbol name redacted ("<API key>"); from the field
	 * it fills, presumably the i915 gpu-turbo-disable export */
	ips->gpu_turbo_disable = symbol_get(<API key>);
	if (!ips->gpu_turbo_disable)
		goto out_put_busy;

	return true;

out_put_busy:
	symbol_put(i915_gpu_busy);
out_put_lower:
	symbol_put(i915_gpu_lower);
out_put_raise:
	symbol_put(i915_gpu_raise);
out_put_mch:
	symbol_put(i915_read_mch_val);
out_err:
	return false;
}

/*
 * NOTE(review): function name redacted ("<API key>").  It reports whether
 * GPU turbo is usable, retrying the i915 symbol lookup if i915 loaded
 * after this driver (late_i915_load set by the export below).
 */
static bool <API key>(struct ips_driver *ips)
{
	if (!ips->gpu_busy && late_i915_load) {
		if (ips_get_i915_syms(ips)) {
			dev_info(ips->dev,
				 "i915 driver attached, reenabling gpu turbo\n");
			ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
		}
	}

	return ips->gpu_turbo_enabled;
}

void <API key>(void)
{
	/* We can't cleanly get at the various ips_driver structs from
	 * this caller (the i915 driver), so just set a flag saying
	 * that it's time to try getting the symbols again.
*/ late_i915_load = true; } EXPORT_SYMBOL_GPL(<API key>); static const struct pci_device_id ips_id_table[] = { { PCI_VDEVICE(INTEL, <API key>), }, { 0, } }; MODULE_DEVICE_TABLE(pci, ips_id_table); static int <API key>(const struct dmi_system_id *id) { pr_info("Blacklisted intel_ips for %s\n", id->ident); return 1; } static const struct dmi_system_id ips_blacklist[] = { { .callback = <API key>, .ident = "HP ProBook", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"), }, }, { } /* terminating entry */ }; static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) { u64 platform_info; struct ips_driver *ips; u32 hts; int ret = 0; u16 htshi, trc, trc_required_mask; u8 tse; if (dmi_check_system(ips_blacklist)) return -ENODEV; ips = devm_kzalloc(&dev->dev, sizeof(*ips), GFP_KERNEL); if (!ips) return -ENOMEM; spin_lock_init(&ips->turbo_status_lock); ips->dev = &dev->dev; ips->limits = ips_detect_cpu(ips); if (!ips->limits) { dev_info(&dev->dev, "IPS not supported on this CPU\n"); return -ENXIO; } ret = pcim_enable_device(dev); if (ret) { dev_err(&dev->dev, "can't enable PCI device, aborting\n"); return ret; } ret = pcim_iomap_regions(dev, 1 << 0, pci_name(dev)); if (ret) { dev_err(&dev->dev, "failed to map thermal regs, aborting\n"); return ret; } ips->regmap = pcim_iomap_table(dev)[0]; pci_set_drvdata(dev, ips); tse = thm_readb(THM_TSE); if (tse != TSE_EN) { dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse); return -ENXIO; } trc = thm_readw(THM_TRC); trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN; if ((trc & trc_required_mask) != trc_required_mask) { dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n"); return -ENXIO; } if (trc & TRC_CORE2_EN) ips->second_cpu = true; update_turbo_limits(ips); dev_dbg(&dev->dev, "max cpu power clamp: %dW\n", ips->mcp_power_limit / 10); dev_dbg(&dev->dev, "max core power clamp: %dW\n", 
ips->core_power_limit / 10); /* BIOS may update limits at runtime */ if (thm_readl(THM_PSC) & PSP_PBRT) ips->poll_turbo_status = true; if (!ips_get_i915_syms(ips)) { dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n"); ips->gpu_turbo_enabled = false; } else { dev_dbg(&dev->dev, "graphics turbo enabled\n"); ips->gpu_turbo_enabled = true; } /* * Check PLATFORM_INFO MSR to make sure this chip is * turbo capable. */ rdmsrl(PLATFORM_INFO, platform_info); if (!(platform_info & PLATFORM_TDP)) { dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n"); return -ENODEV; } /* * IRQ handler for ME interaction * Note: don't use MSI here as the PCH has bugs. */ ret = <API key>(dev, 1, 1, PCI_IRQ_LEGACY); if (ret < 0) return ret; ips->irq = pci_irq_vector(dev, 0); ret = request_irq(ips->irq, ips_irq_handler, IRQF_SHARED, "ips", ips); if (ret) { dev_err(&dev->dev, "request irq failed, aborting\n"); return ret; } /* Enable aux, hot & critical interrupts */ thm_writeb(THM_TSPIEN, TSPIEN_AUX2_LOHI | TSPIEN_CRIT_LOHI | TSPIEN_HOT_LOHI | TSPIEN_AUX_LOHI); thm_writeb(THM_TEN, TEN_UPDATE_EN); /* Collect adjustment values */ ips->cta_val = thm_readw(THM_CTA); ips->pta_val = thm_readw(THM_PTA); ips->mgta_val = thm_readw(THM_MGTA); /* Save turbo limits & ratios */ rdmsrl(<API key>, ips->orig_turbo_limit); <API key>(ips); ips->cpu_turbo_enabled = false; /* Create thermal adjust thread */ ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust"); if (IS_ERR(ips->adjust)) { dev_err(&dev->dev, "failed to create thermal adjust thread, aborting\n"); ret = -ENOMEM; goto error_free_irq; } /* * Set up the work queue and monitor thread. The monitor thread * will wake up ips_adjust thread. 
*/ ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor"); if (IS_ERR(ips->monitor)) { dev_err(&dev->dev, "failed to create thermal monitor thread, aborting\n"); ret = -ENOMEM; goto <API key>; } hts = (ips->core_power_limit << HTS_PCPL_SHIFT) | (ips->mcp_temp_limit << HTS_PTL_SHIFT) | HTS_NVV; htshi = HTS2_PRST_RUNNING << HTS2_PRST_SHIFT; thm_writew(THM_HTSHI, htshi); thm_writel(THM_HTS, hts); ips_debugfs_init(ips); dev_info(&dev->dev, "IPS driver initialized, MCP temp limit %d\n", ips->mcp_temp_limit); return ret; <API key>: kthread_stop(ips->adjust); error_free_irq: free_irq(ips->irq, ips); <API key>(dev); return ret; } static void ips_remove(struct pci_dev *dev) { struct ips_driver *ips = pci_get_drvdata(dev); u64 turbo_override; ips_debugfs_cleanup(ips); /* Release i915 driver */ if (ips->read_mch_val) symbol_put(i915_read_mch_val); if (ips->gpu_raise) symbol_put(i915_gpu_raise); if (ips->gpu_lower) symbol_put(i915_gpu_lower); if (ips->gpu_busy) symbol_put(i915_gpu_busy); if (ips->gpu_turbo_disable) symbol_put(<API key>); rdmsrl(<API key>, turbo_override); turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN); wrmsrl(<API key>, turbo_override); wrmsrl(<API key>, ips->orig_turbo_limit); free_irq(ips->irq, ips); <API key>(dev); if (ips->adjust) kthread_stop(ips->adjust); if (ips->monitor) kthread_stop(ips->monitor); dev_dbg(&dev->dev, "IPS driver removed\n"); } static struct pci_driver ips_pci_driver = { .name = "intel ips", .id_table = ips_id_table, .probe = ips_probe, .remove = ips_remove, }; module_pci_driver(ips_pci_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jesse Barnes <jbarnes@virtuousgeek.org>"); MODULE_DESCRIPTION("Intelligent Power Sharing Driver");
# OpenERP, Open Source Management Solution # This program is free software: you can redistribute it and/or modify # published by the Free Software Foundation, either version 3 of the # This program is distributed in the hope that it will be useful, # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the from openerp.osv import osv from openerp import netsvc from openerp.tools.translate import _ class procurement_order(osv.osv): _inherit = 'procurement.order' def check_buy(self, cr, uid, ids, context=None): for procurement in self.browse(cr, uid, ids, context=context): for line in procurement.product_id.flow_pull_ids: if line.location_id==procurement.location_id: return line.type_proc=='buy' return super(procurement_order, self).check_buy(cr, uid, ids) def check_produce(self, cr, uid, ids, context=None): for procurement in self.browse(cr, uid, ids, context=context): for line in procurement.product_id.flow_pull_ids: if line.location_id==procurement.location_id: return line.type_proc=='produce' return super(procurement_order, self).check_produce(cr, uid, ids) def check_move(self, cr, uid, ids, context=None): for procurement in self.browse(cr, uid, ids, context=context): for line in procurement.product_id.flow_pull_ids: if line.location_id==procurement.location_id: return (line.type_proc=='move') and (line.location_src_id) return False def action_move_create(self, cr, uid, ids, context=None): proc_obj = self.pool.get('procurement.order') move_obj = self.pool.get('stock.move') picking_obj=self.pool.get('stock.picking') wf_service = netsvc.LocalService("workflow") for proc in proc_obj.browse(cr, uid, ids, context=context): line = None for line in proc.product_id.flow_pull_ids: if line.location_id == proc.location_id: break assert line, 'Line cannot be False if we are on this state of the workflow' origin = (proc.origin or proc.name or '').split(':')[0] +':'+line.name picking_id = picking_obj.create(cr, uid, { 'origin': origin, 'company_id': line.company_id and 
line.company_id.id or False, 'type': line.picking_type, 'stock_journal_id': line.journal_id and line.journal_id.id or False, 'move_type': 'one', 'partner_id': line.partner_address_id.id, 'note': _('Picking for pulled procurement coming from original location %s, pull rule %s, via original Procurement %s (#%d)') % (proc.location_id.name, line.name, proc.name, proc.id), 'invoice_state': line.invoice_state, }) move_id = move_obj.create(cr, uid, { 'name': line.name, 'picking_id': picking_id, 'company_id': line.company_id and line.company_id.id or False, 'product_id': proc.product_id.id, 'date': proc.date_planned, 'product_qty': proc.product_qty, 'product_uom': proc.product_uom.id, 'product_uos_qty': (proc.product_uos and proc.product_uos_qty)\ or proc.product_qty, 'product_uos': (proc.product_uos and proc.product_uos.id)\ or proc.product_uom.id, 'partner_id': line.partner_address_id.id, 'location_id': line.location_src_id.id, 'location_dest_id': line.location_id.id, 'move_dest_id': proc.move_id and proc.move_id.id or False, # to verif, about history ? 
'tracking_id': False, 'cancel_cascade': line.cancel_cascade, 'state': 'confirmed', 'note': _('Move for pulled procurement coming from original location %s, pull rule %s, via original Procurement %s (#%d)') % (proc.location_id.name, line.name, proc.name, proc.id), }) if proc.move_id and proc.move_id.state in ('confirmed'): move_obj.write(cr,uid, [proc.move_id.id], { 'state':'waiting' }, context=context) proc_id = proc_obj.create(cr, uid, { 'name': line.name, 'origin': origin, 'note': _('Pulled procurement coming from original location %s, pull rule %s, via original Procurement %s (#%d)') % (proc.location_id.name, line.name, proc.name, proc.id), 'company_id': line.company_id and line.company_id.id or False, 'date_planned': proc.date_planned, 'product_id': proc.product_id.id, 'product_qty': proc.product_qty, 'product_uom': proc.product_uom.id, 'product_uos_qty': (proc.product_uos and proc.product_uos_qty)\ or proc.product_qty, 'product_uos': (proc.product_uos and proc.product_uos.id)\ or proc.product_uom.id, 'location_id': line.location_src_id.id, 'procure_method': line.procure_method, 'move_id': move_id, }) wf_service = netsvc.LocalService("workflow") wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr) wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_confirm', cr) if proc.move_id: move_obj.write(cr, uid, [proc.move_id.id], {'location_id':proc.location_id.id}) msg = _('Pulled from another location.') self.write(cr, uid, [proc.id], {'state':'running', 'message': msg}) self.message_post(cr, uid, [proc.id], body=msg, context=context) # trigger direct processing (the new procurement shares the same planned date as the original one, which is already being processed) wf_service.trg_validate(uid, 'procurement.order', proc_id, 'button_check', cr) return False procurement_order() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
<?php require_once('../../_include.php'); $config = <API key>::getInstance(); $metadata = <API key>::getMetadataHandler(); // Get the local session $session = SimpleSAML_Session::getInstance(); SimpleSAML_Logger::info('SAML2.0 - SP.SingleLogoutService: Accessing SAML 2.0 SP endpoint SingleLogoutService'); if (!$config->getBoolean('enable.saml20-sp', TRUE)) throw new <API key>('NOACCESS'); // Destroy local session if exists. $session->doLogout('saml2'); $binding = SAML2_Binding::getCurrentBinding(); $message = $binding->receive(); $idpEntityId = $message->getIssuer(); if ($idpEntityId === NULL) { /* Without an issuer we have no way to respond to the message. */ throw new <API key>('Received message on logout endpoint without issuer.'); } $spEntityId = $metadata-><API key>('saml20-sp-hosted'); $idpMetadata = $metadata->getMetaDataConfig($idpEntityId, 'saml20-idp-remote'); $spMetadata = $metadata->getMetaDataConfig($spEntityId, 'saml20-sp-hosted'); sspmod_saml_Message::validateMessage($idpMetadata, $spMetadata, $message); if ($message instanceof SAML2_LogoutRequest) { try { // Extract some parameters from the logout request $requestid = $message->getId(); SimpleSAML_Logger::info('SAML2.0 - SP.SingleLogoutService: IdP (' . $idpEntityId . ') is sending logout request to me SP (' . $spEntityId . ') requestid '.$requestid); SimpleSAML_Logger::stats('saml20-idp-SLO idpinit ' . $spEntityId . ' ' . $idpEntityId); /* Create response. */ $lr = sspmod_saml_Message::buildLogoutResponse($spMetadata, $idpMetadata); $lr->setRelayState($message->getRelayState()); $lr->setInResponseTo($message->getId()); SimpleSAML_Logger::info('SAML2.0 - SP.SingleLogoutService: SP me (' . $spEntityId . ') is sending logout response to IdP (' . $idpEntityId . ')'); /* Send response. 
*/ $binding = new SAML2_HTTPRedirect(); $binding->send($lr); } catch (Exception $exception) { throw new <API key>('LOGOUTREQUEST', $exception); } } elseif ($message instanceof <API key>) { SimpleSAML_Logger::stats('saml20-sp-SLO spinit ' . $spEntityId . ' ' . $idpEntityId); $id = $message->getRelayState(); if (empty($id)) { /* For <API key>. */ $id = $message->getInResponseTo(); } $returnTo = $session->getData('spLogoutReturnTo', $id); if (empty($returnTo)) { throw new <API key>('LOGOUTINFOLOST'); } <API key>::redirect($returnTo); } else { throw new <API key>('SLOSERVICEPARAMS'); } ?>
<!DOCTYPE HTML> <html> <body> This test passes if there are two green squares below:<br> <svg width="300" height="300"> <svg id="svg" width="100" height="100"> <rect width="100%" height="100%" fill="green"/> </svg> <svg id="svg" x="100" y="100" width="100" height="100"> <rect width="100%" height="100%" fill="green"/> </svg> </svg> </body> </html>
<html manifest="resources/<API key>.manifest"> <script> if (window.testRunner) { testRunner.dumpAsText() testRunner.waitUntilDone(); } function log(message) { document.getElementById("result").innerHTML += message + "\n"; } function onprogress(event) { log("loading resource: " + event.loaded + " / " + event.total); if (event.loaded == 3) applicationCache.abort(); } function onnoupdate() { log("FAILURE"); log("noupdate"); if (window.testRunner) testRunner.notifyDone(); } function oncached() { log("FAILURE"); log("CACHED"); if (window.testRunner) testRunner.notifyDone(); } function onupdateready() { log("FAILURE"); log("UPDATEREADY"); if (window.testRunner) testRunner.notifyDone(); } function onerror() { log("SUCCESS"); if (window.testRunner) testRunner.notifyDone(); } applicationCache.addEventListener('noupdate', onnoupdate, false); applicationCache.addEventListener('cached', oncached, false); applicationCache.addEventListener('error', onerror, false); applicationCache.addEventListener('updateready', onupdateready, false); applicationCache.addEventListener('progress', onprogress, false); </script> <div>This tests that download process was aborted after progress event.</div> <div id="result"></div> </html>
(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(_dereq_,module,exports){ var Core = _dereq_('../lib/Core'), ShimIE8 = _dereq_('../lib/Shim.IE8'); if (typeof window !== 'undefined') { window.ForerunnerDB = Core; } module.exports = Core; },{"../lib/Core":4,"../lib/Shim.IE8":27}],2:[function(_dereq_,module,exports){ "use strict"; var Shared = _dereq_('./Shared'), Path = _dereq_('./Path'); var BinaryTree = function (data, compareFunc, hashFunc) { this.init.apply(this, arguments); }; BinaryTree.prototype.init = function (data, index, compareFunc, hashFunc) { this._store = []; this._keys = []; if (index !== undefined) { this.index(index); } if (compareFunc !== undefined) { this.compareFunc(compareFunc); } if (hashFunc !== undefined) { this.hashFunc(hashFunc); } if (data !== undefined) { this.data(data); } }; Shared.addModule('BinaryTree', BinaryTree); Shared.mixin(BinaryTree.prototype, 'Mixin.ChainReactor'); Shared.mixin(BinaryTree.prototype, 'Mixin.Sorting'); Shared.mixin(BinaryTree.prototype, 'Mixin.Common'); Shared.synthesize(BinaryTree.prototype, 'compareFunc'); Shared.synthesize(BinaryTree.prototype, 'hashFunc'); Shared.synthesize(BinaryTree.prototype, 'indexDir'); Shared.synthesize(BinaryTree.prototype, 'keys'); Shared.synthesize(BinaryTree.prototype, 'index', function (index) { if (index !== undefined) { // Convert the index object to an array of key val objects this.keys(this.extractKeys(index)); } return this.$super.call(this, index); }); BinaryTree.prototype.extractKeys = function (obj) { var i, keys = []; for (i in obj) { if (obj.hasOwnProperty(i)) { 
keys.push({ key: i, val: obj[i] }); } } return keys; }; BinaryTree.prototype.data = function (val) { if (val !== undefined) { this._data = val; if (this._hashFunc) { this._hash = this._hashFunc(val); } return this; } return this._data; }; /** * Pushes an item to the binary tree node's store array. * @param {*} val The item to add to the store. * @returns {*} */ BinaryTree.prototype.push = function (val) { if (val !== undefined) { this._store.push(val); return this; } return false; }; /** * Pulls an item from the binary tree node's store array. * @param {*} val The item to remove from the store. * @returns {*} */ BinaryTree.prototype.pull = function (val) { if (val !== undefined) { var index = this._store.indexOf(val); if (index > -1) { this._store.splice(index, 1); return this; } } return false; }; /** * Default compare method. Can be overridden. * @param a * @param b * @returns {number} * @private */ BinaryTree.prototype._compareFunc = function (a, b) { // Loop the index array var i, indexData, result = 0; for (i = 0; i < this._keys.length; i++) { indexData = this._keys[i]; if (indexData.val === 1) { result = this.sortAsc(a[indexData.key], b[indexData.key]); } else if (indexData.val === -1) { result = this.sortDesc(a[indexData.key], b[indexData.key]); } if (result !== 0) { return result; } } return result; }; /** * Default hash function. Can be overridden. 
* @param obj * @private */ BinaryTree.prototype._hashFunc = function (obj) { /*var i, indexData, hash = ''; for (i = 0; i < this._keys.length; i++) { indexData = this._keys[i]; if (hash) { hash += '_'; } hash += obj[indexData.key]; } return hash;*/ return obj[this._keys[0].key]; }; BinaryTree.prototype.insert = function (data) { var result, inserted, failed, i; if (data instanceof Array) { // Insert array of data inserted = []; failed = []; for (i = 0; i < data.length; i++) { if (this.insert(data[i])) { inserted.push(data[i]); } else { failed.push(data[i]); } } return { inserted: inserted, failed: failed }; } if (!this._data) { // Insert into this node (overwrite) as there is no data this.data(data); //this.push(data); return true; } result = this._compareFunc(this._data, data); if (result === 0) { this.push(data); // Less than this node if (this._left) { // Propagate down the left branch this._left.insert(data); } else { // Assign to left branch this._left = new BinaryTree(data, this._index, this._compareFunc, this._hashFunc); } return true; } if (result === -1) { // Greater than this node if (this._right) { // Propagate down the right branch this._right.insert(data); } else { // Assign to right branch this._right = new BinaryTree(data, this._index, this._compareFunc, this._hashFunc); } return true; } if (result === 1) { // Less than this node if (this._left) { // Propagate down the left branch this._left.insert(data); } else { // Assign to left branch this._left = new BinaryTree(data, this._index, this._compareFunc, this._hashFunc); } return true; } return false; }; BinaryTree.prototype.lookup = function (data, resultArr) { var result = this._compareFunc(this._data, data); resultArr = resultArr || []; if (result === 0) { if (this._left) { this._left.lookup(data, resultArr); } resultArr.push(this._data); if (this._right) { this._right.lookup(data, resultArr); } } if (result === -1) { if (this._right) { this._right.lookup(data, resultArr); } } if (result === 1) { 
if (this._left) { this._left.lookup(data, resultArr); } } return resultArr; }; BinaryTree.prototype.inOrder = function (type, resultArr) { resultArr = resultArr || []; if (this._left) { this._left.inOrder(type, resultArr); } switch (type) { case 'hash': resultArr.push(this._hash); break; case 'data': resultArr.push(this._data); break; default: resultArr.push({ key: this._data, arr: this._store }); break; } if (this._right) { this._right.inOrder(type, resultArr); } return resultArr; }; /** * * @param {String} type * @param {String} key The data key / path to range search against. * @param {Number} from Range search from this value (inclusive) * @param {Number} to Range search to this value (inclusive) * @param {Array=} resultArr Leave undefined when calling (internal use), * passes the result array between recursive calls to be returned when * the recursion chain completes. * @param {Path=} pathResolver Leave undefined when calling (internal use), * caches the path resolver instance for performance. 
* @returns {Array} Array of matching document objects */ BinaryTree.prototype.findRange = function (type, key, from, to, resultArr, pathResolver) { resultArr = resultArr || []; pathResolver = pathResolver || new Path(key); if (this._left) { this._left.findRange(type, key, from, to, resultArr, pathResolver); } // Check if this node's data is greater or less than the from value var pathVal = pathResolver.value(this._data), fromResult = this.sortAsc(pathVal, from), toResult = this.sortAsc(pathVal, to); if ((fromResult === 0 || fromResult === 1) && (toResult === 0 || toResult === -1)) { // This data node is greater than or equal to the from value, // and less than or equal to the to value so include it switch (type) { case 'hash': resultArr.push(this._hash); break; case 'data': resultArr.push(this._data); break; default: resultArr.push({ key: this._data, arr: this._store }); break; } } if (this._right) { this._right.findRange(type, key, from, to, resultArr, pathResolver); } return resultArr; }; BinaryTree.prototype.match = function (query, options) { // Check if the passed query has data in the keys our index // operates on and if so, is the query sort matching our order var pathSolver = new Path(), indexKeyArr, queryArr, matchedKeys = [], matchedKeyCount = 0, i; indexKeyArr = pathSolver.parseArr(this._index, { verbose: true }); queryArr = pathSolver.parseArr(query, { ignore:/\$/, verbose: true }); // Loop the query array and check the order of keys against the // index key array to see if this index can be used for (i = 0; i < indexKeyArr.length; i++) { if (queryArr[i] === indexKeyArr[i]) { matchedKeyCount++; matchedKeys.push(queryArr[i]); } } return { matchedKeys: matchedKeys, totalKeyCount: queryArr.length, score: matchedKeyCount }; //return pathSolver.countObjectPaths(this._keys, query); }; Shared.finishModule('BinaryTree'); module.exports = BinaryTree; },{"./Path":23,"./Shared":26}],3:[function(_dereq_,module,exports){ "use strict"; var Shared, Db, Metrics, 
KeyValueStore, Path, IndexHashMap, IndexBinaryTree, Crc, Overload, ReactorIO; Shared = _dereq_('./Shared'); /** * Creates a new collection. Collections store multiple documents and * handle CRUD against those documents. * @constructor */ var Collection = function (name) { this.init.apply(this, arguments); }; Collection.prototype.init = function (name, options) { this._primaryKey = '_id'; this._primaryIndex = new KeyValueStore('primary'); this._primaryCrc = new KeyValueStore('primaryCrc'); this._crcLookup = new KeyValueStore('crcLookup'); this._name = name; this._data = []; this._metrics = new Metrics(); this._options = options || { changeTimestamp: false }; // Create an object to store internal protected data this._metaData = {}; this._deferQueue = { insert: [], update: [], remove: [], upsert: [], async: [] }; this._deferThreshold = { insert: 100, update: 100, remove: 100, upsert: 100 }; this._deferTime = { insert: 1, update: 1, remove: 1, upsert: 1 }; this._deferredCalls = true; // Set the subset to itself since it is the root collection this.subsetOf(this); }; Shared.addModule('Collection', Collection); Shared.mixin(Collection.prototype, 'Mixin.Common'); Shared.mixin(Collection.prototype, 'Mixin.Events'); Shared.mixin(Collection.prototype, 'Mixin.ChainReactor'); Shared.mixin(Collection.prototype, 'Mixin.CRUD'); Shared.mixin(Collection.prototype, 'Mixin.Constants'); Shared.mixin(Collection.prototype, 'Mixin.Triggers'); Shared.mixin(Collection.prototype, 'Mixin.Sorting'); Shared.mixin(Collection.prototype, 'Mixin.Matching'); Shared.mixin(Collection.prototype, 'Mixin.Updating'); Shared.mixin(Collection.prototype, 'Mixin.Tags'); Metrics = _dereq_('./Metrics'); KeyValueStore = _dereq_('./KeyValueStore'); Path = _dereq_('./Path'); IndexHashMap = _dereq_('./IndexHashMap'); IndexBinaryTree = _dereq_('./IndexBinaryTree'); Crc = _dereq_('./Crc'); Db = Shared.modules.Db; Overload = _dereq_('./Overload'); ReactorIO = _dereq_('./ReactorIO'); /** * Returns a checksum of a 
string. * @param {String} string The string to checksum. * @return {String} The checksum generated. */ Collection.prototype.crc = Crc; /** * Gets / sets the deferred calls flag. If set to true (default) * then operations on large data sets can be broken up and done * over multiple CPU cycles (creating an async state). For purely * synchronous behaviour set this to false. * @param {Boolean=} val The value to set. * @returns {Boolean} */ Shared.synthesize(Collection.prototype, 'deferredCalls'); /** * Gets / sets the current state. * @param {String=} val The name of the state to set. * @returns {*} */ Shared.synthesize(Collection.prototype, 'state'); /** * Gets / sets the name of the collection. * @param {String=} val The name of the collection to set. * @returns {*} */ Shared.synthesize(Collection.prototype, 'name'); /** * Gets / sets the metadata stored in the collection. */ Shared.synthesize(Collection.prototype, 'metaData'); /** * Gets / sets boolean to determine if the collection should be * capped or not. */ Shared.synthesize(Collection.prototype, 'capped'); /** * Gets / sets capped collection size. This is the maximum number * of records that the capped collection will store. */ Shared.synthesize(Collection.prototype, 'cappedSize'); Collection.prototype._asyncPending = function (key) { this._deferQueue.async.push(key); }; Collection.prototype._asyncComplete = function (key) { // Remove async flag for this type var index = this._deferQueue.async.indexOf(key); while (index > -1) { this._deferQueue.async.splice(index, 1); index = this._deferQueue.async.indexOf(key); } if (this._deferQueue.async.length === 0) { this.deferEmit('ready'); } }; /** * Get the data array that represents the collection's data. * This data is returned by reference and should not be altered outside * of the provided CRUD functionality of the collection as doing so * may cause unstable index behaviour within the collection. 
* @returns {Array} */ Collection.prototype.data = function () { return this._data; }; /** * Drops a collection and all it's stored data from the database. * @returns {boolean} True on success, false on failure. */ Collection.prototype.drop = function (callback) { var key; if (!this.isDropped()) { if (this._db && this._db._collection && this._name) { if (this.debug()) { console.log(this.logIdentifier() + ' Dropping'); } this._state = 'dropped'; this.emit('drop', this); delete this._db._collection[this._name]; // Remove any reactor IO chain links if (this._collate) { for (key in this._collate) { if (this._collate.hasOwnProperty(key)) { this.collateRemove(key); } } } delete this._primaryKey; delete this._primaryIndex; delete this._primaryCrc; delete this._crcLookup; delete this._name; delete this._data; delete this._metrics; delete this._listeners; if (callback) { callback(false, true); } return true; } } else { if (callback) { callback(false, true); } return true; } if (callback) { callback(false, true); } return false; }; /** * Gets / sets the primary key for this collection. * @param {String=} keyName The name of the primary key. * @returns {*} */ Collection.prototype.primaryKey = function (keyName) { if (keyName !== undefined) { if (this._primaryKey !== keyName) { var oldKey = this._primaryKey; this._primaryKey = keyName; // Set the primary key index primary key this._primaryIndex.primaryKey(keyName); // Rebuild the primary key index this.<API key>(); // Propagate change down the chain this.chainSend('primaryKey', keyName, {oldData: oldKey}); } return this; } return this._primaryKey; }; /** * Handles insert events and routes changes to binds and views as required. * @param {Array} inserted An array of inserted documents. * @param {Array} failed An array of documents that failed to insert. 
* @private */ Collection.prototype._onInsert = function (inserted, failed) { this.emit('insert', inserted, failed); }; /** * Handles update events and routes changes to binds and views as required. * @param {Array} items An array of updated documents. * @private */ Collection.prototype._onUpdate = function (items) { this.emit('update', items); }; /** * Handles remove events and routes changes to binds and views as required. * @param {Array} items An array of removed documents. * @private */ Collection.prototype._onRemove = function (items) { this.emit('remove', items); }; /** * Handles any change to the collection. * @private */ Collection.prototype._onChange = function () { if (this._options.changeTimestamp) { // Record the last change timestamp this._metaData.lastChange = new Date(); } }; /** * Gets / sets the db instance this class instance belongs to. * @param {Db=} db The db instance. * @returns {*} */ Shared.synthesize(Collection.prototype, 'db', function (db) { if (db) { if (this.primaryKey() === '_id') { // Set primary key to the db's key by default this.primaryKey(db.primaryKey()); // Apply the same debug settings this.debug(db.debug()); } } return this.$super.apply(this, arguments); }); /** * Gets / sets mongodb emulation mode. * @param {Boolean=} val True to enable, false to disable. * @returns {*} */ Shared.synthesize(Collection.prototype, 'mongoEmulation'); /** * Sets the collection's data to the array / documents passed. If any * data already exists in the collection it will be removed before the * new data is set. * @param {Array|Object} data The array of documents or a single document * that will be set as the collections data. * @param options Optional options object. * @param callback Optional callback function. 
*/ Collection.prototype.setData = function (data, options, callback) { if (this.isDropped()) { throw(this.logIdentifier() + ' Cannot operate in a dropped state!'); } if (data) { var op = this._metrics.create('setData'); op.start(); options = this.options(options); this.preSetData(data, options, callback); if (options.$decouple) { data = this.decouple(data); } if (!(data instanceof Array)) { data = [data]; } op.time('transformIn'); data = this.transformIn(data); op.time('transformIn'); var oldData = [].concat(this._data); this._dataReplace(data); // Update the primary key index op.time('Rebuild Primary Key Index'); this.<API key>(options); op.time('Rebuild Primary Key Index'); // Rebuild all other indexes op.time('Rebuild All Other Indexes'); this._rebuildIndexes(); op.time('Rebuild All Other Indexes'); op.time('Resolve chains'); this.chainSend('setData', data, {oldData: oldData}); op.time('Resolve chains'); op.stop(); this._onChange(); this.emit('setData', this._data, oldData); } if (callback) { callback(false); } return this; }; /** * Drops and rebuilds the primary key index for all documents in the collection. * @param {Object=} options An optional options object. * @private */ Collection.prototype.<API key> = function (options) { options = options || { $ensureKeys: undefined, $violationCheck: undefined }; var ensureKeys = options && options.$ensureKeys !== undefined ? options.$ensureKeys : true, violationCheck = options && options.$violationCheck !== undefined ? 
options.$violationCheck : true, arr, arrCount, arrItem, pIndex = this._primaryIndex, crcIndex = this._primaryCrc, crcLookup = this._crcLookup, pKey = this._primaryKey, jString; // Drop the existing primary index pIndex.truncate(); crcIndex.truncate(); crcLookup.truncate(); // Loop the data and check for a primary key in each object arr = this._data; arrCount = arr.length; while (arrCount arrItem = arr[arrCount]; if (ensureKeys) { // Make sure the item has a primary key this.ensurePrimaryKey(arrItem); } if (violationCheck) { // Check for primary key violation if (!pIndex.uniqueSet(arrItem[pKey], arrItem)) { // Primary key violation throw(this.logIdentifier() + ' Call to setData on collection failed because your data violates the primary key unique constraint. One or more documents are using the same primary key: ' + arrItem[this._primaryKey]); } } else { pIndex.set(arrItem[pKey], arrItem); } // Generate a CRC string jString = this.jStringify(arrItem); crcIndex.set(arrItem[pKey], jString); crcLookup.set(jString, arrItem); } }; /** * Checks for a primary key on the document and assigns one if none * currently exists. * @param {Object} obj The object to check a primary key against. * @private */ Collection.prototype.ensurePrimaryKey = function (obj) { if (obj[this._primaryKey] === undefined) { // Assign a primary key automatically obj[this._primaryKey] = this.objectId(); } }; /** * Clears all data from the collection. 
* @returns {Collection} */ Collection.prototype.truncate = function () { if (this.isDropped()) { throw(this.logIdentifier() + ' Cannot operate in a dropped state!'); } this.emit('truncate', this._data); // Clear all the data from the collection this._data.length = 0; // Re-create the primary index data this._primaryIndex = new KeyValueStore('primary'); this._primaryCrc = new KeyValueStore('primaryCrc'); this._crcLookup = new KeyValueStore('crcLookup'); this._onChange(); this.deferEmit('change', {type: 'truncate'}); return this; }; /** * Modifies an existing document or documents in a collection. This will update * all matches for 'query' with the data held in 'update'. It will not overwrite * the matched documents with the update document. * * @param {Object} obj The document object to upsert or an array containing * documents to upsert. * * If the document contains a primary key field (based on the collections's primary * key) then the database will search for an existing document with a matching id. * If a matching document is found, the document will be updated. Any keys that * match keys on the existing document will be overwritten with new data. Any keys * that do not currently exist on the document will be added to the document. * * If the document does not contain an id or the id passed does not match an existing * document, an insert is performed instead. If no id is present a new primary key * id is provided for the item. * * @param {Function=} callback Optional callback method. * @returns {Object} An object containing two keys, "op" contains either "insert" or * "update" depending on the type of operation that was performed and "result" * contains the return data from the operation used. 
*/ Collection.prototype.upsert = function (obj, callback) { if (this.isDropped()) { throw(this.logIdentifier() + ' Cannot operate in a dropped state!'); } if (obj) { var queue = this._deferQueue.upsert, deferThreshold = this._deferThreshold.upsert, returnData = {}, query, i; // Determine if the object passed is an array or not if (obj instanceof Array) { if (this._deferredCalls && obj.length > deferThreshold) { // Break up upsert into blocks this._deferQueue.upsert = queue.concat(obj); this._asyncPending('upsert'); // Fire off the insert queue handler this.processQueue('upsert', callback); return {}; } else { // Loop the array and upsert each item returnData = []; for (i = 0; i < obj.length; i++) { returnData.push(this.upsert(obj[i])); } if (callback) { callback(); } return returnData; } } // Determine if the operation is an insert or an update if (obj[this._primaryKey]) { // Check if an object with this primary key already exists query = {}; query[this._primaryKey] = obj[this._primaryKey]; if (this._primaryIndex.lookup(query)[0]) { // The document already exists with this id, this operation is an update returnData.op = 'update'; } else { // No document with this id exists, this operation is an insert returnData.op = 'insert'; } } else { // The document passed does not contain an id, this operation is an insert returnData.op = 'insert'; } switch (returnData.op) { case 'insert': returnData.result = this.insert(obj); break; case 'update': returnData.result = this.update(query, obj); break; default: break; } return returnData; } else { if (callback) { callback(); } } return {}; }; Collection.prototype.filter = function (query, func, options) { return (this.find(query, options)).filter(func); }; /** * Executes a method against each document that matches query and then executes * an update based on the return data of the method. * @param {Object} query The query object. * @param {Function} func The method that each document is passed to. 
If this method * returns false for a particular document it is excluded from the update. * @param {Object=} options Optional options object passed to the initial find call. * @returns {Array} */ Collection.prototype.filterUpdate = function (query, func, options) { var items = this.find(query, options), results = [], singleItem, singleQuery, singleUpdate, pk = this.primaryKey(), i; for (i = 0; i < items.length; i++) { singleItem = items[i]; singleUpdate = func(singleItem); if (singleUpdate) { singleQuery = {}; singleQuery[pk] = singleItem[pk]; results.push(this.update(singleQuery, singleUpdate)); } } return results; }; /** * Modifies an existing document or documents in a collection. This will update * all matches for 'query' with the data held in 'update'. It will not overwrite * the matched documents with the update document. * * @param {Object} query The query that must be matched for a document to be * operated on. * @param {Object} update The object containing updated key/values. Any keys that * match keys on the existing document will be overwritten with this data. Any * keys that do not currently exist on the document will be added to the document. * @param {Object=} options An options object. * @returns {Array} The items that were updated. 
 */
Collection.prototype.update = function (query, update, options) {
    if (this.isDropped()) {
        throw(this.logIdentifier() + ' Cannot operate in a dropped state!');
    }

    // Decouple the update data so the caller's object is never mutated
    update = this.decouple(update);

    // Convert queries from mongo dot notation to forerunner queries
    if (this.mongoEmulation()) {
        this.convertToFdb(query);
        this.convertToFdb(update);
    }

    // Handle transform
    update = this.transformIn(update);

    if (this.debug()) {
        console.log(this.logIdentifier() + ' Updating some data');
    }

    var self = this,
        op = this._metrics.create('update'),
        dataSet,
        updated,
        // Applied to each matched document; returns truthy when the document
        // actually changed (used below as an Array.filter predicate).
        updateCall = function (referencedDoc) {
            // Snapshot of the document before any changes (for triggers/indexes)
            var oldDoc = self.decouple(referencedDoc),
                newDoc,
                triggerOperation,
                result;

            if (self.willTrigger(self.TYPE_UPDATE, self.PHASE_BEFORE) || self.willTrigger(self.TYPE_UPDATE, self.PHASE_AFTER)) {
                // Work on a decoupled copy first so BEFORE triggers can inspect
                // the post-update state without the live document changing yet
                newDoc = self.decouple(referencedDoc);

                triggerOperation = {
                    type: 'update',
                    query: self.decouple(query),
                    update: self.decouple(update),
                    options: self.decouple(options),
                    op: op
                };

                // Update newDoc with the update criteria so we know what the data
                // will look like AFTER the update is processed
                result = self.updateObject(newDoc, triggerOperation.update, triggerOperation.query, triggerOperation.options, '');

                if (self.processTrigger(triggerOperation, self.TYPE_UPDATE, self.PHASE_BEFORE, referencedDoc, newDoc) !== false) {
                    // No BEFORE trigger cancelled, so overwrite the live
                    // (referenced) document with the computed new state
                    result = self.updateObject(referencedDoc, newDoc, triggerOperation.query, triggerOperation.options, '');

                    // NOTE: If for some reason we would only like to fire this event
                    // if changes are actually going to occur on the object from the
                    // proposed update then we can add "result &&" to the if
                    self.processTrigger(triggerOperation, self.TYPE_UPDATE, self.PHASE_AFTER, oldDoc, newDoc);
                } else {
                    // Trigger cancelled operation so tell result that it was not updated
                    result = false;
                }
            } else {
                // No triggers registered: apply the update directly to the
                // live document
                result = self.updateObject(referencedDoc, update, query, options, '');
            }

            // Inform indexes of the change
            self._updateIndexes(oldDoc, referencedDoc);

            return result;
        };

    op.start();
    op.time('Retrieve documents to update');
    // $decouple: false so we mutate the documents stored in the collection
    dataSet = this.find(query, {$decouple: false});
    op.time('Retrieve documents to update');

    if (dataSet.length) {
        op.time('Update documents');
        // filter() keeps only the documents updateCall reported as changed
        updated = dataSet.filter(updateCall);
        op.time('Update documents');

        if (updated.length) {
            op.time('Resolve chains');
            this.chainSend('update', {
                query: query,
                update: update,
                dataSet: updated
            }, options);
            op.time('Resolve chains');

            this._onUpdate(updated);
            this._onChange();
            this.deferEmit('change', {type: 'update', data: updated});
        }
    }

    op.stop();

    // TODO: Should we decouple the updated array before return by default?
    return updated || [];
};

/**
 * Replaces an existing object with data from the new object without
 * breaking data references.
 * @param {Object} currentObj The object to alter.
 * @param {Object} newObj The new object to overwrite the existing one with.
 * @returns {*} Chain.
 * @private
 */
Collection.prototype._replaceObj = function (currentObj, newObj) {
    var i;

    // Remove item from indexes before its keys are rewritten
    this._removeFromIndexes(currentObj);

    // Remove existing keys from current object (in place, preserving the
    // object reference held elsewhere)
    for (i in currentObj) {
        if (currentObj.hasOwnProperty(i)) {
            delete currentObj[i];
        }
    }

    // Add new keys to current object
    for (i in newObj) {
        if (newObj.hasOwnProperty(i)) {
            currentObj[i] = newObj[i];
        }
    }

    // Update the item in the primary index
    if (!this._insertIntoIndexes(currentObj)) {
        throw(this.logIdentifier() + ' Primary key violation in update! Key violated: ' + currentObj[this._primaryKey]);
    }

    // Update the object in the collection data
    //this._data.splice(this._data.indexOf(currentObj), 1, newObj);

    return this;
};

/**
 * Helper method to update a document from its id.
 * @param {String} id The id of the document.
* @param {Object} update The object containing the key/values to update to. * @returns {Object} The document that was updated or undefined * if no document was updated. */ Collection.prototype.updateById = function (id, update) { var searchObj = {}; searchObj[this._primaryKey] = id; return this.update(searchObj, update)[0]; }; /** * Internal method for document updating. * @param {Object} doc The document to update. * @param {Object} update The object with key/value pairs to update the document with. * @param {Object} query The query object that we need to match to perform an update. * @param {Object} options An options object. * @param {String} path The current recursive path. * @param {String} opType The type of update operation to perform, if none is specified * default is to set new data against matching fields. * @returns {Boolean} True if the document was updated with new / changed data or * false if it was not updated because the data was the same. * @private */ Collection.prototype.updateObject = function (doc, update, query, options, path, opType) { // TODO: This method is long, try to break it into smaller pieces update = this.decouple(update); // Clear leading dots from path path = path || ''; if (path.substr(0, 1) === '.') { path = path.substr(1, path.length -1); } //var oldDoc = this.decouple(doc), var updated = false, recurseUpdated = false, operation, tmpArray, tmpIndex, tmpCount, tempIndex, tempKey, replaceObj, pk, pathInstance, sourceIsArray, updateIsArray, i; // Loop each key in the update object for (i in update) { if (update.hasOwnProperty(i)) { // Reset operation flag operation = false; // Check if the property starts with a dollar (function) if (i.substr(0, 1) === '$') { // Check for commands switch (i) { case '$key': case '$index': case '$data': case '$min': case '$max': // Ignore some operators operation = true; break; case '$each': operation = true; // Loop over the array of updates and run each one tmpCount = update.$each.length; for 
(tmpIndex = 0; tmpIndex < tmpCount; tmpIndex++) { recurseUpdated = this.updateObject(doc, update.$each[tmpIndex], query, options, path); if (recurseUpdated) { updated = true; } } updated = updated || recurseUpdated; break; case '$replace': operation = true; replaceObj = update.$replace; pk = this.primaryKey(); // Loop the existing item properties and compare with // the replacement (never remove primary key) for (tempKey in doc) { if (doc.hasOwnProperty(tempKey) && tempKey !== pk) { if (replaceObj[tempKey] === undefined) { // The new document doesn't have this field, remove it from the doc this._updateUnset(doc, tempKey); updated = true; } } } // Loop the new item props and update the doc for (tempKey in replaceObj) { if (replaceObj.hasOwnProperty(tempKey) && tempKey !== pk) { this._updateOverwrite(doc, tempKey, replaceObj[tempKey]); updated = true; } } break; default: operation = true; // Now run the operation recurseUpdated = this.updateObject(doc, update[i], query, options, path, i); updated = updated || recurseUpdated; break; } } // Check if the key has a .$ at the end, denoting an array lookup if (this._isPositionalKey(i)) { operation = true; // Modify i to be the name of the field i = i.substr(0, i.length - 2); pathInstance = new Path(path + '.' + i); // Check if the key is an array and has items if (doc[i] && doc[i] instanceof Array && doc[i].length) { tmpArray = []; // Loop the array and find matches to our search for (tmpIndex = 0; tmpIndex < doc[i].length; tmpIndex++) { if (this._match(doc[i][tmpIndex], pathInstance.value(query)[0], options, '', {})) { tmpArray.push(tmpIndex); } } // Loop the items that matched and update them for (tmpIndex = 0; tmpIndex < tmpArray.length; tmpIndex++) { recurseUpdated = this.updateObject(doc[i][tmpArray[tmpIndex]], update[i + '.$'], query, options, path + '.' 
+ i, opType); updated = updated || recurseUpdated; } } } if (!operation) { if (!opType && typeof(update[i]) === 'object') { if (doc[i] !== null && typeof(doc[i]) === 'object') { // Check if we are dealing with arrays sourceIsArray = doc[i] instanceof Array; updateIsArray = update[i] instanceof Array; if (sourceIsArray || updateIsArray) { // Check if the update is an object and the doc is an array if (!updateIsArray && sourceIsArray) { // Update is an object, source is an array so match the array items // with our query object to find the one to update inside this array // Loop the array and find matches to our search for (tmpIndex = 0; tmpIndex < doc[i].length; tmpIndex++) { recurseUpdated = this.updateObject(doc[i][tmpIndex], update[i], query, options, path + '.' + i, opType); updated = updated || recurseUpdated; } } else { // Either both source and update are arrays or the update is // an array and the source is not, so set source to update if (doc[i] !== update[i]) { this._updateProperty(doc, i, update[i]); updated = true; } } } else { // The doc key is an object so traverse the // update further recurseUpdated = this.updateObject(doc[i], update[i], query, options, path + '.' + i, opType); updated = updated || recurseUpdated; } } else { if (doc[i] !== update[i]) { this._updateProperty(doc, i, update[i]); updated = true; } } } else { switch (opType) { case '$inc': var doUpdate = true; // Check for a $min / $max operator if (update[i] > 0) { if (update.$max) { // Check current value if (doc[i] >= update.$max) { // Don't update doUpdate = false; } } } else if (update[i] < 0) { if (update.$min) { // Check current value if (doc[i] <= update.$min) { // Don't update doUpdate = false; } } } if (doUpdate) { this._updateIncrement(doc, i, update[i]); updated = true; } break; case '$cast': // Casts a property to the type specified if it is not already // that type. 
If the cast is an array or an object and the property // is not already that type a new array or object is created and // set to the property, overwriting the previous value switch (update[i]) { case 'array': if (!(doc[i] instanceof Array)) { // Cast to an array this._updateProperty(doc, i, update.$data || []); updated = true; } break; case 'object': if (!(doc[i] instanceof Object) || (doc[i] instanceof Array)) { // Cast to an object this._updateProperty(doc, i, update.$data || {}); updated = true; } break; case 'number': if (typeof doc[i] !== 'number') { // Cast to a number this._updateProperty(doc, i, Number(doc[i])); updated = true; } break; case 'string': if (typeof doc[i] !== 'string') { // Cast to a string this._updateProperty(doc, i, String(doc[i])); updated = true; } break; default: throw(this.logIdentifier() + ' Cannot update cast to unknown type: ' + update[i]); } break; case '$push': // Check if the target key is undefined and if so, create an array if (doc[i] === undefined) { // Initialise a new array this._updateProperty(doc, i, []); } // Check that the target key is an array if (doc[i] instanceof Array) { // Check for a $position modifier with an $each if (update[i].$position !== undefined && update[i].$each instanceof Array) { // Grab the position to insert at tempIndex = update[i].$position; // Loop the each array and push each item tmpCount = update[i].$each.length; for (tmpIndex = 0; tmpIndex < tmpCount; tmpIndex++) { this._updateSplicePush(doc[i], tempIndex + tmpIndex, update[i].$each[tmpIndex]); } } else if (update[i].$each instanceof Array) { // Do a loop over the each to push multiple items tmpCount = update[i].$each.length; for (tmpIndex = 0; tmpIndex < tmpCount; tmpIndex++) { this._updatePush(doc[i], update[i].$each[tmpIndex]); } } else { // Do a standard push this._updatePush(doc[i], update[i]); } updated = true; } else { throw(this.logIdentifier() + ' Cannot push to a key that is not an array! 
(' + i + ')'); } break; case '$pull': if (doc[i] instanceof Array) { tmpArray = []; // Loop the array and find matches to our search for (tmpIndex = 0; tmpIndex < doc[i].length; tmpIndex++) { if (this._match(doc[i][tmpIndex], update[i], options, '', {})) { tmpArray.push(tmpIndex); } } tmpCount = tmpArray.length; // Now loop the pull array and remove items to be pulled while (tmpCount this._updatePull(doc[i], tmpArray[tmpCount]); updated = true; } } break; case '$pullAll': if (doc[i] instanceof Array) { if (update[i] instanceof Array) { tmpArray = doc[i]; tmpCount = tmpArray.length; if (tmpCount > 0) { // Now loop the pull array and remove items to be pulled while (tmpCount for (tempIndex = 0; tempIndex < update[i].length; tempIndex++) { if (tmpArray[tmpCount] === update[i][tempIndex]) { this._updatePull(doc[i], tmpCount); tmpCount updated = true; } } if (tmpCount < 0) { break; } } } } else { throw(this.logIdentifier() + ' Cannot pullAll without being given an array of values to pull! (' + i + ')'); } } break; case '$addToSet': // Check if the target key is undefined and if so, create an array if (doc[i] === undefined) { // Initialise a new array this._updateProperty(doc, i, []); } // Check that the target key is an array if (doc[i] instanceof Array) { // Loop the target array and check for existence of item var targetArr = doc[i], targetArrIndex, targetArrCount = targetArr.length, objHash, addObj = true, optionObj = (options && options.$addToSet), hashMode, pathSolver; // Check if we have an options object for our operation if (update[i].$key) { hashMode = false; pathSolver = new Path(update[i].$key); objHash = pathSolver.value(update[i])[0]; // Remove the key from the object before we add it delete update[i].$key; } else if (optionObj && optionObj.key) { hashMode = false; pathSolver = new Path(optionObj.key); objHash = pathSolver.value(update[i])[0]; } else { objHash = this.jStringify(update[i]); hashMode = true; } for (targetArrIndex = 0; targetArrIndex < 
targetArrCount; targetArrIndex++) { if (hashMode) { // Check if objects match via a string hash (JSON) if (this.jStringify(targetArr[targetArrIndex]) === objHash) { // The object already exists, don't add it addObj = false; break; } } else { // Check if objects match based on the path if (objHash === pathSolver.value(targetArr[targetArrIndex])[0]) { // The object already exists, don't add it addObj = false; break; } } } if (addObj) { this._updatePush(doc[i], update[i]); updated = true; } } else { throw(this.logIdentifier() + ' Cannot addToSet on a key that is not an array! (' + i + ')'); } break; case '$splicePush': // Check if the target key is undefined and if so, create an array if (doc[i] === undefined) { // Initialise a new array this._updateProperty(doc, i, []); } // Check that the target key is an array if (doc[i] instanceof Array) { tempIndex = update.$index; if (tempIndex !== undefined) { delete update.$index; // Check for out of bounds index if (tempIndex > doc[i].length) { tempIndex = doc[i].length; } this._updateSplicePush(doc[i], tempIndex, update[i]); updated = true; } else { throw(this.logIdentifier() + ' Cannot splicePush without a $index integer value!'); } } else { throw(this.logIdentifier() + ' Cannot splicePush with a key that is not an array! (' + i + ')'); } break; case '$move': if (doc[i] instanceof Array) { // Loop the array and find matches to our search for (tmpIndex = 0; tmpIndex < doc[i].length; tmpIndex++) { if (this._match(doc[i][tmpIndex], update[i], options, '', {})) { var moveToIndex = update.$index; if (moveToIndex !== undefined) { delete update.$index; this._updateSpliceMove(doc[i], tmpIndex, moveToIndex); updated = true; } else { throw(this.logIdentifier() + ' Cannot move without a $index integer value!'); } break; } } } else { throw(this.logIdentifier() + ' Cannot move on a key that is not an array! 
(' + i + ')'); } break; case '$mul': this._updateMultiply(doc, i, update[i]); updated = true; break; case '$rename': this._updateRename(doc, i, update[i]); updated = true; break; case '$overwrite': this._updateOverwrite(doc, i, update[i]); updated = true; break; case '$unset': this._updateUnset(doc, i); updated = true; break; case '$clear': this._updateClear(doc, i); updated = true; break; case '$pop': if (doc[i] instanceof Array) { if (this._updatePop(doc[i], update[i])) { updated = true; } } else { throw(this.logIdentifier() + ' Cannot pop from a key that is not an array! (' + i + ')'); } break; case '$toggle': // Toggle the boolean property between true and false this._updateProperty(doc, i, !doc[i]); updated = true; break; default: if (doc[i] !== update[i]) { this._updateProperty(doc, i, update[i]); updated = true; } break; } } } } } return updated; }; /** * Determines if the passed key has an array positional mark (a dollar at the end * of its name). * @param {String} key The key to check. * @returns {Boolean} True if it is a positional or false if not. * @private */ Collection.prototype._isPositionalKey = function (key) { return key.substr(key.length - 2, 2) === '.$'; }; /** * Removes any documents from the collection that match the search query * key/values. * @param {Object} query The query object. * @param {Object=} options An options object. * @param {Function=} callback A callback method. * @returns {Array} An array of the documents that were removed. 
 */
Collection.prototype.remove = function (query, options, callback) {
    if (this.isDropped()) {
        throw(this.logIdentifier() + ' Cannot operate in a dropped state!');
    }

    var self = this,
        dataSet,
        index,
        arrIndex,
        returnArr,
        removeMethod,
        triggerOperation,
        doc,
        newDoc;

    // Support the (query, callback) signature
    if (typeof(options) === 'function') {
        callback = options;
        options = {};
    }

    // Convert queries from mongo dot notation to forerunner queries
    if (this.mongoEmulation()) {
        this.convertToFdb(query);
    }

    if (query instanceof Array) {
        returnArr = [];

        // Process each sub-query separately; noEmit suppresses per-item
        // remove events so a single combined event fires below
        for (arrIndex = 0; arrIndex < query.length; arrIndex++) {
            returnArr.push(this.remove(query[arrIndex], {noEmit: true}));
        }

        if (!options || (options && !options.noEmit)) {
            this._onRemove(returnArr);
        }

        if (callback) { callback(false, returnArr); }

        return returnArr;
    } else {
        returnArr = [];
        // $decouple: false so the found items are the actual stored documents
        dataSet = this.find(query, {$decouple: false});

        if (dataSet.length) {
            // Shared removal routine; note it mutates returnArr and the
            // outer "index" variable via closure
            removeMethod = function (dataItem) {
                // Remove the item from the collection's indexes
                self._removeFromIndexes(dataItem);

                // Remove data from internal stores
                index = self._data.indexOf(dataItem);
                self._dataRemoveAtIndex(index);

                returnArr.push(dataItem);
            };

            // Remove the data from the collection
            for (var i = 0; i < dataSet.length; i++) {
                doc = dataSet[i];

                if (self.willTrigger(self.TYPE_REMOVE, self.PHASE_BEFORE) || self.willTrigger(self.TYPE_REMOVE, self.PHASE_AFTER)) {
                    triggerOperation = {
                        type: 'remove'
                    };

                    // Triggers get a decoupled copy so they cannot mutate the
                    // document being removed
                    newDoc = self.decouple(doc);

                    if (self.processTrigger(triggerOperation, self.TYPE_REMOVE, self.PHASE_BEFORE, newDoc, newDoc) !== false) {
                        // The trigger didn't ask to cancel so execute the removal method
                        removeMethod(doc);

                        self.processTrigger(triggerOperation, self.TYPE_REMOVE, self.PHASE_AFTER, newDoc, newDoc);
                    }
                } else {
                    // No triggers to execute
                    removeMethod(doc);
                }
            }

            if (returnArr.length) {
                //op.time('Resolve chains');
                self.chainSend('remove', {
                    query: query,
                    dataSet: returnArr
                }, options);
                //op.time('Resolve chains');

                if (!options || (options && !options.noEmit)) {
                    this._onRemove(returnArr);
                }

                this._onChange();
                this.deferEmit('change', {type: 'remove', data: returnArr});
            }
        }

        if (callback) { callback(false, returnArr); }

        return returnArr;
    }
};

/**
 * Helper method that removes a document that matches the given id.
 * @param {String} id The id of the document to remove.
 * @returns {Object} The document that was removed or undefined if
 * nothing was removed.
 */
Collection.prototype.removeById = function (id) {
    var searchObj = {};
    searchObj[this._primaryKey] = id;
    return this.remove(searchObj)[0];
};

/**
 * Processes a deferred action queue.
 * @param {String} type The queue name to process.
 * @param {Function} callback A method to call when the queue has processed.
 * @param {Object=} resultObj A temp object to hold results in.
 */
Collection.prototype.processQueue = function (type, callback, resultObj) {
    var self = this,
        queue = this._deferQueue[type],
        deferThreshold = this._deferThreshold[type],
        deferTime = this._deferTime[type],
        dataArr,
        result;

    resultObj = resultObj || {
        deferred: true
    };

    if (queue.length) {
        // Process items up to the threshold
        if (queue.length > deferThreshold) {
            // Grab items up to the threshold value
            dataArr = queue.splice(0, deferThreshold);
        } else {
            // Grab all the remaining items
            dataArr = queue.splice(0, queue.length);
        }

        // Invoke the CRUD method named by "type" with this chunk
        result = self[type](dataArr);

        switch (type) {
            case 'insert':
                // Accumulate per-chunk insert results into the shared resultObj
                resultObj.inserted = resultObj.inserted || [];
                resultObj.failed = resultObj.failed || [];

                resultObj.inserted = resultObj.inserted.concat(result.inserted);
                resultObj.failed = resultObj.failed.concat(result.failed);
                break;
        }

        // Queue another process (asynchronously, so the UI/event loop can run)
        setTimeout(function () {
            self.processQueue.call(self, type, callback, resultObj);
        }, deferTime);
    } else {
        if (callback) { callback(resultObj); }

        this._asyncComplete(type);
    }

    // Check if all queues are complete
    if (!this.isProcessingQueue()) {
        this.deferEmit('queuesComplete');
    }
};

/**
 * Checks if any CRUD operations have been deferred and are still waiting to
 * be processed.
* @returns {Boolean} True if there are still deferred CRUD operations to process * or false if all queues are clear. */ Collection.prototype.isProcessingQueue = function () { var i; for (i in this._deferQueue) { if (this._deferQueue.hasOwnProperty(i)) { if (this._deferQueue[i].length) { return true; } } } return false; }; /** * Inserts a document or array of documents into the collection. * @param {Object|Array} data Either a document object or array of document * @param {Number=} index Optional index to insert the record at. * @param {Function=} callback Optional callback called once action is complete. * objects to insert into the collection. */ Collection.prototype.insert = function (data, index, callback) { if (this.isDropped()) { throw(this.logIdentifier() + ' Cannot operate in a dropped state!'); } if (typeof(index) === 'function') { callback = index; index = this._data.length; } else if (index === undefined) { index = this._data.length; } data = this.transformIn(data); return this._insertHandle(data, index, callback); }; /** * Inserts a document or array of documents into the collection. * @param {Object|Array} data Either a document object or array of document * @param {Number=} index Optional index to insert the record at. * @param {Function=} callback Optional callback called once action is complete. * objects to insert into the collection. 
*/ Collection.prototype._insertHandle = function (data, index, callback) { var //self = this, queue = this._deferQueue.insert, deferThreshold = this._deferThreshold.insert, //deferTime = this._deferTime.insert, inserted = [], failed = [], insertResult, resultObj, i; if (data instanceof Array) { // Check if there are more insert items than the insert defer // threshold, if so, break up inserts so we don't tie up the // ui or thread if (this._deferredCalls && data.length > deferThreshold) { // Break up insert into blocks this._deferQueue.insert = queue.concat(data); this._asyncPending('insert'); // Fire off the insert queue handler this.processQueue('insert', callback); return; } else { // Loop the array and add items for (i = 0; i < data.length; i++) { insertResult = this._insert(data[i], index + i); if (insertResult === true) { inserted.push(data[i]); } else { failed.push({ doc: data[i], reason: insertResult }); } } } } else { // Store the data item insertResult = this._insert(data, index); if (insertResult === true) { inserted.push(data); } else { failed.push({ doc: data, reason: insertResult }); } } resultObj = { deferred: false, inserted: inserted, failed: failed }; this._onInsert(inserted, failed); if (callback) { callback(resultObj); } this._onChange(); this.deferEmit('change', {type: 'insert', data: inserted}); return resultObj; }; /** * Internal method to insert a document into the collection. Will * check for index violations before allowing the document to be inserted. * @param {Object} doc The document to insert after passing index violation * tests. * @param {Number=} index Optional index to insert the document at. * @returns {Boolean|Object} True on success, false if no document passed, * or an object containing details about an index violation if one occurred. 
* @private */ Collection.prototype._insert = function (doc, index) { if (doc) { var self = this, indexViolation, triggerOperation, insertMethod, newDoc, capped = this.capped(), cappedSize = this.cappedSize(); this.ensurePrimaryKey(doc); // Check indexes are not going to be broken by the document indexViolation = this.<API key>(doc); insertMethod = function (doc) { // Add the item to the collection's indexes self._insertIntoIndexes(doc); // Check index overflow if (index > self._data.length) { index = self._data.length; } // Insert the document self._dataInsertAtIndex(index, doc); // Check capped collection status and remove first record // if we are over the threshold if (capped && self._data.length > cappedSize) { // Remove the first item in the data array self.removeById(self._data[0][self._primaryKey]); } //op.time('Resolve chains'); self.chainSend('insert', doc, {index: index}); //op.time('Resolve chains'); }; if (!indexViolation) { if (self.willTrigger(self.TYPE_INSERT, self.PHASE_BEFORE) || self.willTrigger(self.TYPE_INSERT, self.PHASE_AFTER)) { triggerOperation = { type: 'insert' }; if (self.processTrigger(triggerOperation, self.TYPE_INSERT, self.PHASE_BEFORE, {}, doc) !== false) { insertMethod(doc); if (self.willTrigger(self.TYPE_INSERT, self.PHASE_AFTER)) { // Clone the doc so that the programmer cannot update the internal document // on the "after" phase trigger newDoc = self.decouple(doc); self.processTrigger(triggerOperation, self.TYPE_INSERT, self.PHASE_AFTER, {}, newDoc); } } else { // The trigger just wants to cancel the operation return 'Trigger cancelled operation'; } } else { // No triggers to execute insertMethod(doc); } return true; } else { return 'Index violation in index: ' + indexViolation; } } return 'No document passed to insert'; }; /** * Inserts a document into the internal collection data array at * Inserts a document into the internal collection data array at * the specified index. * @param {number} index The index to insert at. 
* @param {object} doc The document to insert. * @private */ Collection.prototype._dataInsertAtIndex = function (index, doc) { this._data.splice(index, 0, doc); }; /** * Removes a document from the internal collection data array at * the specified index. * @param {number} index The index to remove from. * @private */ Collection.prototype._dataRemoveAtIndex = function (index) { this._data.splice(index, 1); }; /** * Replaces all data in the collection's internal data array with * the passed array of data. * @param {array} data The array of data to replace existing data with. * @private */ Collection.prototype._dataReplace = function (data) { // Clear the array - using a while loop with pop is by far the // fastest way to clear an array currently while (this._data.length) { this._data.pop(); } // Append new items to the array this._data = this._data.concat(data); }; /** * Inserts a document into the collection indexes. * @param {Object} doc The document to insert. * @private */ Collection.prototype._insertIntoIndexes = function (doc) { var arr = this._indexByName, arrIndex, violated, jString = this.jStringify(doc); // Insert to primary key index violated = this._primaryIndex.uniqueSet(doc[this._primaryKey], doc); this._primaryCrc.uniqueSet(doc[this._primaryKey], jString); this._crcLookup.uniqueSet(jString, doc); // Insert into other indexes for (arrIndex in arr) { if (arr.hasOwnProperty(arrIndex)) { arr[arrIndex].insert(doc); } } return violated; }; /** * Removes a document from the collection indexes. * @param {Object} doc The document to remove. 
* @private */ Collection.prototype._removeFromIndexes = function (doc) { var arr = this._indexByName, arrIndex, jString = this.jStringify(doc); // Remove from primary key index this._primaryIndex.unSet(doc[this._primaryKey]); this._primaryCrc.unSet(doc[this._primaryKey]); this._crcLookup.unSet(jString); // Remove from other indexes for (arrIndex in arr) { if (arr.hasOwnProperty(arrIndex)) { arr[arrIndex].remove(doc); } } }; /** * Updates collection index data for the passed document. * @param {Object} oldDoc The old document as it was before the update. * @param {Object} newDoc The document as it now is after the update. * @private */ Collection.prototype._updateIndexes = function (oldDoc, newDoc) { this._removeFromIndexes(oldDoc); this._insertIntoIndexes(newDoc); }; /** * Rebuild collection indexes. * @private */ Collection.prototype._rebuildIndexes = function () { var arr = this._indexByName, arrIndex; // Remove from other indexes for (arrIndex in arr) { if (arr.hasOwnProperty(arrIndex)) { arr[arrIndex].rebuild(); } } }; /** * Uses the passed query to generate a new collection with results * matching the query parameters. * * @param {Object} query The query object to generate the subset with. * @param {Object=} options An options object. * @returns {*} */ Collection.prototype.subset = function (query, options) { var result = this.find(query, options); return new Collection() .subsetOf(this) .primaryKey(this._primaryKey) .setData(result); }; /** * Gets / sets the collection that this collection is a subset of. * @param {Collection=} collection The collection to set as the parent of this subset. * @returns {Collection} */ Shared.synthesize(Collection.prototype, 'subsetOf'); /** * Checks if the collection is a subset of the passed collection. * @param {Collection} collection The collection to test against. * @returns {Boolean} True if the passed collection is the parent of * the current collection. 
*/ Collection.prototype.isSubsetOf = function (collection) { return this._subsetOf === collection; }; /** * Find the distinct values for a specified field across a single collection and * returns the results in an array. * @param {String} key The field path to return distinct values for e.g. "person.name". * @param {Object=} query The query to use to filter the documents used to return values from. * @param {Object=} options The query options to use when running the query. * @returns {Array} */ Collection.prototype.distinct = function (key, query, options) { if (this.isDropped()) { throw(this.logIdentifier() + ' Cannot operate in a dropped state!'); } var data = this.find(query, options), pathSolver = new Path(key), valueUsed = {}, distinctValues = [], value, i; // Loop the data and build array of distinct values for (i = 0; i < data.length; i++) { value = pathSolver.value(data[i])[0]; if (value && !valueUsed[value]) { valueUsed[value] = true; distinctValues.push(value); } } return distinctValues; }; /** * Helper method to find a document by it's id. * @param {String} id The id of the document. * @param {Object=} options The options object, allowed keys are sort and limit. * @returns {Array} The items that were updated. */ Collection.prototype.findById = function (id, options) { var searchObj = {}; searchObj[this._primaryKey] = id; return this.find(searchObj, options)[0]; }; /** * Finds all documents that contain the passed string or search object * regardless of where the string might occur within the document. This * will match strings from the start, middle or end of the document's * string (partial match). * @param search The string to search for. Case sensitive. * @param options A standard find() options object. * @returns {Array} An array of documents that matched the search string. 
*/ Collection.prototype.peek = function (search, options) { // Loop all items var arr = this._data, arrCount = arr.length, arrIndex, arrItem, tempColl = new Collection(), typeOfSearch = typeof search; if (typeOfSearch === 'string') { for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { // Get json representation of object arrItem = this.jStringify(arr[arrIndex]); // Check if string exists in object json if (arrItem.indexOf(search) > -1) { // Add this item to the temp collection tempColl.insert(arr[arrIndex]); } } return tempColl.find({}, options); } else { return this.find(search, options); } }; /** * Provides a query plan / operations log for a query. * @param {Object} query The query to execute. * @param {Object=} options Optional options object. * @returns {Object} The query plan. */ Collection.prototype.explain = function (query, options) { var result = this.find(query, options); return result.__fdbOp._data; }; /** * Generates an options object with default values or adds default * values to a passed object if those values are not currently set * to anything. * @param {object=} obj Optional options object to modify. * @returns {object} The options object. */ Collection.prototype.options = function (obj) { obj = obj || {}; obj.$decouple = obj.$decouple !== undefined ? obj.$decouple : true; obj.$explain = obj.$explain !== undefined ? obj.$explain : false; return obj; }; /** * Queries the collection based on the query object passed. * @param {Object} query The query key/values that a document must match in * order for it to be returned in the result array. * @param {Object=} options An optional options object. * @param {Function=} callback !! DO NOT USE, THIS IS NON-OPERATIONAL !! * Optional callback. If specified the find process * will not return a value and will assume that you wish to operate under an * async mode. 
This will break up large find requests into smaller chunks and * process them in a non-blocking fashion allowing large datasets to be queried * without causing the browser UI to pause. Results from this type of operation * will be passed back to the callback once completed. * * @returns {Array} The results array from the find operation, containing all * documents that matched the query. */ Collection.prototype.find = function (query, options, callback) { // Convert queries from mongo dot notation to forerunner queries if (this.mongoEmulation()) { this.convertToFdb(query); } if (callback) { // Check the size of the collection's data array // Split operation into smaller tasks and callback when complete callback('Callbacks for the find() operation are not yet implemented!', []); return []; } return this._find.apply(this, arguments); }; Collection.prototype._find = function (query, options) { if (this.isDropped()) { throw(this.logIdentifier() + ' Cannot operate in a dropped state!'); } // TODO: This method is quite long, break into smaller pieces query = query || {}; options = this.options(options); var op = this._metrics.create('find'), pk = this.primaryKey(), self = this, analysis, scanLength, requiresTableScan = true, resultArr, joinCollectionIndex, joinIndex, joinCollection = {}, joinQuery, joinPath, joinCollectionName, <API key>, joinMatch, joinMatchIndex, joinSearchQuery, joinSearchOptions, joinMulti, joinRequire, joinFindResults, joinFindResult, joinItem, joinPrefix, <API key>, resultIndex, resultRemove = [], index, i, j, k, l, fieldListOn = [], fieldListOff = [], elemMatchPathSolver, elemMatchSubArr, elemMatchSpliceArr, matcherTmpOptions = {}, result, cursor = {}, pathSolver, //renameFieldMethod, //renameFieldPath, matcher = function (doc) { return self._match(doc, query, options, 'and', matcherTmpOptions); }; op.start(); if (query) { // Get query analysis to execute best optimised code path op.time('analyseQuery'); analysis = 
this._analyseQuery(self.decouple(query), options, op); op.time('analyseQuery'); op.data('analysis', analysis); if (analysis.hasJoin && analysis.queriesJoin) { // The query has a join and tries to limit by it's joined data // Get an instance reference to the join collections op.time('joinReferences'); for (joinIndex = 0; joinIndex < analysis.joinsOn.length; joinIndex++) { joinCollectionName = analysis.joinsOn[joinIndex]; joinPath = new Path(analysis.joinQueries[joinCollectionName]); joinQuery = joinPath.value(query)[0]; joinCollection[analysis.joinsOn[joinIndex]] = this._db.collection(analysis.joinsOn[joinIndex]).subset(joinQuery); // Remove join clause from main query delete query[analysis.joinQueries[joinCollectionName]]; } op.time('joinReferences'); } // Check if an index lookup can be used to return this result if (analysis.indexMatch.length && (!options || (options && !options.$skipIndex))) { op.data('index.potential', analysis.indexMatch); op.data('index.used', analysis.indexMatch[0].index); // Get the data from the index op.time('indexLookup'); resultArr = analysis.indexMatch[0].lookup || []; op.time('indexLookup'); // Check if the index coverage is all keys, if not we still need to table scan it if (analysis.indexMatch[0].keyData.totalKeyCount === analysis.indexMatch[0].keyData.score) { // Don't require a table scan to find relevant documents requiresTableScan = false; } } else { op.flag('usedIndex', false); } if (requiresTableScan) { if (resultArr && resultArr.length) { scanLength = resultArr.length; op.time('tableScan: ' + scanLength); // Filter the source data and return the result resultArr = resultArr.filter(matcher); } else { // Filter the source data and return the result scanLength = this._data.length; op.time('tableScan: ' + scanLength); resultArr = this._data.filter(matcher); } op.time('tableScan: ' + scanLength); } // Order the array if we were passed a sort clause if (options.$orderBy) { op.time('sort'); resultArr = this.sort(options.$orderBy, 
resultArr); op.time('sort'); } if (options.$page !== undefined && options.$limit !== undefined) { // Record paging data cursor.page = options.$page; cursor.pages = Math.ceil(resultArr.length / options.$limit); cursor.records = resultArr.length; // Check if we actually need to apply the paging logic if (options.$page && options.$limit > 0) { op.data('cursor', cursor); // Skip to the page specified based on limit resultArr.splice(0, options.$page * options.$limit); } } if (options.$skip) { cursor.skip = options.$skip; // Skip past the number of records specified resultArr.splice(0, options.$skip); op.data('skip', options.$skip); } if (options.$limit && resultArr && resultArr.length > options.$limit) { cursor.limit = options.$limit; resultArr.length = options.$limit; op.data('limit', options.$limit); } if (options.$decouple) { // Now decouple the data from the original objects op.time('decouple'); resultArr = this.decouple(resultArr); op.time('decouple'); op.data('flag.decouple', true); } // Now process any joins on the final data if (options.$join) { for (joinCollectionIndex = 0; joinCollectionIndex < options.$join.length; joinCollectionIndex++) { for (joinCollectionName in options.$join[joinCollectionIndex]) { if (options.$join[joinCollectionIndex].hasOwnProperty(joinCollectionName)) { // Set the key to store the join result in to the collection name by default <API key> = joinCollectionName; // Get the join collection instance from the DB if (joinCollection[joinCollectionName]) { <API key> = joinCollection[joinCollectionName]; } else { <API key> = this._db.collection(joinCollectionName); } // Get the match data for the join joinMatch = options.$join[joinCollectionIndex][joinCollectionName]; // Loop our result data array for (resultIndex = 0; resultIndex < resultArr.length; resultIndex++) { // Loop the join conditions and build a search object from them joinSearchQuery = {}; joinMulti = false; joinRequire = false; joinPrefix = ''; for (joinMatchIndex in joinMatch) { 
if (joinMatch.hasOwnProperty(joinMatchIndex)) { // Check the join condition name for a special command operator if (joinMatchIndex.substr(0, 1) === '$') { // Special command switch (joinMatchIndex) { case '$where': if (joinMatch[joinMatchIndex].query) { // Commented old code here, new one does dynamic reverse lookups //joinSearchQuery = joinMatch[joinMatchIndex].query; joinSearchQuery = self.<API key>(joinMatch[joinMatchIndex].query, resultArr[resultIndex]); } if (joinMatch[joinMatchIndex].options) { joinSearchOptions = joinMatch[joinMatchIndex].options; } break; case '$as': // Rename the collection when stored in the result document <API key> = joinMatch[joinMatchIndex]; break; case '$multi': // Return an array of documents instead of a single matching document joinMulti = joinMatch[joinMatchIndex]; break; case '$require': // Remove the result item if no matching join data is found joinRequire = joinMatch[joinMatchIndex]; break; case '$prefix': // Add a prefix to properties mixed in joinPrefix = joinMatch[joinMatchIndex]; break; default: break; } } else { // Get the data to match against and store in the search object // Resolve complex referenced query joinSearchQuery[joinMatchIndex] = self.<API key>(joinMatch[joinMatchIndex], resultArr[resultIndex]); } } } // Do a find on the target collection against the match data joinFindResults = <API key>.find(joinSearchQuery, joinSearchOptions); // Check if we require a joined row to allow the result item if (!joinRequire || (joinRequire && joinFindResults[0])) { // Join is not required or condition is met if (<API key> === '$root') { // The property name to store the join results in is $root // which means we need to mixin the results but this only // works if joinMulti is disabled if (joinMulti !== false) { // Throw an exception here as this join is not physically possible! 
throw(this.logIdentifier() + ' Cannot combine [$as: "$root"] with [$multi: true] in $join clause!'); } // Mixin the result joinFindResult = joinFindResults[0]; joinItem = resultArr[resultIndex]; for (l in joinFindResult) { if (joinFindResult.hasOwnProperty(l) && joinItem[joinPrefix + l] === undefined) { // Properties are only mixed in if they do not already exist // in the target item (are undefined). Using a prefix denoted via // $prefix is a good way to prevent property name conflicts joinItem[joinPrefix + l] = joinFindResult[l]; } } } else { resultArr[resultIndex][<API key>] = joinMulti === false ? joinFindResults[0] : joinFindResults; } } else { // Join required but condition not met, add item to removal queue resultRemove.push(resultArr[resultIndex]); } } } } } op.data('flag.join', true); } // Process removal queue if (resultRemove.length) { op.time('removalQueue'); for (i = 0; i < resultRemove.length; i++) { index = resultArr.indexOf(resultRemove[i]); if (index > -1) { resultArr.splice(index, 1); } } op.time('removalQueue'); } if (options.$transform) { op.time('transform'); for (i = 0; i < resultArr.length; i++) { resultArr.splice(i, 1, options.$transform(resultArr[i])); } op.time('transform'); op.data('flag.transform', true); } // Process transforms if (this._transformEnabled && this._transformOut) { op.time('transformOut'); resultArr = this.transformOut(resultArr); op.time('transformOut'); } op.data('results', resultArr.length); } else { resultArr = []; } // Check for an $as operator in the options object and if it exists // iterate over the fields and generate a rename function that will // operate over the entire returned data array and rename each object's // fields to their new names // TODO: Enable $as in collection find to allow renaming fields /*if (options.$as) { renameFieldPath = new Path(); renameFieldMethod = function (obj, oldFieldPath, newFieldName) { renameFieldPath.path(oldFieldPath); renameFieldPath.rename(newFieldName); }; for (i in 
options.$as) { if (options.$as.hasOwnProperty(i)) { } } }*/ if (!options.$aggregate) { // Generate a list of fields to limit data by // Each property starts off being enabled by default (= 1) then // if any property is explicitly specified as 1 then all switch to // zero except _id. // Any that are explicitly set to zero are switched off. op.time('scanFields'); for (i in options) { if (options.hasOwnProperty(i) && i.indexOf('$') !== 0) { if (options[i] === 1) { fieldListOn.push(i); } else if (options[i] === 0) { fieldListOff.push(i); } } } op.time('scanFields'); // Limit returned fields by the options data if (fieldListOn.length || fieldListOff.length) { op.data('flag.limitFields', true); op.data('limitFields.on', fieldListOn); op.data('limitFields.off', fieldListOff); op.time('limitFields'); // We have explicit fields switched on or off for (i = 0; i < resultArr.length; i++) { result = resultArr[i]; for (j in result) { if (result.hasOwnProperty(j)) { if (fieldListOn.length) { // We have explicit fields switched on so remove all fields // that are not explicitly switched on // Check if the field name is not the primary key if (j !== pk) { if (fieldListOn.indexOf(j) === -1) { // This field is not in the on list, remove it delete result[j]; } } } if (fieldListOff.length) { // We have explicit fields switched off so remove fields // that are explicitly switched off if (fieldListOff.indexOf(j) > -1) { // This field is in the off list, remove it delete result[j]; } } } } } op.time('limitFields'); } // Now run any projections on the data required if (options.$elemMatch) { op.data('flag.elemMatch', true); op.time('<API key>'); for (i in options.$elemMatch) { if (options.$elemMatch.hasOwnProperty(i)) { elemMatchPathSolver = new Path(i); // Loop the results array for (j = 0; j < resultArr.length; j++) { elemMatchSubArr = elemMatchPathSolver.value(resultArr[j])[0]; // Check we have a sub-array to loop if (elemMatchSubArr && elemMatchSubArr.length) { // Loop the sub-array and 
check for projection query matches for (k = 0; k < elemMatchSubArr.length; k++) { // Check if the current item in the sub-array matches the projection query if (self._match(elemMatchSubArr[k], options.$elemMatch[i], options, '', {})) { // The item matches the projection query so set the sub-array // to an array that ONLY contains the matching item and then // exit the loop since we only want to match the first item elemMatchPathSolver.set(resultArr[j], i, [elemMatchSubArr[k]]); break; } } } } } } op.time('<API key>'); } if (options.$elemsMatch) { op.data('flag.elemsMatch', true); op.time('<API key>'); for (i in options.$elemsMatch) { if (options.$elemsMatch.hasOwnProperty(i)) { elemMatchPathSolver = new Path(i); // Loop the results array for (j = 0; j < resultArr.length; j++) { elemMatchSubArr = elemMatchPathSolver.value(resultArr[j])[0]; // Check we have a sub-array to loop if (elemMatchSubArr && elemMatchSubArr.length) { elemMatchSpliceArr = []; // Loop the sub-array and check for projection query matches for (k = 0; k < elemMatchSubArr.length; k++) { // Check if the current item in the sub-array matches the projection query if (self._match(elemMatchSubArr[k], options.$elemsMatch[i], options, '', {})) { // The item matches the projection query so add it to the final array elemMatchSpliceArr.push(elemMatchSubArr[k]); } } // Now set the final sub-array to the matched items elemMatchPathSolver.set(resultArr[j], i, elemMatchSpliceArr); } } } } op.time('<API key>'); } } // Process aggregation if (options.$aggregate) { op.data('flag.aggregate', true); op.time('aggregate'); pathSolver = new Path(options.$aggregate); resultArr = pathSolver.value(resultArr); op.time('aggregate'); } op.stop(); resultArr.__fdbOp = op; resultArr.$cursor = cursor; return resultArr; }; Collection.prototype.<API key> = function (query, item) { var self = this, newQuery, propType, propVal, pathResult, i; if (typeof query === 'string') { // Check if the property name starts with a back-reference 
if (query.substr(0, 3) === '$$.') { // Fill the query with a back-referenced value pathResult = new Path(query.substr(3, query.length - 3)).value(item); } else { pathResult = new Path(query).value(item); } if (pathResult.length > 1) { return {$in: pathResult}; } else { return pathResult[0]; } } newQuery = {}; for (i in query) { if (query.hasOwnProperty(i)) { propType = typeof query[i]; propVal = query[i]; switch (propType) { case 'string': // Check if the property name starts with a back-reference if (propVal.substr(0, 3) === '$$.') { // Fill the query with a back-referenced value newQuery[i] = new Path(propVal.substr(3, propVal.length - 3)).value(item)[0]; } else { newQuery[i] = propVal; } break; case 'object': newQuery[i] = self.<API key>(propVal, item); break; default: newQuery[i] = propVal; break; } } } return newQuery; }; /** * Returns one document that satisfies the specified query criteria. If multiple * documents satisfy the query, this method returns the first document to match * the query. * @returns {*} */ Collection.prototype.findOne = function () { return (this.find.apply(this, arguments))[0]; }; /** * Gets the index in the collection data array of the first item matched by * the passed query object. * @param {Object} query The query to run to find the item to return the index of. * @param {Object=} options An options object. * @returns {Number} */ Collection.prototype.indexOf = function (query, options) { var item = this.find(query, {$decouple: false})[0], sortedData; if (item) { if (!options || options && !options.$orderBy) { // Basic lookup from order of insert return this._data.indexOf(item); } else { // Trying to locate index based on query with sort order options.$decouple = false; sortedData = this.find(query, options); return sortedData.indexOf(item); } } return -1; }; /** * Returns the index of the document identified by the passed item's primary key. 
* @param {*} itemLookup The document whose primary key should be used to lookup * or the id to lookup. * @param {Object=} options An options object. * @returns {Number} The index the item with the matching primary key is occupying. */ Collection.prototype.indexOfDocById = function (itemLookup, options) { var item, sortedData; if (typeof itemLookup !== 'object') { item = this._primaryIndex.get(itemLookup); } else { item = this._primaryIndex.get(itemLookup[this._primaryKey]); } if (item) { if (!options || options && !options.$orderBy) { // Basic lookup return this._data.indexOf(item); } else { // Sorted lookup options.$decouple = false; sortedData = this.find({}, options); return sortedData.indexOf(item); } } return -1; }; /** * Removes a document from the collection by it's index in the collection's * data array. * @param {Number} index The index of the document to remove. * @returns {Object} The document that has been removed or false if none was * removed. */ Collection.prototype.removeByIndex = function (index) { var doc, docId; doc = this._data[index]; if (doc !== undefined) { doc = this.decouple(doc); docId = doc[this.primaryKey()]; return this.removeById(docId); } return false; }; /** * Gets / sets the collection transform options. * @param {Object} obj A collection transform options object. * @returns {*} */ Collection.prototype.transform = function (obj) { if (obj !== undefined) { if (typeof obj === "object") { if (obj.enabled !== undefined) { this._transformEnabled = obj.enabled; } if (obj.dataIn !== undefined) { this._transformIn = obj.dataIn; } if (obj.dataOut !== undefined) { this._transformOut = obj.dataOut; } } else { this._transformEnabled = obj !== false; } return this; } return { enabled: this._transformEnabled, dataIn: this._transformIn, dataOut: this._transformOut }; }; /** * Transforms data using the set transformIn method. * @param {Object} data The data to transform. 
* @returns {*} */ Collection.prototype.transformIn = function (data) { if (this._transformEnabled && this._transformIn) { if (data instanceof Array) { var finalArr = [], i; for (i = 0; i < data.length; i++) { finalArr[i] = this._transformIn(data[i]); } return finalArr; } else { return this._transformIn(data); } } return data; }; /** * Transforms data using the set transformOut method. * @param {Object} data The data to transform. * @returns {*} */ Collection.prototype.transformOut = function (data) { if (this._transformEnabled && this._transformOut) { if (data instanceof Array) { var finalArr = [], i; for (i = 0; i < data.length; i++) { finalArr[i] = this._transformOut(data[i]); } return finalArr; } else { return this._transformOut(data); } } return data; }; /** * Sorts an array of documents by the given sort path. * @param {*} sortObj The keys and orders the array objects should be sorted by. * @param {Array} arr The array of documents to sort. * @returns {Array} */ Collection.prototype.sort = function (sortObj, arr) { // Make sure we have an array object arr = arr || []; var sortArr = [], sortKey, sortSingleObj; for (sortKey in sortObj) { if (sortObj.hasOwnProperty(sortKey)) { sortSingleObj = {}; sortSingleObj[sortKey] = sortObj[sortKey]; sortSingleObj.___fdbKey = String(sortKey); sortArr.push(sortSingleObj); } } if (sortArr.length < 2) { // There is only one sort criteria, do a simple sort and return it return this._sort(sortObj, arr); } else { return this._bucketSort(sortArr, arr); } }; /** * Takes array of sort paths and sorts them into buckets before returning final * array fully sorted by multi-keys. 
* @param keyArr * @param arr * @returns {*} * @private */ Collection.prototype._bucketSort = function (keyArr, arr) { var keyObj = keyArr.shift(), arrCopy, bucketData, bucketOrder, bucketKey, buckets, i, finalArr = []; if (keyArr.length > 0) { // Sort array by bucket key arr = this._sort(keyObj, arr); // Split items into buckets bucketData = this.bucket(keyObj.___fdbKey, arr); bucketOrder = bucketData.order; buckets = bucketData.buckets; // Loop buckets and sort contents for (i = 0; i < bucketOrder.length; i++) { bucketKey = bucketOrder[i]; arrCopy = [].concat(keyArr); finalArr = finalArr.concat(this._bucketSort(arrCopy, buckets[bucketKey])); } return finalArr; } else { return this._sort(keyObj, arr); } }; /** * Sorts array by individual sort path. * @param key * @param arr * @returns {Array|*} * @private */ Collection.prototype._sort = function (key, arr) { var self = this, sorterMethod, pathSolver = new Path(), dataPath = pathSolver.parse(key, true)[0]; pathSolver.path(dataPath.path); if (dataPath.value === 1) { // Sort ascending sorterMethod = function (a, b) { var valA = pathSolver.value(a)[0], valB = pathSolver.value(b)[0]; return self.sortAsc(valA, valB); }; } else if (dataPath.value === -1) { // Sort descending sorterMethod = function (a, b) { var valA = pathSolver.value(a)[0], valB = pathSolver.value(b)[0]; return self.sortDesc(valA, valB); }; } else { throw(this.logIdentifier() + ' $orderBy clause has invalid direction: ' + dataPath.value + ', accepted values are 1 or -1 for ascending or descending!'); } return arr.sort(sorterMethod); }; /** * Takes an array of objects and returns a new object with the array items * split into buckets by the passed key. * @param {String} key The key to split the array into buckets by. * @param {Array} arr An array of objects. 
* @returns {Object} */ Collection.prototype.bucket = function (key, arr) { var i, oldField, field, fieldArr = [], buckets = {}; for (i = 0; i < arr.length; i++) { field = String(arr[i][key]); if (oldField !== field) { fieldArr.push(field); oldField = field; } buckets[field] = buckets[field] || []; buckets[field].push(arr[i]); } return { buckets: buckets, order: fieldArr }; }; /** * Internal method that takes a search query and options and returns an object * containing details about the query which can be used to optimise the search. * * @param query * @param options * @param op * @returns {Object} * @private */ Collection.prototype._analyseQuery = function (query, options, op) { var analysis = { queriesOn: [this._name], indexMatch: [], hasJoin: false, queriesJoin: false, joinQueries: {}, query: query, options: options }, joinCollectionIndex, joinCollectionName, joinCollections = [], <API key> = [], queryPath, index, indexMatchData, indexRef, indexRefName, indexLookup, pathSolver, queryKeyCount, i; // Check if the query is a primary key lookup op.time('checkIndexes'); pathSolver = new Path(); queryKeyCount = pathSolver.countKeys(query); if (queryKeyCount) { if (query[this._primaryKey] !== undefined) { // Return item via primary key possible op.time('checkIndexMatch: Primary Key'); analysis.indexMatch.push({ lookup: this._primaryIndex.lookup(query, options), keyData: { matchedKeys: [this._primaryKey], totalKeyCount: queryKeyCount, score: 1 }, index: this._primaryIndex }); op.time('checkIndexMatch: Primary Key'); } // Check if an index can speed up the query for (i in this._indexById) { if (this._indexById.hasOwnProperty(i)) { indexRef = this._indexById[i]; indexRefName = indexRef.name(); op.time('checkIndexMatch: ' + indexRefName); indexMatchData = indexRef.match(query, options); if (indexMatchData.score > 0) { // This index can be used, store it indexLookup = indexRef.lookup(query, options); analysis.indexMatch.push({ lookup: indexLookup, keyData: indexMatchData, 
index: indexRef }); } op.time('checkIndexMatch: ' + indexRefName); if (indexMatchData.score === queryKeyCount) { // Found an optimal index, do not check for any more break; } } } op.time('checkIndexes'); // Sort array descending on index key count (effectively a measure of relevance to the query) if (analysis.indexMatch.length > 1) { op.time('findOptimalIndex'); analysis.indexMatch.sort(function (a, b) { if (a.keyData.score > b.keyData.score) { // This index has a higher score than the other return -1; } if (a.keyData.score < b.keyData.score) { // This index has a lower score than the other return 1; } // The indexes have the same score but can still be compared by the number of records // they return from the query. The fewer records they return the better so order by // record count if (a.keyData.score === b.keyData.score) { return a.lookup.length - b.lookup.length; } }); op.time('findOptimalIndex'); } } // Check for join data if (options.$join) { analysis.hasJoin = true; // Loop all join operations for (joinCollectionIndex = 0; joinCollectionIndex < options.$join.length; joinCollectionIndex++) { // Loop the join collections and keep a reference to them for (joinCollectionName in options.$join[joinCollectionIndex]) { if (options.$join[joinCollectionIndex].hasOwnProperty(joinCollectionName)) { joinCollections.push(joinCollectionName); // Check if the join uses an $as operator if ('$as' in options.$join[joinCollectionIndex][joinCollectionName]) { <API key>.push(options.$join[joinCollectionIndex][joinCollectionName].$as); } else { <API key>.push(joinCollectionName); } } } } // Loop the join collection references and determine if the query references // any of the collections that are used in the join. If there no queries against // joined collections the find method can use a code path optimised for this. // Queries against joined collections requires the joined collections to be filtered // first and then joined so requires a little more work. 
for (index = 0; index < <API key>.length; index++) { // Check if the query references any collection data that the join will create queryPath = this.<API key>(query, <API key>[index], ''); if (queryPath) { analysis.joinQueries[joinCollections[index]] = queryPath; analysis.queriesJoin = true; } } analysis.joinsOn = joinCollections; analysis.queriesOn = analysis.queriesOn.concat(joinCollections); } return analysis; }; /** * Checks if the passed query references this collection. * @param query * @param collection * @param path * @returns {*} * @private */ Collection.prototype.<API key> = function (query, collection, path) { var i; for (i in query) { if (query.hasOwnProperty(i)) { // Check if this key is a reference match if (i === collection) { if (path) { path += '.'; } return path + i; } else { if (typeof(query[i]) === 'object') { // Recurse if (path) { path += '.'; } path += i; return this.<API key>(query[i], collection, path); } } } } return false; }; /** * Returns the number of documents currently in the collection. * @returns {Number} */ Collection.prototype.count = function (query, options) { if (!query) { return this._data.length; } else { // Run query and return count return this.find(query, options).length; } }; /** * Finds sub-documents from the collection's documents. * @param {Object} match The query object to use when matching parent documents * from which the sub-documents are queried. * @param {String} path The path string used to identify the key in which * sub-documents are stored in parent documents. * @param {Object=} subDocQuery The query to use when matching which sub-documents * to return. * @param {Object=} subDocOptions The options object to use when querying for * sub-documents. 
* @returns {*} */ Collection.prototype.findSub = function (match, path, subDocQuery, subDocOptions) { var pathHandler = new Path(path), docArr = this.find(match), docCount = docArr.length, docIndex, subDocArr, subDocCollection = this._db.collection('__FDB_temp_' + this.objectId()), subDocResults, resultObj = { parents: docCount, subDocTotal: 0, subDocs: [], pathFound: false, err: '' }; subDocOptions = subDocOptions || {}; for (docIndex = 0; docIndex < docCount; docIndex++) { subDocArr = pathHandler.value(docArr[docIndex])[0]; if (subDocArr) { subDocCollection.setData(subDocArr); subDocResults = subDocCollection.find(subDocQuery, subDocOptions); if (subDocOptions.returnFirst && subDocResults.length) { return subDocResults[0]; } if (subDocOptions.$split) { resultObj.subDocs.push(subDocResults); } else { resultObj.subDocs = resultObj.subDocs.concat(subDocResults); } resultObj.subDocTotal += subDocResults.length; resultObj.pathFound = true; } } // Drop the sub-document collection subDocCollection.drop(); // Check if the call should not return stats, if so return only subDocs array if (subDocOptions.$stats) { return resultObj; } else { return resultObj.subDocs; } if (!resultObj.pathFound) { resultObj.err = 'No objects found in the parent documents with a matching path of: ' + path; } return resultObj; }; /** * Finds the first sub-document from the collection's documents that matches * the subDocQuery parameter. * @param {Object} match The query object to use when matching parent documents * from which the sub-documents are queried. * @param {String} path The path string used to identify the key in which * sub-documents are stored in parent documents. * @param {Object=} subDocQuery The query to use when matching which sub-documents * to return. * @param {Object=} subDocOptions The options object to use when querying for * sub-documents. 
* @returns {Object} */ Collection.prototype.findSubOne = function (match, path, subDocQuery, subDocOptions) { return this.findSub(match, path, subDocQuery, subDocOptions)[0]; }; /** * Checks that the passed document will not violate any index rules if * inserted into the collection. * @param {Object} doc The document to check indexes against. * @returns {Boolean} Either false (no violation occurred) or true if * a violation was detected. */ Collection.prototype.<API key> = function (doc) { var indexViolated, arr = this._indexByName, arrIndex, arrItem; // Check the item's primary key is not already in use if (this._primaryIndex.get(doc[this._primaryKey])) { indexViolated = this._primaryIndex; } else { // Check violations of other indexes for (arrIndex in arr) { if (arr.hasOwnProperty(arrIndex)) { arrItem = arr[arrIndex]; if (arrItem.unique()) { if (arrItem.violation(doc)) { indexViolated = arrItem; break; } } } } } return indexViolated ? indexViolated.name() : false; }; /** * Creates an index on the specified keys. * @param {Object} keys The object containing keys to index. * @param {Object} options An options object. 
* @returns {*} */ Collection.prototype.ensureIndex = function (keys, options) { if (this.isDropped()) { throw(this.logIdentifier() + ' Cannot operate in a dropped state!'); } this._indexByName = this._indexByName || {}; this._indexById = this._indexById || {}; var index, time = { start: new Date().getTime() }; if (options) { switch (options.type) { case 'hashed': index = new IndexHashMap(keys, options, this); break; case 'btree': index = new IndexBinaryTree(keys, options, this); break; default: // Default index = new IndexHashMap(keys, options, this); break; } } else { // Default index = new IndexHashMap(keys, options, this); } // Check the index does not already exist if (this._indexByName[index.name()]) { // Index already exists return { err: 'Index with that name already exists' }; } if (this._indexById[index.id()]) { // Index already exists return { err: 'Index with those keys already exists' }; } // Create the index index.rebuild(); // Add the index this._indexByName[index.name()] = index; this._indexById[index.id()] = index; time.end = new Date().getTime(); time.total = time.end - time.start; this._lastOp = { type: 'ensureIndex', stats: { time: time } }; return { index: index, id: index.id(), name: index.name(), state: index.state() }; }; /** * Gets an index by it's name. * @param {String} name The name of the index to retreive. * @returns {*} */ Collection.prototype.index = function (name) { if (this._indexByName) { return this._indexByName[name]; } }; /** * Gets the last reporting operation's details such as run time. * @returns {Object} */ Collection.prototype.lastOp = function () { return this._metrics.list(); }; /** * Generates a difference object that contains insert, update and remove arrays * representing the operations to execute to make this collection have the same * data as the one passed. * @param {Collection} collection The collection to diff against. 
* @returns {{}} */ Collection.prototype.diff = function (collection) { var diff = { insert: [], update: [], remove: [] }; var pm = this.primaryKey(), arr, arrIndex, arrItem, arrCount; // Check if the primary key index of each collection can be utilised if (pm !== collection.primaryKey()) { throw(this.logIdentifier() + ' Diffing requires that both collections have the same primary key!'); } // Use the collection primary key index to do the diff (super-fast) arr = collection._data; // Check if we have an array or another collection while (arr && !(arr instanceof Array)) { // We don't have an array, assign collection and get data collection = arr; arr = collection._data; } arrCount = arr.length; // Loop the collection's data array and check for matching items for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { arrItem = arr[arrIndex]; // Check for a matching item in this collection if (this._primaryIndex.get(arrItem[pm])) { // Matching item exists, check if the data is the same if (this._primaryCrc.get(arrItem[pm]) !== collection._primaryCrc.get(arrItem[pm])) { // The documents exist in both collections but data differs, update required diff.update.push(arrItem); } } else { // The document is missing from this collection, insert required diff.insert.push(arrItem); } } // Now loop this collection's data and check for matching items arr = this._data; arrCount = arr.length; for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { arrItem = arr[arrIndex]; if (!collection._primaryIndex.get(arrItem[pm])) { // The document does not exist in the other collection, remove required diff.remove.push(arrItem); } } return diff; }; Collection.prototype.collateAdd = new Overload({ /** * Adds a data source to collate data from and specifies the * key name to collate data to. * @func collateAdd * @memberof Collection * @param {Collection} collection The collection to collate data from. * @param {String=} keyName Optional name of the key to collate data to. 
* If none is provided the record CRUD is operated on the root collection * data. */ 'object, string': function (collection, keyName) { var self = this; self.collateAdd(collection, function (packet) { var obj1, obj2; switch (packet.type) { case 'insert': if (keyName) { obj1 = { $push: {} }; obj1.$push[keyName] = self.decouple(packet.data); self.update({}, obj1); } else { self.insert(packet.data); } break; case 'update': if (keyName) { obj1 = {}; obj2 = {}; obj1[keyName] = packet.data.query; obj2[keyName + '.$'] = packet.data.update; self.update(obj1, obj2); } else { self.update(packet.data.query, packet.data.update); } break; case 'remove': if (keyName) { obj1 = { $pull: {} }; obj1.$pull[keyName] = {}; obj1.$pull[keyName][self.primaryKey()] = packet.data.dataSet[0][collection.primaryKey()]; self.update({}, obj1); } else { self.remove(packet.data); } break; default: } }); }, /** * Adds a data source to collate data from and specifies a process * method that will handle the collation functionality (for custom * collation). * @func collateAdd * @memberof Collection * @param {Collection} collection The collection to collate data from. * @param {Function} process The process method. 
*/ 'object, function': function (collection, process) { if (typeof collection === 'string') { // The collection passed is a name, not a reference so get // the reference from the name collection = this._db.collection(collection, { autoCreate: false, throwError: false }); } if (collection) { this._collate = this._collate || {}; this._collate[collection.name()] = new ReactorIO(collection, this, process); return this; } else { throw('Cannot collate from a non-existent collection!'); } } }); Collection.prototype.collateRemove = function (collection) { if (typeof collection === 'object') { // We need to have the name of the collection to remove it collection = collection.name(); } if (collection) { // Drop the reactor IO chain node this._collate[collection].drop(); // Remove the collection data from the collate object delete this._collate[collection]; return this; } else { throw('No collection name passed to collateRemove() or collection not found!'); } }; Db.prototype.collection = new Overload({ /** * Get a collection with no name (generates a random name). If the * collection does not already exist then one is created for that * name automatically. * @func collection * @memberof Db * @param {String} collectionName The name of the collection. * @returns {Collection} */ '': function () { return this.$main.call(this, { name: this.objectId() }); }, /** * Get a collection by name. If the collection does not already exist * then one is created for that name automatically. * @func collection * @memberof Db * @param {Object} data An options object or a collection instance. * @returns {Collection} */ 'object': function (data) { // Handle being passed an instance if (data instanceof Collection) { if (data.state() !== 'droppped') { return data; } else { return this.$main.call(this, { name: data.name() }); } } return this.$main.call(this, data); }, /** * Get a collection by name. If the collection does not already exist * then one is created for that name automatically. 
* @func collection * @memberof Db * @param {String} collectionName The name of the collection. * @returns {Collection} */ 'string': function (collectionName) { return this.$main.call(this, { name: collectionName }); }, /** * Get a collection by name. If the collection does not already exist * then one is created for that name automatically. * @func collection * @memberof Db * @param {String} collectionName The name of the collection. * @param {String} primaryKey Optional primary key to specify the primary key field on the collection * objects. Defaults to "_id". * @returns {Collection} */ 'string, string': function (collectionName, primaryKey) { return this.$main.call(this, { name: collectionName, primaryKey: primaryKey }); }, /** * Get a collection by name. If the collection does not already exist * then one is created for that name automatically. * @func collection * @memberof Db * @param {String} collectionName The name of the collection. * @param {Object} options An options object. * @returns {Collection} */ 'string, object': function (collectionName, options) { options.name = collectionName; return this.$main.call(this, options); }, /** * Get a collection by name. If the collection does not already exist * then one is created for that name automatically. * @func collection * @memberof Db * @param {String} collectionName The name of the collection. * @param {String} primaryKey Optional primary key to specify the primary key field on the collection * objects. Defaults to "_id". * @param {Object} options An options object. * @returns {Collection} */ 'string, string, object': function (collectionName, primaryKey, options) { options.name = collectionName; options.primaryKey = primaryKey; return this.$main.call(this, options); }, /** * The main handler method. This gets called by all the other variants and * handles the actual logic of the overloaded method. * @func collection * @memberof Db * @param {Object} options An options object. 
* @returns {*} */ '$main': function (options) { var self = this, name = options.name; if (name) { if (this._collection[name]) { return this._collection[name]; } else { if (options && options.autoCreate === false) { if (options && options.throwError !== false) { throw(this.logIdentifier() + ' Cannot get collection ' + name + ' because it does not exist and auto-create has been disabled!'); } return undefined; } if (this.debug()) { console.log(this.logIdentifier() + ' Creating collection ' + name); } } this._collection[name] = this._collection[name] || new Collection(name, options).db(this); this._collection[name].mongoEmulation(this.mongoEmulation()); if (options.primaryKey !== undefined) { this._collection[name].primaryKey(options.primaryKey); } if (options.capped !== undefined) { // Check we have a size if (options.size !== undefined) { this._collection[name].capped(options.capped); this._collection[name].cappedSize(options.size); } else { throw(this.logIdentifier() + ' Cannot create a capped collection without specifying a size!'); } } // Listen for events on this collection so we can fire global events // on the database in response to it self._collection[name].on('change', function () { self.emit('change', self._collection[name], 'collection', name); }); self.emit('create', self._collection[name], 'collection', name); return this._collection[name]; } else { if (!options || (options && options.throwError !== false)) { throw(this.logIdentifier() + ' Cannot get collection with undefined name!'); } } } }); /** * Determine if a collection with the passed name already exists. * @memberof Db * @param {String} viewName The name of the collection to check for. * @returns {boolean} */ Db.prototype.collectionExists = function (viewName) { return Boolean(this._collection[viewName]); }; /** * Returns an array of collections the DB currently has. 
* @memberof Db * @param {String|RegExp=} search The optional search string or regular expression to use * to match collection names against. * @returns {Array} An array of objects containing details of each collection * the database is currently managing. */ Db.prototype.collections = function (search) { var arr = [], collections = this._collection, collection, i; if (search) { if (!(search instanceof RegExp)) { // Turn the search into a regular expression search = new RegExp(search); } } for (i in collections) { if (collections.hasOwnProperty(i)) { collection = collections[i]; if (search) { if (search.exec(i)) { arr.push({ name: i, count: collection.count(), linked: collection.isLinked !== undefined ? collection.isLinked() : false }); } } else { arr.push({ name: i, count: collection.count(), linked: collection.isLinked !== undefined ? collection.isLinked() : false }); } } } arr.sort(function (a, b) { return a.name.localeCompare(b.name); }); return arr; }; Shared.finishModule('Collection'); module.exports = Collection; },{"./Crc":5,"./IndexBinaryTree":7,"./IndexHashMap":8,"./KeyValueStore":9,"./Metrics":10,"./Overload":22,"./Path":23,"./ReactorIO":24,"./Shared":26}],4:[function(_dereq_,module,exports){ "use strict"; var Shared, Db, Metrics, Overload, _instances = []; Shared = _dereq_('./Shared'); Overload = _dereq_('./Overload'); /** * Creates a new ForerunnerDB instance. Core instances handle the lifecycle of * multiple database instances. * @constructor */ var Core = function (name) { this.init.apply(this, arguments); }; Core.prototype.init = function (name) { this._db = {}; this._debug = {}; this._name = name || 'ForerunnerDB'; _instances.push(this); }; /** * Returns the number of instantiated ForerunnerDB objects. * @returns {Number} The number of instantiated instances. */ Core.prototype.instantiatedCount = function () { return _instances.length; }; /** * Get all instances as an array or a single ForerunnerDB instance * by it's array index. 
* @param {Number=} index Optional index of instance to get. * @returns {Array|Object} Array of instances or a single instance. */ Core.prototype.instances = function (index) { if (index !== undefined) { return _instances[index]; } return _instances; }; /** * Get all instances as an array of instance names or a single ForerunnerDB * instance by it's name. * @param {String=} name Optional name of instance to get. * @returns {Array|Object} Array of instance names or a single instance. */ Core.prototype.namedInstances = function (name) { var i, instArr; if (name !== undefined) { for (i = 0; i < _instances.length; i++) { if (_instances[i].name === name) { return _instances[i]; } } return undefined; } instArr = []; for (i = 0; i < _instances.length; i++) { instArr.push(_instances[i].name); } return instArr; }; Core.prototype.moduleLoaded = new Overload({ /** * Checks if a module has been loaded into the database. * @func moduleLoaded * @memberof Core * @param {String} moduleName The name of the module to check for. * @returns {Boolean} True if the module is loaded, false if not. */ 'string': function (moduleName) { if (moduleName !== undefined) { moduleName = moduleName.replace(/ /g, ''); var modules = moduleName.split(','), index; for (index = 0; index < modules.length; index++) { if (!Shared.modules[modules[index]]) { return false; } } return true; } return false; }, /** * Checks if a module is loaded and if so calls the passed * callback method. * @func moduleLoaded * @memberof Core * @param {String} moduleName The name of the module to check for. * @param {Function} callback The callback method to call if module is loaded. 
*/ 'string, function': function (moduleName, callback) { if (moduleName !== undefined) { moduleName = moduleName.replace(/ /g, ''); var modules = moduleName.split(','), index; for (index = 0; index < modules.length; index++) { if (!Shared.modules[modules[index]]) { return false; } } if (callback) { callback(); } } }, /** * Checks if an array of named modules are loaded and if so * calls the passed callback method. * @func moduleLoaded * @memberof Core * @param {Array} moduleName The array of module names to check for. * @param {Function} callback The callback method to call if modules are loaded. */ 'array, function': function (moduleNameArr, callback) { var moduleName, i; for (i = 0; i < moduleNameArr.length; i++) { moduleName = moduleNameArr[i]; if (moduleName !== undefined) { moduleName = moduleName.replace(/ /g, ''); var modules = moduleName.split(','), index; for (index = 0; index < modules.length; index++) { if (!Shared.modules[modules[index]]) { return false; } } } } if (callback) { callback(); } }, /** * Checks if a module is loaded and if so calls the passed * success method, otherwise calls the failure method. * @func moduleLoaded * @memberof Core * @param {String} moduleName The name of the module to check for. * @param {Function} success The callback method to call if module is loaded. * @param {Function} failure The callback method to call if module not loaded. */ 'string, function, function': function (moduleName, success, failure) { if (moduleName !== undefined) { moduleName = moduleName.replace(/ /g, ''); var modules = moduleName.split(','), index; for (index = 0; index < modules.length; index++) { if (!Shared.modules[modules[index]]) { failure(); return false; } } success(); } } }); /** * Checks version against the string passed and if it matches (or partially matches) * then the callback is called. * @param {String} val The version to check against. * @param {Function} callback The callback to call if match is true. 
* @returns {Boolean} */ Core.prototype.version = function (val, callback) { if (val !== undefined) { if (Shared.version.indexOf(val) === 0) { if (callback) { callback(); } return true; } return false; } return Shared.version; }; // Expose moduleLoaded() method to non-instantiated object ForerunnerDB Core.moduleLoaded = Core.prototype.moduleLoaded; // Expose version() method to non-instantiated object ForerunnerDB Core.version = Core.prototype.version; // Expose instances() method to non-instantiated object ForerunnerDB Core.instances = Core.prototype.instances; // Expose instantiatedCount() method to non-instantiated object ForerunnerDB Core.instantiatedCount = Core.prototype.instantiatedCount; // Provide public access to the Shared object Core.shared = Shared; Core.prototype.shared = Shared; Shared.addModule('Core', Core); Shared.mixin(Core.prototype, 'Mixin.Common'); Shared.mixin(Core.prototype, 'Mixin.Constants'); Db = _dereq_('./Db.js'); Metrics = _dereq_('./Metrics.js'); /** * Gets / sets the name of the instance. This is primarily used for * name-spacing persistent storage. * @param {String=} val The name of the instance to set. * @returns {*} */ Shared.synthesize(Core.prototype, 'name'); /** * Gets / sets mongodb emulation mode. * @param {Boolean=} val True to enable, false to disable. * @returns {*} */ Shared.synthesize(Core.prototype, 'mongoEmulation'); // Set a flag to determine environment Core.prototype._isServer = false; /** * Returns true if ForerunnerDB is running on a client browser. * @returns {boolean} */ Core.prototype.isClient = function () { return !this._isServer; }; /** * Returns true if ForerunnerDB is running on a server. * @returns {boolean} */ Core.prototype.isServer = function () { return this._isServer; }; /** * Checks if the database is running on a client (browser) or * a server (node.js). * @returns {Boolean} Returns true if running on a browser. 
*/ Core.prototype.isClient = function () { return !this._isServer; }; /** * Checks if the database is running on a client (browser) or * a server (node.js). * @returns {Boolean} Returns true if running on a server. */ Core.prototype.isServer = function () { return this._isServer; }; /** * Added to provide an error message for users who have not seen * the new instantiation breaking change warning and try to get * a collection directly from the core instance. */ Core.prototype.collection = function () { throw("ForerunnerDB's instantiation has changed since version 1.3.36 to support multiple database instances. Please see the readme.md file for the minor change you have to make to get your project back up and running, or see the issue related to this change at https://github.com/Irrelon/ForerunnerDB/issues/44"); }; module.exports = Core; },{"./Db.js":6,"./Metrics.js":10,"./Overload":22,"./Shared":26}],5:[function(_dereq_,module,exports){ "use strict"; /** * @mixin */ var crcTable = (function () { var crcTable = [], c, n, k; for (n = 0; n < 256; n++) { c = n; for (k = 0; k < 8; k++) { c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1)); // jshint ignore:line } crcTable[n] = c; } return crcTable; }()); module.exports = function(str) { var crc = 0 ^ (-1), // jshint ignore:line i; for (i = 0; i < str.length; i++) { crc = (crc >>> 8) ^ crcTable[(crc ^ str.charCodeAt(i)) & 0xFF]; // jshint ignore:line } return (crc ^ (-1)) >>> 0; // jshint ignore:line }; },{}],6:[function(_dereq_,module,exports){ "use strict"; var Shared, Core, Collection, Metrics, Crc, Overload; Shared = _dereq_('./Shared'); Overload = _dereq_('./Overload'); /** * Creates a new ForerunnerDB database instance. 
* @constructor */ var Db = function (name, core) { this.init.apply(this, arguments); }; Db.prototype.init = function (name, core) { this.core(core); this._primaryKey = '_id'; this._name = name; this._collection = {}; this._debug = {}; }; Shared.addModule('Db', Db); Db.prototype.moduleLoaded = new Overload({ /** * Checks if a module has been loaded into the database. * @func moduleLoaded * @memberof Db * @param {String} moduleName The name of the module to check for. * @returns {Boolean} True if the module is loaded, false if not. */ 'string': function (moduleName) { if (moduleName !== undefined) { moduleName = moduleName.replace(/ /g, ''); var modules = moduleName.split(','), index; for (index = 0; index < modules.length; index++) { if (!Shared.modules[modules[index]]) { return false; } } return true; } return false; }, /** * Checks if a module is loaded and if so calls the passed * callback method. * @func moduleLoaded * @memberof Db * @param {String} moduleName The name of the module to check for. * @param {Function} callback The callback method to call if module is loaded. */ 'string, function': function (moduleName, callback) { if (moduleName !== undefined) { moduleName = moduleName.replace(/ /g, ''); var modules = moduleName.split(','), index; for (index = 0; index < modules.length; index++) { if (!Shared.modules[modules[index]]) { return false; } } if (callback) { callback(); } } }, /** * Checks if a module is loaded and if so calls the passed * success method, otherwise calls the failure method. * @func moduleLoaded * @memberof Db * @param {String} moduleName The name of the module to check for. * @param {Function} success The callback method to call if module is loaded. * @param {Function} failure The callback method to call if module not loaded. 
*/ 'string, function, function': function (moduleName, success, failure) { if (moduleName !== undefined) { moduleName = moduleName.replace(/ /g, ''); var modules = moduleName.split(','), index; for (index = 0; index < modules.length; index++) { if (!Shared.modules[modules[index]]) { failure(); return false; } } success(); } } }); /** * Checks version against the string passed and if it matches (or partially matches) * then the callback is called. * @param {String} val The version to check against. * @param {Function} callback The callback to call if match is true. * @returns {Boolean} */ Db.prototype.version = function (val, callback) { if (val !== undefined) { if (Shared.version.indexOf(val) === 0) { if (callback) { callback(); } return true; } return false; } return Shared.version; }; // Expose moduleLoaded method to non-instantiated object ForerunnerDB Db.moduleLoaded = Db.prototype.moduleLoaded; // Expose version method to non-instantiated object ForerunnerDB Db.version = Db.prototype.version; // Provide public access to the Shared object Db.shared = Shared; Db.prototype.shared = Shared; Shared.addModule('Db', Db); Shared.mixin(Db.prototype, 'Mixin.Common'); Shared.mixin(Db.prototype, 'Mixin.ChainReactor'); Shared.mixin(Db.prototype, 'Mixin.Constants'); Shared.mixin(Db.prototype, 'Mixin.Tags'); Core = Shared.modules.Core; Collection = _dereq_('./Collection.js'); Metrics = _dereq_('./Metrics.js'); Crc = _dereq_('./Crc.js'); Db.prototype._isServer = false; /** * Gets / sets the core object this database belongs to. */ Shared.synthesize(Db.prototype, 'core'); /** * Gets / sets the default primary key for new collections. * @param {String=} val The name of the primary key to set. * @returns {*} */ Shared.synthesize(Db.prototype, 'primaryKey'); /** * Gets / sets the current state. * @param {String=} val The name of the state to set. * @returns {*} */ Shared.synthesize(Db.prototype, 'state'); /** * Gets / sets the name of the database. 
* @param {String=} val The name of the database to set. * @returns {*} */ Shared.synthesize(Db.prototype, 'name'); /** * Gets / sets mongodb emulation mode. * @param {Boolean=} val True to enable, false to disable. * @returns {*} */ Shared.synthesize(Db.prototype, 'mongoEmulation'); /** * Returns true if ForerunnerDB is running on a client browser. * @returns {boolean} */ Db.prototype.isClient = function () { return !this._isServer; }; /** * Returns true if ForerunnerDB is running on a server. * @returns {boolean} */ Db.prototype.isServer = function () { return this._isServer; }; /** * Returns a checksum of a string. * @param {String} string The string to checksum. * @return {String} The checksum generated. */ Db.prototype.crc = Crc; /** * Checks if the database is running on a client (browser) or * a server (node.js). * @returns {Boolean} Returns true if running on a browser. */ Db.prototype.isClient = function () { return !this._isServer; }; /** * Checks if the database is running on a client (browser) or * a server (node.js). * @returns {Boolean} Returns true if running on a server. */ Db.prototype.isServer = function () { return this._isServer; }; /** * Converts a normal javascript array of objects into a DB collection. * @param {Array} arr An array of objects. * @returns {Collection} A new collection instance with the data set to the * array passed. */ Db.prototype.arrayToCollection = function (arr) { return new Collection().setData(arr); }; /** * Registers an event listener against an event name. * @param {String} event The name of the event to listen for. * @param {Function} listener The listener method to call when * the event is fired. * @returns {*} */ Db.prototype.on = function(event, listener) { this._listeners = this._listeners || {}; this._listeners[event] = this._listeners[event] || []; this._listeners[event].push(listener); return this; }; /** * De-registers an event listener from an event name. 
* @param {String} event The name of the event to stop listening for. * @param {Function} listener The listener method passed to on() when * registering the event listener. * @returns {*} */ Db.prototype.off = function(event, listener) { if (event in this._listeners) { var arr = this._listeners[event], index = arr.indexOf(listener); if (index > -1) { arr.splice(index, 1); } } return this; }; /** * Emits an event by name with the given data. * @param {String} event The name of the event to emit. * @param {*=} data The data to emit with the event. * @returns {*} */ Db.prototype.emit = function(event, data) { this._listeners = this._listeners || {}; if (event in this._listeners) { var arr = this._listeners[event], arrCount = arr.length, arrIndex; for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { arr[arrIndex].apply(this, Array.prototype.slice.call(arguments, 1)); } } return this; }; Db.prototype.peek = function (search) { var i, coll, arr = [], typeOfSearch = typeof search; // Loop collections for (i in this._collection) { if (this._collection.hasOwnProperty(i)) { coll = this._collection[i]; if (typeOfSearch === 'string') { arr = arr.concat(coll.peek(search)); } else { arr = arr.concat(coll.find(search)); } } } return arr; }; /** * Find all documents across all collections in the database that match the passed * string or search object. * @param search String or search object. * @returns {Array} */ Db.prototype.peek = function (search) { var i, coll, arr = [], typeOfSearch = typeof search; // Loop collections for (i in this._collection) { if (this._collection.hasOwnProperty(i)) { coll = this._collection[i]; if (typeOfSearch === 'string') { arr = arr.concat(coll.peek(search)); } else { arr = arr.concat(coll.find(search)); } } } return arr; }; /** * Find all documents across all collections in the database that match the passed * string or search object and return them in an object where each key is the name * of the collection that the document was matched in. 
* @param search String or search object. * @returns {object} */ Db.prototype.peekCat = function (search) { var i, coll, cat = {}, arr, typeOfSearch = typeof search; // Loop collections for (i in this._collection) { if (this._collection.hasOwnProperty(i)) { coll = this._collection[i]; if (typeOfSearch === 'string') { arr = coll.peek(search); if (arr && arr.length) { cat[coll.name()] = arr; } } else { arr = coll.find(search); if (arr && arr.length) { cat[coll.name()] = arr; } } } } return cat; }; Db.prototype.drop = new Overload({ /** * Drops the database. * @func drop * @memberof Db */ '': function () { if (!this.isDropped()) { var arr = this.collections(), arrCount = arr.length, arrIndex; this._state = 'dropped'; for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { this.collection(arr[arrIndex].name).drop(); delete this._collection[arr[arrIndex].name]; } this.emit('drop', this); delete this._listeners; delete this._core._db[this._name]; } return true; }, /** * Drops the database with optional callback method. * @func drop * @memberof Db * @param {Function} callback Optional callback method. */ 'function': function (callback) { if (!this.isDropped()) { var arr = this.collections(), arrCount = arr.length, arrIndex, finishCount = 0, afterDrop = function () { finishCount++; if (finishCount === arrCount) { if (callback) { callback(); } } }; this._state = 'dropped'; for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { this.collection(arr[arrIndex].name).drop(afterDrop); delete this._collection[arr[arrIndex].name]; } this.emit('drop', this); delete this._listeners; delete this._core._db[this._name]; } return true; }, /** * Drops the database with optional persistent storage drop. Persistent * storage is dropped by default if no preference is provided. * @func drop * @memberof Db * @param {Boolean} removePersist Drop persistent storage for this database. 
*/ 'boolean': function (removePersist) { if (!this.isDropped()) { var arr = this.collections(), arrCount = arr.length, arrIndex; this._state = 'dropped'; for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { this.collection(arr[arrIndex].name).drop(removePersist); delete this._collection[arr[arrIndex].name]; } this.emit('drop', this); delete this._listeners; delete this._core._db[this._name]; } return true; }, /** * Drops the database and optionally controls dropping persistent storage * and callback method. * @func drop * @memberof Db * @param {Boolean} removePersist Drop persistent storage for this database. * @param {Function} callback Optional callback method. */ 'boolean, function': function (removePersist, callback) { if (!this.isDropped()) { var arr = this.collections(), arrCount = arr.length, arrIndex, finishCount = 0, afterDrop = function () { finishCount++; if (finishCount === arrCount) { if (callback) { callback(); } } }; this._state = 'dropped'; for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { this.collection(arr[arrIndex].name).drop(removePersist, afterDrop); delete this._collection[arr[arrIndex].name]; } this.emit('drop', this); delete this._listeners; delete this._core._db[this._name]; } return true; } }); /** * Gets a database instance by name. * @memberof Core * @param {String=} name Optional name of the database. If none is provided * a random name is assigned. * @returns {Db} */ Core.prototype.db = function (name) { // Handle being passed an instance if (name instanceof Db) { return name; } if (!name) { name = this.objectId(); } this._db[name] = this._db[name] || new Db(name, this); this._db[name].mongoEmulation(this.mongoEmulation()); return this._db[name]; }; /** * Returns an array of databases that ForerunnerDB currently has. * @memberof Core * @param {String|RegExp=} search The optional search string or regular expression to use * to match collection names against. 
* @returns {Array} An array of objects containing details of each database * that ForerunnerDB is currently managing and it's child entities. */ Core.prototype.databases = function (search) { var arr = [], tmpObj, addDb, i; if (search) { if (!(search instanceof RegExp)) { // Turn the search into a regular expression search = new RegExp(search); } } for (i in this._db) { if (this._db.hasOwnProperty(i)) { addDb = true; if (search) { if (!search.exec(i)) { addDb = false; } } if (addDb) { tmpObj = { name: i, children: [] }; if (this.shared.moduleExists('Collection')) { tmpObj.children.push({ module: 'collection', moduleName: 'Collections', count: this._db[i].collections().length }); } if (this.shared.moduleExists('CollectionGroup')) { tmpObj.children.push({ module: 'collectionGroup', moduleName: 'Collection Groups', count: this._db[i].collectionGroups().length }); } if (this.shared.moduleExists('Document')) { tmpObj.children.push({ module: 'document', moduleName: 'Documents', count: this._db[i].documents().length }); } if (this.shared.moduleExists('Grid')) { tmpObj.children.push({ module: 'grid', moduleName: 'Grids', count: this._db[i].grids().length }); } if (this.shared.moduleExists('Overview')) { tmpObj.children.push({ module: 'overview', moduleName: 'Overviews', count: this._db[i].overviews().length }); } if (this.shared.moduleExists('View')) { tmpObj.children.push({ module: 'view', moduleName: 'Views', count: this._db[i].views().length }); } arr.push(tmpObj); } } } arr.sort(function (a, b) { return a.name.localeCompare(b.name); }); return arr; }; Shared.finishModule('Db'); module.exports = Db; },{"./Collection.js":3,"./Crc.js":5,"./Metrics.js":10,"./Overload":22,"./Shared":26}],7:[function(_dereq_,module,exports){ "use strict"; /* name id rebuild state match lookup */ var Shared = _dereq_('./Shared'), Path = _dereq_('./Path'), BinaryTree = _dereq_('./BinaryTree'), treeInstance = new BinaryTree(), btree = function () {}; treeInstance.inOrder('hash'); /** * The 
index class used to instantiate hash map indexes that the database can * use to speed up queries on collections and views. * @constructor */ var IndexBinaryTree = function () { this.init.apply(this, arguments); }; IndexBinaryTree.prototype.init = function (keys, options, collection) { this._btree = new (btree.create(2, this.sortAsc))(); this._size = 0; this._id = this._itemKeyHash(keys, keys); this.unique(options && options.unique ? options.unique : false); if (keys !== undefined) { this.keys(keys); } if (collection !== undefined) { this.collection(collection); } this.name(options && options.name ? options.name : this._id); }; Shared.addModule('IndexBinaryTree', IndexBinaryTree); Shared.mixin(IndexBinaryTree.prototype, 'Mixin.ChainReactor'); Shared.mixin(IndexBinaryTree.prototype, 'Mixin.Sorting'); IndexBinaryTree.prototype.id = function () { return this._id; }; IndexBinaryTree.prototype.state = function () { return this._state; }; IndexBinaryTree.prototype.size = function () { return this._size; }; Shared.synthesize(IndexBinaryTree.prototype, 'data'); Shared.synthesize(IndexBinaryTree.prototype, 'name'); Shared.synthesize(IndexBinaryTree.prototype, 'collection'); Shared.synthesize(IndexBinaryTree.prototype, 'type'); Shared.synthesize(IndexBinaryTree.prototype, 'unique'); IndexBinaryTree.prototype.keys = function (val) { if (val !== undefined) { this._keys = val; // Count the keys this._keyCount = (new Path()).parse(this._keys).length; return this; } return this._keys; }; IndexBinaryTree.prototype.rebuild = function () { // Do we have a collection? 
if (this._collection) { // Get sorted data var collection = this._collection.subset({}, { $decouple: false, $orderBy: this._keys }), collectionData = collection.find(), dataIndex, dataCount = collectionData.length; // Clear the index data for the index this._btree = new (btree.create(2, this.sortAsc))(); if (this._unique) { this._uniqueLookup = {}; } // Loop the collection data for (dataIndex = 0; dataIndex < dataCount; dataIndex++) { this.insert(collectionData[dataIndex]); } } this._state = { name: this._name, keys: this._keys, indexSize: this._size, built: new Date(), updated: new Date(), ok: true }; }; IndexBinaryTree.prototype.insert = function (dataItem, options) { var uniqueFlag = this._unique, uniqueHash, dataItemHash = this._itemKeyHash(dataItem, this._keys), keyArr; if (uniqueFlag) { uniqueHash = this._itemHash(dataItem, this._keys); this._uniqueLookup[uniqueHash] = dataItem; } // We store multiple items that match a key inside an array // that is then stored against that key in the tree... 
// Check if item exists for this key already keyArr = this._btree.get(dataItemHash); // Check if the array exists if (keyArr === undefined) { // Generate an array for this key first keyArr = []; // Put the new array into the tree under the key this._btree.put(dataItemHash, keyArr); } // Push the item into the array keyArr.push(dataItem); this._size++; }; IndexBinaryTree.prototype.remove = function (dataItem, options) { var uniqueFlag = this._unique, uniqueHash, dataItemHash = this._itemKeyHash(dataItem, this._keys), keyArr, itemIndex; if (uniqueFlag) { uniqueHash = this._itemHash(dataItem, this._keys); delete this._uniqueLookup[uniqueHash]; } // Try and get the array for the item hash key keyArr = this._btree.get(dataItemHash); if (keyArr !== undefined) { // The key array exits, remove the item from the key array itemIndex = keyArr.indexOf(dataItem); if (itemIndex > -1) { // Check the length of the array if (keyArr.length === 1) { // This item is the last in the array, just kill the tree entry this._btree.del(dataItemHash); } else { // Remove the item keyArr.splice(itemIndex, 1); } this._size } } }; IndexBinaryTree.prototype.violation = function (dataItem) { // Generate item hash var uniqueHash = this._itemHash(dataItem, this._keys); // Check if the item breaks the unique constraint return Boolean(this._uniqueLookup[uniqueHash]); }; IndexBinaryTree.prototype.hashViolation = function (uniqueHash) { // Check if the item breaks the unique constraint return Boolean(this._uniqueLookup[uniqueHash]); }; IndexBinaryTree.prototype.lookup = function (query) { return this._data[this._itemHash(query, this._keys)] || []; }; IndexBinaryTree.prototype.match = function (query, options) { // Check if the passed query has data in the keys our index // operates on and if so, is the query sort matching our order var pathSolver = new Path(); var indexKeyArr = pathSolver.parseArr(this._keys), queryArr = pathSolver.parseArr(query), matchedKeys = [], matchedKeyCount = 0, i; // Loop the 
query array and check the order of keys against the // index key array to see if this index can be used for (i = 0; i < indexKeyArr.length; i++) { if (queryArr[i] === indexKeyArr[i]) { matchedKeyCount++; matchedKeys.push(queryArr[i]); } else { // Query match failed - this is a hash map index so partial key match won't work return { matchedKeys: [], totalKeyCount: queryArr.length, score: 0 }; } } return { matchedKeys: matchedKeys, totalKeyCount: queryArr.length, score: matchedKeyCount }; //return pathSolver.countObjectPaths(this._keys, query); }; IndexBinaryTree.prototype._itemHash = function (item, keys) { var path = new Path(), pathData, hash = '', k; pathData = path.parse(keys); for (k = 0; k < pathData.length; k++) { if (hash) { hash += '_'; } hash += path.value(item, pathData[k].path).join(':'); } return hash; }; IndexBinaryTree.prototype._itemKeyHash = function (item, keys) { var path = new Path(), pathData, hash = '', k; pathData = path.parse(keys); for (k = 0; k < pathData.length; k++) { if (hash) { hash += '_'; } hash += path.keyValue(item, pathData[k].path); } return hash; }; IndexBinaryTree.prototype._itemHashArr = function (item, keys) { var path = new Path(), pathData, //hash = '', hashArr = [], valArr, i, k, j; pathData = path.parse(keys); for (k = 0; k < pathData.length; k++) { valArr = path.value(item, pathData[k].path); for (i = 0; i < valArr.length; i++) { if (k === 0) { // Setup the initial hash array hashArr.push(valArr[i]); } else { // Loop the hash array and concat the value to it for (j = 0; j < hashArr.length; j++) { hashArr[j] = hashArr[j] + '_' + valArr[i]; } } } } return hashArr; }; Shared.finishModule('IndexBinaryTree'); module.exports = IndexBinaryTree; },{"./BinaryTree":2,"./Path":23,"./Shared":26}],8:[function(_dereq_,module,exports){ "use strict"; var Shared = _dereq_('./Shared'), Path = _dereq_('./Path'); /** * The index class used to instantiate hash map indexes that the database can * use to speed up queries on collections and 
views. * @constructor */ var IndexHashMap = function () { this.init.apply(this, arguments); }; IndexHashMap.prototype.init = function (keys, options, collection) { this._crossRef = {}; this._size = 0; this._id = this._itemKeyHash(keys, keys); this.data({}); this.unique(options && options.unique ? options.unique : false); if (keys !== undefined) { this.keys(keys); } if (collection !== undefined) { this.collection(collection); } this.name(options && options.name ? options.name : this._id); }; Shared.addModule('IndexHashMap', IndexHashMap); Shared.mixin(IndexHashMap.prototype, 'Mixin.ChainReactor'); IndexHashMap.prototype.id = function () { return this._id; }; IndexHashMap.prototype.state = function () { return this._state; }; IndexHashMap.prototype.size = function () { return this._size; }; Shared.synthesize(IndexHashMap.prototype, 'data'); Shared.synthesize(IndexHashMap.prototype, 'name'); Shared.synthesize(IndexHashMap.prototype, 'collection'); Shared.synthesize(IndexHashMap.prototype, 'type'); Shared.synthesize(IndexHashMap.prototype, 'unique'); IndexHashMap.prototype.keys = function (val) { if (val !== undefined) { this._keys = val; // Count the keys this._keyCount = (new Path()).parse(this._keys).length; return this; } return this._keys; }; IndexHashMap.prototype.rebuild = function () { // Do we have a collection? 
if (this._collection) { // Get sorted data var collection = this._collection.subset({}, { $decouple: false, $orderBy: this._keys }), collectionData = collection.find(), dataIndex, dataCount = collectionData.length; // Clear the index data for the index this._data = {}; if (this._unique) { this._uniqueLookup = {}; } // Loop the collection data for (dataIndex = 0; dataIndex < dataCount; dataIndex++) { this.insert(collectionData[dataIndex]); } } this._state = { name: this._name, keys: this._keys, indexSize: this._size, built: new Date(), updated: new Date(), ok: true }; }; IndexHashMap.prototype.insert = function (dataItem, options) { var uniqueFlag = this._unique, uniqueHash, itemHashArr, hashIndex; if (uniqueFlag) { uniqueHash = this._itemHash(dataItem, this._keys); this._uniqueLookup[uniqueHash] = dataItem; } // Generate item hash itemHashArr = this._itemHashArr(dataItem, this._keys); // Get the path search results and store them for (hashIndex = 0; hashIndex < itemHashArr.length; hashIndex++) { this.pushToPathValue(itemHashArr[hashIndex], dataItem); } }; IndexHashMap.prototype.update = function (dataItem, options) { // TODO: Write updates to work // 1: Get uniqueHash for the dataItem primary key value (may need to generate a store for this) // 2: Remove the uniqueHash as it currently stands // 3: Generate a new uniqueHash for dataItem // 4: Insert the new uniqueHash }; IndexHashMap.prototype.remove = function (dataItem, options) { var uniqueFlag = this._unique, uniqueHash, itemHashArr, hashIndex; if (uniqueFlag) { uniqueHash = this._itemHash(dataItem, this._keys); delete this._uniqueLookup[uniqueHash]; } // Generate item hash itemHashArr = this._itemHashArr(dataItem, this._keys); // Get the path search results and store them for (hashIndex = 0; hashIndex < itemHashArr.length; hashIndex++) { this.pullFromPathValue(itemHashArr[hashIndex], dataItem); } }; IndexHashMap.prototype.violation = function (dataItem) { // Generate item hash var uniqueHash = 
this._itemHash(dataItem, this._keys); // Check if the item breaks the unique constraint return Boolean(this._uniqueLookup[uniqueHash]); }; IndexHashMap.prototype.hashViolation = function (uniqueHash) { // Check if the item breaks the unique constraint return Boolean(this._uniqueLookup[uniqueHash]); }; IndexHashMap.prototype.pushToPathValue = function (hash, obj) { var pathValArr = this._data[hash] = this._data[hash] || []; // Make sure we have not already indexed this object at this path/value if (pathValArr.indexOf(obj) === -1) { // Index the object pathValArr.push(obj); // Record the reference to this object in our index size this._size++; // Cross-reference this association for later lookup this.pushToCrossRef(obj, pathValArr); } }; IndexHashMap.prototype.pullFromPathValue = function (hash, obj) { var pathValArr = this._data[hash], indexOfObject; // Make sure we have already indexed this object at this path/value indexOfObject = pathValArr.indexOf(obj); if (indexOfObject > -1) { // Un-index the object pathValArr.splice(indexOfObject, 1); // Record the reference to this object in our index size this._size // Remove object cross-reference this.pullFromCrossRef(obj, pathValArr); } // Check if we should remove the path value array if (!pathValArr.length) { // Remove the array delete this._data[hash]; } }; IndexHashMap.prototype.pull = function (obj) { // Get all places the object has been used and remove them var id = obj[this._collection.primaryKey()], crossRefArr = this._crossRef[id], arrIndex, arrCount = crossRefArr.length, arrItem; for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { arrItem = crossRefArr[arrIndex]; // Remove item from this index lookup array this._pullFromArray(arrItem, obj); } // Record the reference to this object in our index size this._size // Now remove the cross-reference entry for this object delete this._crossRef[id]; }; IndexHashMap.prototype._pullFromArray = function (arr, obj) { var arrCount = arr.length; while (arrCount if 
(arr[arrCount] === obj) { arr.splice(arrCount, 1); } } }; IndexHashMap.prototype.pushToCrossRef = function (obj, pathValArr) { var id = obj[this._collection.primaryKey()], crObj; this._crossRef[id] = this._crossRef[id] || []; // Check if the cross-reference to the pathVal array already exists crObj = this._crossRef[id]; if (crObj.indexOf(pathValArr) === -1) { // Add the cross-reference crObj.push(pathValArr); } }; IndexHashMap.prototype.pullFromCrossRef = function (obj, pathValArr) { var id = obj[this._collection.primaryKey()]; delete this._crossRef[id]; }; IndexHashMap.prototype.lookup = function (query) { return this._data[this._itemHash(query, this._keys)] || []; }; IndexHashMap.prototype.match = function (query, options) { // Check if the passed query has data in the keys our index // operates on and if so, is the query sort matching our order var pathSolver = new Path(); var indexKeyArr = pathSolver.parseArr(this._keys), queryArr = pathSolver.parseArr(query), matchedKeys = [], matchedKeyCount = 0, i; // Loop the query array and check the order of keys against the // index key array to see if this index can be used for (i = 0; i < indexKeyArr.length; i++) { if (queryArr[i] === indexKeyArr[i]) { matchedKeyCount++; matchedKeys.push(queryArr[i]); } else { // Query match failed - this is a hash map index so partial key match won't work return { matchedKeys: [], totalKeyCount: queryArr.length, score: 0 }; } } return { matchedKeys: matchedKeys, totalKeyCount: queryArr.length, score: matchedKeyCount }; //return pathSolver.countObjectPaths(this._keys, query); }; IndexHashMap.prototype._itemHash = function (item, keys) { var path = new Path(), pathData, hash = '', k; pathData = path.parse(keys); for (k = 0; k < pathData.length; k++) { if (hash) { hash += '_'; } hash += path.value(item, pathData[k].path).join(':'); } return hash; }; IndexHashMap.prototype._itemKeyHash = function (item, keys) { var path = new Path(), pathData, hash = '', k; pathData = path.parse(keys); 
for (k = 0; k < pathData.length; k++) { if (hash) { hash += '_'; } hash += path.keyValue(item, pathData[k].path); } return hash; }; IndexHashMap.prototype._itemHashArr = function (item, keys) { var path = new Path(), pathData, //hash = '', hashArr = [], valArr, i, k, j; pathData = path.parse(keys); for (k = 0; k < pathData.length; k++) { valArr = path.value(item, pathData[k].path); for (i = 0; i < valArr.length; i++) { if (k === 0) { // Setup the initial hash array hashArr.push(valArr[i]); } else { // Loop the hash array and concat the value to it for (j = 0; j < hashArr.length; j++) { hashArr[j] = hashArr[j] + '_' + valArr[i]; } } } } return hashArr; }; Shared.finishModule('IndexHashMap'); module.exports = IndexHashMap; },{"./Path":23,"./Shared":26}],9:[function(_dereq_,module,exports){ "use strict"; var Shared = _dereq_('./Shared'); /** * The key value store class used when storing basic in-memory KV data, * and can be queried for quick retrieval. Mostly used for collection * primary key indexes and lookups. * @param {String=} name Optional KV store name. * @constructor */ var KeyValueStore = function (name) { this.init.apply(this, arguments); }; KeyValueStore.prototype.init = function (name) { this._name = name; this._data = {}; this._primaryKey = '_id'; }; Shared.addModule('KeyValueStore', KeyValueStore); Shared.mixin(KeyValueStore.prototype, 'Mixin.ChainReactor'); /** * Get / set the name of the key/value store. * @param {String} val The name to set. * @returns {*} */ Shared.synthesize(KeyValueStore.prototype, 'name'); /** * Get / set the primary key. * @param {String} key The key to set. * @returns {*} */ KeyValueStore.prototype.primaryKey = function (key) { if (key !== undefined) { this._primaryKey = key; return this; } return this._primaryKey; }; /** * Removes all data from the store. * @returns {*} */ KeyValueStore.prototype.truncate = function () { this._data = {}; return this; }; /** * Sets data against a key in the store. 
* @param {String} key The key to set data for. * @param {*} value The value to assign to the key. * @returns {*} */ KeyValueStore.prototype.set = function (key, value) { this._data[key] = value ? value : true; return this; }; /** * Gets data stored for the passed key. * @param {String} key The key to get data for. * @returns {*} */ KeyValueStore.prototype.get = function (key) { return this._data[key]; }; /** * Get / set the primary key. * @param {*} obj A lookup query, can be a string key, an array of string keys, * an object with further query clauses or a regular expression that should be * run against all keys. * @returns {*} */ KeyValueStore.prototype.lookup = function (obj) { var pKeyVal = obj[this._primaryKey], arrIndex, arrCount, lookupItem, result; if (pKeyVal instanceof Array) { // An array of primary keys, find all matches arrCount = pKeyVal.length; result = []; for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { lookupItem = this._data[pKeyVal[arrIndex]]; if (lookupItem) { result.push(lookupItem); } } return result; } else if (pKeyVal instanceof RegExp) { // Create new data result = []; for (arrIndex in this._data) { if (this._data.hasOwnProperty(arrIndex)) { if (pKeyVal.test(arrIndex)) { result.push(this._data[arrIndex]); } } } return result; } else if (typeof pKeyVal === 'object') { // The primary key clause is an object, now we have to do some // more extensive searching if (pKeyVal.$ne) { // Create new data result = []; for (arrIndex in this._data) { if (this._data.hasOwnProperty(arrIndex)) { if (arrIndex !== pKeyVal.$ne) { result.push(this._data[arrIndex]); } } } return result; } if (pKeyVal.$in && (pKeyVal.$in instanceof Array)) { // Create new data result = []; for (arrIndex in this._data) { if (this._data.hasOwnProperty(arrIndex)) { if (pKeyVal.$in.indexOf(arrIndex) > -1) { result.push(this._data[arrIndex]); } } } return result; } if (pKeyVal.$nin && (pKeyVal.$nin instanceof Array)) { // Create new data result = []; for (arrIndex in this._data) 
{ if (this._data.hasOwnProperty(arrIndex)) { if (pKeyVal.$nin.indexOf(arrIndex) === -1) { result.push(this._data[arrIndex]); } } } return result; } if (pKeyVal.$or && (pKeyVal.$or instanceof Array)) { // Create new data result = []; for (arrIndex = 0; arrIndex < pKeyVal.$or.length; arrIndex++) { result = result.concat(this.lookup(pKeyVal.$or[arrIndex])); } return result; } } else { // Key is a basic lookup from string lookupItem = this._data[pKeyVal]; if (lookupItem !== undefined) { return [lookupItem]; } else { return []; } } }; /** * Removes data for the given key from the store. * @param {String} key The key to un-set. * @returns {*} */ KeyValueStore.prototype.unSet = function (key) { delete this._data[key]; return this; }; /** * Sets data for the give key in the store only where the given key * does not already have a value in the store. * @param {String} key The key to set data for. * @param {*} value The value to assign to the key. * @returns {Boolean} True if data was set or false if data already * exists for the key. */ KeyValueStore.prototype.uniqueSet = function (key, value) { if (this._data[key] === undefined) { this._data[key] = value; return true; } return false; }; Shared.finishModule('KeyValueStore'); module.exports = KeyValueStore; },{"./Shared":26}],10:[function(_dereq_,module,exports){ "use strict"; var Shared = _dereq_('./Shared'), Operation = _dereq_('./Operation'); /** * The metrics class used to store details about operations. * @constructor */ var Metrics = function () { this.init.apply(this, arguments); }; Metrics.prototype.init = function () { this._data = []; }; Shared.addModule('Metrics', Metrics); Shared.mixin(Metrics.prototype, 'Mixin.ChainReactor'); /** * Creates an operation within the metrics instance and if metrics * are currently enabled (by calling the start() method) the operation * is also stored in the metrics log. * @param {String} name The name of the operation. 
* @returns {Operation} */ Metrics.prototype.create = function (name) { var op = new Operation(name); if (this._enabled) { this._data.push(op); } return op; }; /** * Starts logging operations. * @returns {Metrics} */ Metrics.prototype.start = function () { this._enabled = true; return this; }; /** * Stops logging operations. * @returns {Metrics} */ Metrics.prototype.stop = function () { this._enabled = false; return this; }; /** * Clears all logged operations. * @returns {Metrics} */ Metrics.prototype.clear = function () { this._data = []; return this; }; /** * Returns an array of all logged operations. * @returns {Array} */ Metrics.prototype.list = function () { return this._data; }; Shared.finishModule('Metrics'); module.exports = Metrics; },{"./Operation":21,"./Shared":26}],11:[function(_dereq_,module,exports){ "use strict"; var CRUD = { preSetData: function () { }, postSetData: function () { } }; module.exports = CRUD; },{}],12:[function(_dereq_,module,exports){ "use strict"; /** * The chain reactor mixin, provides methods to the target object that allow chain * reaction events to propagate to the target and be handled, processed and passed * on down the chain. 
* @mixin */ var ChainReactor = { /** * * @param obj */ chain: function (obj) { if (this.debug && this.debug()) { if (obj._reactorIn && obj._reactorOut) { console.log(obj._reactorIn.logIdentifier() + ' Adding target "' + obj._reactorOut.instanceIdentifier() + '" to the chain reactor target list'); } else { console.log(this.logIdentifier() + ' Adding target "' + obj.instanceIdentifier() + '" to the chain reactor target list'); } } this._chain = this._chain || []; var index = this._chain.indexOf(obj); if (index === -1) { this._chain.push(obj); } }, unChain: function (obj) { if (this.debug && this.debug()) { if (obj._reactorIn && obj._reactorOut) { console.log(obj._reactorIn.logIdentifier() + ' Removing target "' + obj._reactorOut.instanceIdentifier() + '" from the chain reactor target list'); } else { console.log(this.logIdentifier() + ' Removing target "' + obj.instanceIdentifier() + '" from the chain reactor target list'); } } if (this._chain) { var index = this._chain.indexOf(obj); if (index > -1) { this._chain.splice(index, 1); } } }, chainSend: function (type, data, options) { if (this._chain) { var arr = this._chain, arrItem, count = arr.length, index; for (index = 0; index < count; index++) { arrItem = arr[index]; if (!arrItem._state || (arrItem._state && !arrItem.isDropped())) { if (this.debug && this.debug()) { if (arrItem._reactorIn && arrItem._reactorOut) { console.log(arrItem._reactorIn.logIdentifier() + ' Sending data down the chain reactor pipe to "' + arrItem._reactorOut.instanceIdentifier() + '"'); } else { console.log(this.logIdentifier() + ' Sending data down the chain reactor pipe to "' + arrItem.instanceIdentifier() + '"'); } } arrItem.chainReceive(this, type, data, options); } else { console.log('Reactor Data:', type, data, options); console.log('Reactor Node:', arrItem); throw('Chain reactor attempting to send data to target reactor node that is in a dropped state!'); } } } }, chainReceive: function (sender, type, data, options) { var chainPacket 
= { sender: sender, type: type, data: data, options: options }; if (this.debug && this.debug()) { console.log(this.logIdentifier() + 'Received data from parent reactor node'); } // Fire our internal handler if (!this._chainHandler || (this._chainHandler && !this._chainHandler(chainPacket))) { // Propagate the message down the chain this.chainSend(chainPacket.type, chainPacket.data, chainPacket.options); } } }; module.exports = ChainReactor; },{}],13:[function(_dereq_,module,exports){ "use strict"; var idCounter = 0, Overload = _dereq_('./Overload'), Serialiser = _dereq_('./Serialiser'), Common, serialiser = new Serialiser(); /** * Provides commonly used methods to most classes in ForerunnerDB. * @mixin */ Common = { // Expose the serialiser object so it can be extended with new data handlers. serialiser: serialiser, /** * Gets / sets data in the item store. The store can be used to set and * retrieve data against a key. Useful for adding arbitrary key/value data * to a collection / view etc and retrieving it later. * @param {String|*} key The key under which to store the passed value or * retrieve the existing stored value. * @param {*=} val Optional value. If passed will overwrite the existing value * stored against the specified key if one currently exists. * @returns {*} */ store: function (key, val) { if (key !== undefined) { if (val !== undefined) { // Store the data this._store = this._store || {}; this._store[key] = val; return this; } if (this._store) { return this._store[key]; } } return undefined; }, /** * Removes a previously stored key/value pair from the item store, set previously * by using the store() method. * @param {String|*} key The key of the key/value pair to remove; * @returns {Common} Returns this for chaining. */ unStore: function (key) { if (key !== undefined) { delete this._store[key]; } return this; }, /** * Returns a non-referenced version of the passed object / array. 
* @param {Object} data The object or array to return as a non-referenced version. * @param {Number=} copies Optional number of copies to produce. If specified, the return * value will be an array of decoupled objects, each distinct from the other. * @returns {*} */ decouple: function (data, copies) { if (data !== undefined) { if (!copies) { return this.jParse(this.jStringify(data)); } else { var i, json = this.jStringify(data), copyArr = []; for (i = 0; i < copies; i++) { copyArr.push(this.jParse(json)); } return copyArr; } } return undefined; }, /** * Parses and returns data from stringified version. * @param {String} data The stringified version of data to parse. * @returns {Object} The parsed JSON object from the data. */ jParse: function (data) { return serialiser.parse(data); //return JSON.parse(data); }, /** * Converts a JSON object into a stringified version. * @param {Object} data The data to stringify. * @returns {String} The stringified data. */ jStringify: function (data) { return serialiser.stringify(data); //return JSON.stringify(data); }, /** * Generates a new 16-character hexadecimal unique ID or * generates a new 16-character hexadecimal ID based on * the passed string. Will always generate the same ID * for the same string. * @param {String=} str A string to generate the ID from. * @return {String} */ objectId: function (str) { var id, pow = Math.pow(10, 17); if (!str) { idCounter++; id = (idCounter + ( Math.random() * pow + Math.random() * pow + Math.random() * pow + Math.random() * pow )).toString(16); } else { var val = 0, count = str.length, i; for (i = 0; i < count; i++) { val += str.charCodeAt(i) * pow; } id = val.toString(16); } return id; }, /** * Gets / sets debug flag that can enable debug message output to the * console if required. * @param {Boolean} val The value to set debug flag to. * @return {Boolean} True if enabled, false otherwise. 
*/ /** * Sets debug flag for a particular type that can enable debug message * output to the console if required. * @param {String} type The name of the debug type to set flag for. * @param {Boolean} val The value to set debug flag to. * @return {Boolean} True if enabled, false otherwise. */ debug: new Overload([ function () { return this._debug && this._debug.all; }, function (val) { if (val !== undefined) { if (typeof val === 'boolean') { this._debug = this._debug || {}; this._debug.all = val; this.chainSend('debug', this._debug); return this; } else { return (this._debug && this._debug[val]) || (this._db && this._db._debug && this._db._debug[val]) || (this._debug && this._debug.all); } } return this._debug && this._debug.all; }, function (type, val) { if (type !== undefined) { if (val !== undefined) { this._debug = this._debug || {}; this._debug[type] = val; this.chainSend('debug', this._debug); return this; } return (this._debug && this._debug[val]) || (this._db && this._db._debug && this._db._debug[type]); } return this._debug && this._debug.all; } ]), /** * Returns a string describing the class this instance is derived from. * @returns {string} */ classIdentifier: function () { return 'ForerunnerDB.' + this.className; }, /** * Returns a string describing the instance by it's class name and instance * object name. * @returns {String} The instance identifier. */ instanceIdentifier: function () { return '[' + this.className + ']' + this.name(); }, /** * Returns a string used to denote a console log against this instance, * consisting of the class identifier and instance identifier. * @returns {string} The log identifier. */ logIdentifier: function () { return this.classIdentifier() + ': ' + this.instanceIdentifier(); }, /** * Converts a query object with MongoDB dot notation syntax * to Forerunner's object notation syntax. * @param {Object} obj The object to convert. 
*/ convertToFdb: function (obj) { var varName, splitArr, objCopy, i; for (i in obj) { if (obj.hasOwnProperty(i)) { objCopy = obj; if (i.indexOf('.') > -1) { // Replace .$ with a placeholder before splitting by . char i = i.replace('.$', '[|$|]'); splitArr = i.split('.'); while ((varName = splitArr.shift())) { // Replace placeholder back to original .$ varName = varName.replace('[|$|]', '.$'); if (splitArr.length) { objCopy[varName] = {}; } else { objCopy[varName] = obj[i]; } objCopy = objCopy[varName]; } delete obj[i]; } } } }, /** * Checks if the state is dropped. * @returns {boolean} True when dropped, false otherwise. */ isDropped: function () { return this._state === 'dropped'; } }; module.exports = Common; },{"./Overload":22,"./Serialiser":25}],14:[function(_dereq_,module,exports){ "use strict"; /** * Provides some database constants. * @mixin */ var Constants = { TYPE_INSERT: 0, TYPE_UPDATE: 1, TYPE_REMOVE: 2, PHASE_BEFORE: 0, PHASE_AFTER: 1 }; module.exports = Constants; },{}],15:[function(_dereq_,module,exports){ "use strict"; var Overload = _dereq_('./Overload'); /** * Provides event emitter functionality including the methods: on, off, once, emit, deferEmit. * @mixin */ var Events = { on: new Overload({ /** * Attach an event listener to the passed event. * @param {String} event The name of the event to listen for. * @param {Function} listener The method to call when the event is fired. */ 'string, function': function (event, listener) { this._listeners = this._listeners || {}; this._listeners[event] = this._listeners[event] || {}; this._listeners[event]['*'] = this._listeners[event]['*'] || []; this._listeners[event]['*'].push(listener); return this; }, /** * Attach an event listener to the passed event only if the passed * id matches the document id for the event being fired. * @param {String} event The name of the event to listen for. * @param {*} id The document id to match against. 
* @param {Function} listener The method to call when the event is fired. */ 'string, *, function': function (event, id, listener) { this._listeners = this._listeners || {}; this._listeners[event] = this._listeners[event] || {}; this._listeners[event][id] = this._listeners[event][id] || []; this._listeners[event][id].push(listener); return this; } }), once: new Overload({ 'string, function': function (eventName, callback) { var self = this, internalCallback = function () { self.off(eventName, internalCallback); callback.apply(self, arguments); }; return this.on(eventName, internalCallback); }, 'string, *, function': function (eventName, id, callback) { var self = this, internalCallback = function () { self.off(eventName, id, internalCallback); callback.apply(self, arguments); }; return this.on(eventName, id, internalCallback); } }), off: new Overload({ 'string': function (event) { if (this._listeners && this._listeners[event] && event in this._listeners) { delete this._listeners[event]; } return this; }, 'string, function': function (event, listener) { var arr, index; if (typeof(listener) === 'string') { if (this._listeners && this._listeners[event] && this._listeners[event][listener]) { delete this._listeners[event][listener]; } } else { if (this._listeners && event in this._listeners) { arr = this._listeners[event]['*']; index = arr.indexOf(listener); if (index > -1) { arr.splice(index, 1); } } } return this; }, 'string, *, function': function (event, id, listener) { if (this._listeners && event in this._listeners && id in this.listeners[event]) { var arr = this._listeners[event][id], index = arr.indexOf(listener); if (index > -1) { arr.splice(index, 1); } } }, 'string, *': function (event, id) { if (this._listeners && event in this._listeners && id in this._listeners[event]) { // Kill all listeners for this event id delete this._listeners[event][id]; } } }), emit: function (event, data) { this._listeners = this._listeners || {}; if (event in this._listeners) { 
var arrIndex, arrCount, tmpFunc, arr, listenerIdArr, listenerIdCount, listenerIdIndex; // Handle global emit if (this._listeners[event]['*']) { arr = this._listeners[event]['*']; arrCount = arr.length; for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { // Check we have a function to execute tmpFunc = arr[arrIndex]; if (typeof tmpFunc === 'function') { tmpFunc.apply(this, Array.prototype.slice.call(arguments, 1)); } } } // Handle individual emit if (data instanceof Array) { // Check if the array is an array of objects in the collection if (data[0] && data[0][this._primaryKey]) { // Loop the array and check for listeners against the primary key listenerIdArr = this._listeners[event]; arrCount = data.length; for (arrIndex = 0; arrIndex < arrCount; arrIndex++) { if (listenerIdArr[data[arrIndex][this._primaryKey]]) { // Emit for this id listenerIdCount = listenerIdArr[data[arrIndex][this._primaryKey]].length; for (listenerIdIndex = 0; listenerIdIndex < listenerIdCount; listenerIdIndex++) { tmpFunc = listenerIdArr[data[arrIndex][this._primaryKey]][listenerIdIndex]; if (typeof tmpFunc === 'function') { listenerIdArr[data[arrIndex][this._primaryKey]][listenerIdIndex].apply(this, Array.prototype.slice.call(arguments, 1)); } } } } } } } return this; }, /** * Queues an event to be fired. This has automatic de-bouncing so that any * events of the same type that occur within 100 milliseconds of a previous * one will all be wrapped into a single emit rather than emitting tons of * events for lots of chained inserts etc. Only the data from the last * de-bounced event will be emitted. * @param {String} eventName The name of the event to emit. * @param {*=} data Optional data to emit with the event. 
*/ deferEmit: function (eventName, data) { var self = this, args; if (!this._noEmitDefer && (!this._db || (this._db && !this._db._noEmitDefer))) { args = arguments; // Check for an existing timeout this._deferTimeout = this._deferTimeout || {}; if (this._deferTimeout[eventName]) { clearTimeout(this._deferTimeout[eventName]); } // Set a timeout this._deferTimeout[eventName] = setTimeout(function () { if (self.debug()) { console.log(self.logIdentifier() + ' Emitting ' + args[0]); } self.emit.apply(self, args); }, 1); } else { this.emit.apply(this, arguments); } return this; } }; module.exports = Events; },{"./Overload":22}],16:[function(_dereq_,module,exports){ "use strict"; /** * Provides object matching algorithm methods. * @mixin */ var Matching = { /** * Internal method that checks a document against a test object. * @param {*} source The source object or value to test against. * @param {*} test The test object or value to test with. * @param {Object} queryOptions The options the query was passed with. * @param {String=} opToApply The special operation to apply to the test such * as 'and' or an 'or' operator. * @param {Object=} options An object containing options to apply to the * operation such as limiting the fields returned etc. * @returns {Boolean} True if the test was positive, false on negative. 
* @private */ _match: function (source, test, queryOptions, opToApply, options) { // TODO: This method is quite long, break into smaller pieces var operation, applyOp = opToApply, recurseVal, tmpIndex, sourceType = typeof source, testType = typeof test, matchedAll = true, opResult, substringCache, i; options = options || {}; queryOptions = queryOptions || {}; // Check if options currently holds a root query object if (!options.$rootQuery) { // Root query not assigned, hold the root query options.$rootQuery = test; } // Check if options currently holds a root source object if (!options.$rootSource) { // Root query not assigned, hold the root query options.$rootSource = source; } // Assign current query data options.$currentQuery = test; options.$rootData = options.$rootData || {}; // Check if the comparison data are both strings or numbers if ((sourceType === 'string' || sourceType === 'number') && (testType === 'string' || testType === 'number')) { // The source and test data are flat types that do not require recursive searches, // so just compare them and return the result if (sourceType === 'number') { // Number comparison if (source !== test) { matchedAll = false; } } else { // String comparison // TODO: We can probably use a queryOptions.$locale as a second parameter here if (source.localeCompare(test)) { matchedAll = false; } } } else if ((sourceType === 'string' || sourceType === 'number') && (testType === 'object' && test instanceof RegExp)) { if (!test.test(source)) { matchedAll = false; } } else { for (i in test) { if (test.hasOwnProperty(i)) { // Assign previous query data options.$previousQuery = options.$parent; // Assign parent query data options.$parent = { query: test[i], key: i, parent: options.$previousQuery }; // Reset operation flag operation = false; // Grab first two chars of the key name to check for $ substringCache = i.substr(0, 2); // Check if the property is a comment (ignorable) if (substringCache === ' // Skip this property continue; } 
// Check if the property starts with a dollar (function) if (substringCache.indexOf('$') === 0) { // Ask the _matchOp method to handle the operation opResult = this._matchOp(i, source, test[i], queryOptions, options); // Check the result of the matchOp operation // If the result is -1 then no operation took place, otherwise the result // will be a boolean denoting a match (true) or no match (false) if (opResult > -1) { if (opResult) { if (opToApply === 'or') { return true; } } else { // Set the matchedAll flag to the result of the operation // because the operation did not return true matchedAll = opResult; } // Record that an operation was handled operation = true; } } // Check for regex if (!operation && test[i] instanceof RegExp) { operation = true; if (sourceType === 'object' && source[i] !== undefined && test[i].test(source[i])) { if (opToApply === 'or') { return true; } } else { matchedAll = false; } } if (!operation) { // Check if our query is an object if (typeof(test[i]) === 'object') { // Because test[i] is an object, source must also be an object // Check if our source data we are checking the test query against // is an object or an array if (source[i] !== undefined) { if (source[i] instanceof Array && !(test[i] instanceof Array)) { // The source data is an array, so check each item until a // match is found recurseVal = false; for (tmpIndex = 0; tmpIndex < source[i].length; tmpIndex++) { recurseVal = this._match(source[i][tmpIndex], test[i], queryOptions, applyOp, options); if (recurseVal) { // One of the array items matched the query so we can // include this item in the results, so break now break; } } if (recurseVal) { if (opToApply === 'or') { return true; } } else { matchedAll = false; } } else if (!(source[i] instanceof Array) && test[i] instanceof Array) { // The test key data is an array and the source key data is not so check // each item in the test key data to see if the source item matches one // of them. This is effectively an $in search. 
recurseVal = false; for (tmpIndex = 0; tmpIndex < test[i].length; tmpIndex++) { recurseVal = this._match(source[i], test[i][tmpIndex], queryOptions, applyOp, options); if (recurseVal) { // One of the array items matched the query so we can // include this item in the results, so break now break; } } if (recurseVal) { if (opToApply === 'or') { return true; } } else { matchedAll = false; } } else if (typeof(source) === 'object') { // Recurse down the object tree recurseVal = this._match(source[i], test[i], queryOptions, applyOp, options); if (recurseVal) { if (opToApply === 'or') { return true; } } else { matchedAll = false; } } else { recurseVal = this._match(undefined, test[i], queryOptions, applyOp, options); if (recurseVal) { if (opToApply === 'or') { return true; } } else { matchedAll = false; } } } else { // First check if the test match is an $exists if (test[i] && test[i].$exists !== undefined) { // Push the item through another match recurse recurseVal = this._match(undefined, test[i], queryOptions, applyOp, options); if (recurseVal) { if (opToApply === 'or') { return true; } } else { matchedAll = false; } } else { matchedAll = false; } } } else { // Check if the prop matches our test value if (source && source[i] === test[i]) { if (opToApply === 'or') { return true; } } else if (source && source[i] && source[i] instanceof Array && test[i] && typeof(test[i]) !== "object") { // We are looking for a value inside an array // The source data is an array, so check each item until a // match is found recurseVal = false; for (tmpIndex = 0; tmpIndex < source[i].length; tmpIndex++) { recurseVal = this._match(source[i][tmpIndex], test[i], queryOptions, applyOp, options); if (recurseVal) { // One of the array items matched the query so we can // include this item in the results, so break now break; } } if (recurseVal) { if (opToApply === 'or') { return true; } } else { matchedAll = false; } } else { matchedAll = false; } } } if (opToApply === 'and' && !matchedAll) { 
return false; } } } } return matchedAll; }, /** * Internal method, performs a matching process against a query operator such as $gt or $nin. * @param {String} key The property name in the test that matches the operator to perform * matching against. * @param {*} source The source data to match the query against. * @param {*} test The query to match the source against. * @param {Object} queryOptions The options the query was passed with. * @param {Object=} options An options object. * @returns {*} * @private */ _matchOp: function (key, source, test, queryOptions, options) { // Check for commands switch (key) { case '$gt': // Greater than return source > test; case '$gte': // Greater than or equal return source >= test; case '$lt': // Less than return source < test; case '$lte': // Less than or equal return source <= test; case '$exists': // Property exists return (source === undefined) !== test; case '$eq': // Equals return source == test; // jshint ignore:line case '$eeq': // Equals equals return source === test; case '$ne': // Not equals return source != test; // jshint ignore:line case '$nee': // Not equals equals return source !== test; case '$or': // Match true on ANY check to pass for (var orIndex = 0; orIndex < test.length; orIndex++) { if (this._match(source, test[orIndex], queryOptions, 'and', options)) { return true; } } return false; case '$and': // Match true on ALL checks to pass for (var andIndex = 0; andIndex < test.length; andIndex++) { if (!this._match(source, test[andIndex], queryOptions, 'and', options)) { return false; } } return true; case '$in': // Check that the in test is an array if (test instanceof Array) { var inArr = test, inArrCount = inArr.length, inArrIndex; for (inArrIndex = 0; inArrIndex < inArrCount; inArrIndex++) { if (this._match(source, inArr[inArrIndex], queryOptions, 'and', options)) { return true; } } return false; } else if (typeof test === 'object') { return this._match(source, test, queryOptions, 'and', options); } else { 
throw(this.logIdentifier() + ' Cannot use an $in operator on a non-array key: ' + key); } break; case '$nin': // Not in // Check that the not-in test is an array if (test instanceof Array) { var notInArr = test, notInArrCount = notInArr.length, notInArrIndex; for (notInArrIndex = 0; notInArrIndex < notInArrCount; notInArrIndex++) { if (this._match(source, notInArr[notInArrIndex], queryOptions, 'and', options)) { return false; } } return true; } else if (typeof test === 'object') { return this._match(source, test, queryOptions, 'and', options); } else { throw(this.logIdentifier() + ' Cannot use a $nin operator on a non-array key: ' + key); } break; case '$distinct': // Ensure options holds a distinct lookup options.$rootData['//distinctLookup'] = options.$rootData['//distinctLookup'] || {}; for (var distinctProp in test) { if (test.hasOwnProperty(distinctProp)) { options.$rootData['//distinctLookup'][distinctProp] = options.$rootData['//distinctLookup'][distinctProp] || {}; // Check if the options distinct lookup has this field's value if (options.$rootData['//distinctLookup'][distinctProp][source[distinctProp]]) { // Value is already in use return false; } else { // Set the value in the lookup options.$rootData['//distinctLookup'][distinctProp][source[distinctProp]] = true; // Allow the item in the results return true; } } } break; case '$count': var countKey, countArr, countVal; // Iterate the count object's keys for (countKey in test) { if (test.hasOwnProperty(countKey)) { // Check the property exists and is an array. 
If the property being counted is not // an array (or doesn't exist) then use a value of zero in any further count logic countArr = source[countKey]; if (typeof countArr === 'object' && countArr instanceof Array) { countVal = countArr.length; } else { countVal = 0; } // Now recurse down the query chain further to satisfy the query for this key (countKey) if (!this._match(countVal, test[countKey], queryOptions, 'and', options)) { return false; } } } // Allow the item in the results return true; case '$find': case '$findOne': case '$findSub': var fromType = 'collection', findQuery, findOptions, subQuery, subOptions, subPath, result, operation = {}; // Check we have a database object to work from if (!this.db()) { throw('Cannot operate a ' + key + ' sub-query on an anonymous collection (one with no db set)!'); } // Check all parts of the $find operation exist if (!test.$from) { throw(key + ' missing $from property!'); } if (test.$fromType) { fromType = test.$fromType; // Check the fromType exists as a method if (!this.db()[fromType] || typeof this.db()[fromType] !== 'function') { throw(key + ' cannot operate against $fromType "' + fromType + '" because the database does not recognise this type of object!'); } } // Perform the find operation findQuery = test.$query || {}; findOptions = test.$options || {}; if (key === '$findSub') { if (!test.$path) { throw(key + ' missing $path property!'); } subPath = test.$path; subQuery = test.$subQuery || {}; subOptions = test.$subOptions || {}; result = this.db()[fromType](test.$from).findSub(findQuery, subPath, subQuery, subOptions); } else { result = this.db()[fromType](test.$from)[key.substr(1)](findQuery, findOptions); } operation[options.$parent.parent.key] = result; return this._match(source, operation, queryOptions, 'and', options); } return -1; } }; module.exports = Matching; },{}],17:[function(_dereq_,module,exports){ "use strict"; /** * Provides sorting methods. 
* @mixin */ var Sorting = { /** * Sorts the passed value a against the passed value b ascending. * @param {*} a The first value to compare. * @param {*} b The second value to compare. * @returns {*} 1 if a is sorted after b, -1 if a is sorted before b. */ sortAsc: function (a, b) { if (typeof(a) === 'string' && typeof(b) === 'string') { return a.localeCompare(b); } else { if (a > b) { return 1; } else if (a < b) { return -1; } } return 0; }, /** * Sorts the passed value a against the passed value b descending. * @param {*} a The first value to compare. * @param {*} b The second value to compare. * @returns {*} 1 if a is sorted after b, -1 if a is sorted before b. */ sortDesc: function (a, b) { if (typeof(a) === 'string' && typeof(b) === 'string') { return b.localeCompare(a); } else { if (a > b) { return -1; } else if (a < b) { return 1; } } return 0; } }; module.exports = Sorting; },{}],18:[function(_dereq_,module,exports){ "use strict"; var Tags, tagMap = {}; /** * Provides class instance tagging and tag operation methods. * @mixin */ Tags = { /** * Tags a class instance for later lookup. * @param {String} name The tag to add. * @returns {boolean} */ tagAdd: function (name) { var i, self = this, mapArr = tagMap[name] = tagMap[name] || []; for (i = 0; i < mapArr.length; i++) { if (mapArr[i] === self) { return true; } } mapArr.push(self); // Hook the drop event for this so we can react if (self.on) { self.on('drop', function () { // We've been dropped so remove ourselves from the tag map self.tagRemove(name); }); } return true; }, /** * Removes a tag from a class instance. * @param {String} name The tag to remove. * @returns {boolean} */ tagRemove: function (name) { var i, mapArr = tagMap[name]; if (mapArr) { for (i = 0; i < mapArr.length; i++) { if (mapArr[i] === this) { mapArr.splice(i, 1); return true; } } } return false; }, /** * Gets an array of all instances tagged with the passed tag name. * @param {String} name The tag to lookup. 
* @returns {Array} The array of instances that have the passed tag. */ tagLookup: function (name) { return tagMap[name] || []; }, /** * Drops all instances that are tagged with the passed tag name. * @param {String} name The tag to lookup. * @param {Function} callback Callback once dropping has completed * for all instances that match the passed tag name. * @returns {boolean} */ tagDrop: function (name, callback) { var arr = this.tagLookup(name), dropCb, dropCount, i; dropCb = function () { dropCount if (callback && dropCount === 0) { callback(false); } }; if (arr.length) { dropCount = arr.length; // Loop the array and drop all items for (i = arr.length - 1; i >= 0; i arr[i].drop(dropCb); } } return true; } }; module.exports = Tags; },{}],19:[function(_dereq_,module,exports){ "use strict"; var Overload = _dereq_('./Overload'); /** * Provides trigger functionality methods. * @mixin */ var Triggers = { /** * Add a trigger by id. * @param {String} id The id of the trigger. This must be unique to the type and * phase of the trigger. Only one trigger may be added with this id per type and * phase. * @param {Number} type The type of operation to apply the trigger to. See * Mixin.Constants for constants to use. * @param {Number} phase The phase of an operation to fire the trigger on. See * Mixin.Constants for constants to use. * @param {Function} method The method to call when the trigger is fired. * @returns {boolean} True if the trigger was added successfully, false if not. 
*/ addTrigger: function (id, type, phase, method) { var self = this, triggerIndex; // Check if the trigger already exists triggerIndex = self._triggerIndexOf(id, type, phase); if (triggerIndex === -1) { // The trigger does not exist, create it self._trigger = self._trigger || {}; self._trigger[type] = self._trigger[type] || {}; self._trigger[type][phase] = self._trigger[type][phase] || []; self._trigger[type][phase].push({ id: id, method: method, enabled: true }); return true; } return false; }, /** * * @param {String} id The id of the trigger to remove. * @param {Number} type The type of operation to remove the trigger from. See * Mixin.Constants for constants to use. * @param {Number} phase The phase of the operation to remove the trigger from. * See Mixin.Constants for constants to use. * @returns {boolean} True if removed successfully, false if not. */ removeTrigger: function (id, type, phase) { var self = this, triggerIndex; // Check if the trigger already exists triggerIndex = self._triggerIndexOf(id, type, phase); if (triggerIndex > -1) { // The trigger exists, remove it self._trigger[type][phase].splice(triggerIndex, 1); } return false; }, enableTrigger: new Overload({ 'string': function (id) { // Alter all triggers of this type var self = this, types = self._trigger, phases, triggers, result = false, i, k, j; if (types) { for (j in types) { if (types.hasOwnProperty(j)) { phases = types[j]; if (phases) { for (i in phases) { if (phases.hasOwnProperty(i)) { triggers = phases[i]; // Loop triggers and set enabled flag for (k = 0; k < triggers.length; k++) { if (triggers[k].id === id) { triggers[k].enabled = true; result = true; } } } } } } } } return result; }, 'number': function (type) { // Alter all triggers of this type var self = this, phases = self._trigger[type], triggers, result = false, i, k; if (phases) { for (i in phases) { if (phases.hasOwnProperty(i)) { triggers = phases[i]; // Loop triggers and set to enabled for (k = 0; k < triggers.length; k++) { 
triggers[k].enabled = true; result = true; } } } } return result; }, 'number, number': function (type, phase) { // Alter all triggers of this type and phase var self = this, phases = self._trigger[type], triggers, result = false, k; if (phases) { triggers = phases[phase]; if (triggers) { // Loop triggers and set to enabled for (k = 0; k < triggers.length; k++) { triggers[k].enabled = true; result = true; } } } return result; }, 'string, number, number': function (id, type, phase) { // Check if the trigger already exists var self = this, triggerIndex = self._triggerIndexOf(id, type, phase); if (triggerIndex > -1) { // Update the trigger self._trigger[type][phase][triggerIndex].enabled = true; return true; } return false; } }), disableTrigger: new Overload({ 'string': function (id) { // Alter all triggers of this type var self = this, types = self._trigger, phases, triggers, result = false, i, k, j; if (types) { for (j in types) { if (types.hasOwnProperty(j)) { phases = types[j]; if (phases) { for (i in phases) { if (phases.hasOwnProperty(i)) { triggers = phases[i]; // Loop triggers and set enabled flag for (k = 0; k < triggers.length; k++) { if (triggers[k].id === id) { triggers[k].enabled = false; result = true; } } } } } } } } return result; }, 'number': function (type) { // Alter all triggers of this type var self = this, phases = self._trigger[type], triggers, result = false, i, k; if (phases) { for (i in phases) { if (phases.hasOwnProperty(i)) { triggers = phases[i]; // Loop triggers and set to disabled for (k = 0; k < triggers.length; k++) { triggers[k].enabled = false; result = true; } } } } return result; }, 'number, number': function (type, phase) { // Alter all triggers of this type and phase var self = this, phases = self._trigger[type], triggers, result = false, k; if (phases) { triggers = phases[phase]; if (triggers) { // Loop triggers and set to disabled for (k = 0; k < triggers.length; k++) { triggers[k].enabled = false; result = true; } } } return 
result; }, 'string, number, number': function (id, type, phase) { // Check if the trigger already exists var self = this, triggerIndex = self._triggerIndexOf(id, type, phase); if (triggerIndex > -1) { // Update the trigger self._trigger[type][phase][triggerIndex].enabled = false; return true; } return false; } }), /** * Checks if a trigger will fire based on the type and phase provided. * @param {Number} type The type of operation. See Mixin.Constants for * constants to use. * @param {Number} phase The phase of the operation. See Mixin.Constants * for constants to use. * @returns {Boolean} True if the trigger will fire, false otherwise. */ willTrigger: function (type, phase) { if (this._trigger && this._trigger[type] && this._trigger[type][phase] && this._trigger[type][phase].length) { // Check if a trigger in this array is enabled var arr = this._trigger[type][phase], i; for (i = 0; i < arr.length; i++) { if (arr[i].enabled) { return true; } } } return false; }, /** * Processes trigger actions based on the operation, type and phase. * @param {Object} operation Operation data to pass to the trigger. * @param {Number} type The type of operation. See Mixin.Constants for * constants to use. * @param {Number} phase The phase of the operation. See Mixin.Constants * for constants to use. * @param {Object} oldDoc The document snapshot before operations are * carried out against the data. * @param {Object} newDoc The document snapshot after operations are * carried out against the data. 
* @returns {boolean} */ processTrigger: function (operation, type, phase, oldDoc, newDoc) { var self = this, triggerArr, triggerIndex, triggerCount, triggerItem, response; if (self._trigger && self._trigger[type] && self._trigger[type][phase]) { triggerArr = self._trigger[type][phase]; triggerCount = triggerArr.length; for (triggerIndex = 0; triggerIndex < triggerCount; triggerIndex++) { triggerItem = triggerArr[triggerIndex]; // Check if the trigger is enabled if (triggerItem.enabled) { if (this.debug()) { var typeName, phaseName; switch (type) { case this.TYPE_INSERT: typeName = 'insert'; break; case this.TYPE_UPDATE: typeName = 'update'; break; case this.TYPE_REMOVE: typeName = 'remove'; break; default: typeName = ''; break; } switch (phase) { case this.PHASE_BEFORE: phaseName = 'before'; break; case this.PHASE_AFTER: phaseName = 'after'; break; default: phaseName = ''; break; } //console.log('Triggers: Processing trigger "' + id + '" for ' + typeName + ' in phase "' + phaseName + '"'); } // Run the trigger's method and store the response response = triggerItem.method.call(self, operation, oldDoc, newDoc); // Check the response for a non-expected result (anything other than // undefined, true or false is considered a throwable error) if (response === false) { // The trigger wants us to cancel operations return false; } if (response !== undefined && response !== true && response !== false) { // Trigger responded with error, throw the error throw('ForerunnerDB.Mixin.Triggers: Trigger error: ' + response); } } } // Triggers all ran without issue, return a success (true) return true; } }, /** * Returns the index of a trigger by id based on type and phase. * @param {String} id The id of the trigger to find the index of. * @param {Number} type The type of operation. See Mixin.Constants for * constants to use. * @param {Number} phase The phase of the operation. See Mixin.Constants * for constants to use. 
* @returns {number} * @private */ _triggerIndexOf: function (id, type, phase) { var self = this, triggerArr, triggerCount, triggerIndex; if (self._trigger && self._trigger[type] && self._trigger[type][phase]) { triggerArr = self._trigger[type][phase]; triggerCount = triggerArr.length; for (triggerIndex = 0; triggerIndex < triggerCount; triggerIndex++) { if (triggerArr[triggerIndex].id === id) { return triggerIndex; } } } return -1; } }; module.exports = Triggers; },{"./Overload":22}],20:[function(_dereq_,module,exports){ "use strict"; /** * Provides methods to handle object update operations. * @mixin */ var Updating = { /** * Updates a property on an object. * @param {Object} doc The object whose property is to be updated. * @param {String} prop The property to update. * @param {*} val The new value of the property. * @private */ _updateProperty: function (doc, prop, val) { doc[prop] = val; if (this.debug()) { console.log(this.logIdentifier() + ' Setting non-data-bound document property "' + prop + '"'); } }, /** * Increments a value for a property on a document by the passed number. * @param {Object} doc The document to modify. * @param {String} prop The property to modify. * @param {Number} val The amount to increment by. * @private */ _updateIncrement: function (doc, prop, val) { doc[prop] += val; }, /** * Changes the index of an item in the passed array. * @param {Array} arr The array to modify. * @param {Number} indexFrom The index to move the item from. * @param {Number} indexTo The index to move the item to. * @private */ _updateSpliceMove: function (arr, indexFrom, indexTo) { arr.splice(indexTo, 0, arr.splice(indexFrom, 1)[0]); if (this.debug()) { console.log(this.logIdentifier() + ' Moving non-data-bound document array index from "' + indexFrom + '" to "' + indexTo + '"'); } }, /** * Inserts an item into the passed array at the specified index. * @param {Array} arr The array to insert into. * @param {Number} index The index to insert at. 
* @param {Object} doc The document to insert. * @private */ _updateSplicePush: function (arr, index, doc) { if (arr.length > index) { arr.splice(index, 0, doc); } else { arr.push(doc); } }, /** * Inserts an item at the end of an array. * @param {Array} arr The array to insert the item into. * @param {Object} doc The document to insert. * @private */ _updatePush: function (arr, doc) { arr.push(doc); }, /** * Removes an item from the passed array. * @param {Array} arr The array to modify. * @param {Number} index The index of the item in the array to remove. * @private */ _updatePull: function (arr, index) { arr.splice(index, 1); }, /** * Multiplies a value for a property on a document by the passed number. * @param {Object} doc The document to modify. * @param {String} prop The property to modify. * @param {Number} val The amount to multiply by. * @private */ _updateMultiply: function (doc, prop, val) { doc[prop] *= val; }, /** * Renames a property on a document to the passed property. * @param {Object} doc The document to modify. * @param {String} prop The property to rename. * @param {Number} val The new property name. * @private */ _updateRename: function (doc, prop, val) { doc[val] = doc[prop]; delete doc[prop]; }, /** * Sets a property on a document to the passed value. * @param {Object} doc The document to modify. * @param {String} prop The property to set. * @param {*} val The new property value. * @private */ _updateOverwrite: function (doc, prop, val) { doc[prop] = val; }, /** * Deletes a property on a document. * @param {Object} doc The document to modify. * @param {String} prop The property to delete. * @private */ _updateUnset: function (doc, prop) { delete doc[prop]; }, /** * Removes all properties from an object without destroying * the object instance, thereby maintaining data-bound linking. * @param {Object} doc The parent object to modify. * @param {String} prop The name of the child object to clear. 
* @private */ _updateClear: function (doc, prop) { var obj = doc[prop], i; if (obj && typeof obj === 'object') { for (i in obj) { if (obj.hasOwnProperty(i)) { this._updateUnset(obj, i); } } } }, /** * Pops an item or items from the array stack. * @param {Object} doc The document to modify. * @param {Number} val If set to a positive integer, will pop the number specified * from the stack, if set to a negative integer will shift the number specified * from the stack. * @return {Boolean} * @private */ _updatePop: function (doc, val) { var updated = false, i; if (doc.length > 0) { if (val > 0) { for (i = 0; i < val; i++) { doc.pop(); } updated = true; } else if (val < 0) { for (i = 0; i > val; i doc.shift(); } updated = true; } } return updated; } }; module.exports = Updating; },{}],21:[function(_dereq_,module,exports){ "use strict"; var Shared = _dereq_('./Shared'), Path = _dereq_('./Path'); /** * The operation class, used to store details about an operation being * performed by the database. * @param {String} name The name of the operation. * @constructor */ var Operation = function (name) { this.pathSolver = new Path(); this.counter = 0; this.init.apply(this, arguments); }; Operation.prototype.init = function (name) { this._data = { operation: name, // The name of the operation executed such as "find", "update" etc index: { potential: [], // Indexes that could have potentially been used used: false // The index that was picked to use }, steps: [], // The steps taken to generate the query results, time: { startMs: 0, stopMs: 0, totalMs: 0, process: {} }, flag: {}, // An object with flags that denote certain execution paths log: [] // Any extra data that might be useful such as warnings or helpful hints }; }; Shared.addModule('Operation', Operation); Shared.mixin(Operation.prototype, 'Mixin.ChainReactor'); /** * Starts the operation timer. 
*/ Operation.prototype.start = function () { this._data.time.startMs = new Date().getTime(); }; /** * Adds an item to the operation log. * @param {String} event The item to log. * @returns {*} */ Operation.prototype.log = function (event) { if (event) { var lastLogTime = this._log.length > 0 ? this._data.log[this._data.log.length - 1].time : 0, logObj = { event: event, time: new Date().getTime(), delta: 0 }; this._data.log.push(logObj); if (lastLogTime) { logObj.delta = logObj.time - lastLogTime; } return this; } return this._data.log; }; /** * Called when starting and ending a timed operation, used to time * internal calls within an operation's execution. * @param {String} section An operation name. * @returns {*} */ Operation.prototype.time = function (section) { if (section !== undefined) { var process = this._data.time.process, processObj = process[section] = process[section] || {}; if (!processObj.startMs) { // Timer started processObj.startMs = new Date().getTime(); processObj.stepObj = { name: section }; this._data.steps.push(processObj.stepObj); } else { processObj.stopMs = new Date().getTime(); processObj.totalMs = processObj.stopMs - processObj.startMs; processObj.stepObj.totalMs = processObj.totalMs; delete processObj.stepObj; } return this; } return this._data.time; }; /** * Used to set key/value flags during operation execution. 
* @param {String} key * @param {String} val * @returns {*} */ Operation.prototype.flag = function (key, val) { if (key !== undefined && val !== undefined) { this._data.flag[key] = val; } else if (key !== undefined) { return this._data.flag[key]; } else { return this._data.flag; } }; Operation.prototype.data = function (path, val, noTime) { if (val !== undefined) { // Assign value to object path this.pathSolver.set(this._data, path, val); return this; } return this.pathSolver.get(this._data, path); }; Operation.prototype.pushData = function (path, val, noTime) { // Assign value to object path this.pathSolver.push(this._data, path, val); }; /** * Stops the operation timer. */ Operation.prototype.stop = function () { this._data.time.stopMs = new Date().getTime(); this._data.time.totalMs = this._data.time.stopMs - this._data.time.startMs; }; Shared.finishModule('Operation'); module.exports = Operation; },{"./Path":23,"./Shared":26}],22:[function(_dereq_,module,exports){ "use strict"; /** * Allows a method to accept overloaded calls with different parameters controlling * which passed overload function is called. 
* @param {Object} def * @returns {Function} * @constructor */ var Overload = function (def) { if (def) { var self = this, index, count, tmpDef, defNewKey, sigIndex, signatures; if (!(def instanceof Array)) { tmpDef = {}; // Def is an object, make sure all prop names are devoid of spaces for (index in def) { if (def.hasOwnProperty(index)) { defNewKey = index.replace(/ /g, ''); // Check if the definition array has a * string in it if (defNewKey.indexOf('*') === -1) { // No * found tmpDef[defNewKey] = def[index]; } else { // A * was found, generate the different signatures that this // definition could represent signatures = this.<API key>(defNewKey); for (sigIndex = 0; sigIndex < signatures.length; sigIndex++) { if (!tmpDef[signatures[sigIndex]]) { tmpDef[signatures[sigIndex]] = def[index]; } } } } } def = tmpDef; } return function () { var arr = [], lookup, type, name; // Check if we are being passed a key/function object or an array of functions if (def instanceof Array) { // We were passed an array of functions count = def.length; for (index = 0; index < count; index++) { if (def[index].length === arguments.length) { return self.callExtend(this, '$main', def, def[index], arguments); } } } else { // Generate lookup key from arguments // Copy arguments to an array for (index = 0; index < arguments.length; index++) { type = typeof arguments[index]; // Handle detecting arrays if (type === 'object' && arguments[index] instanceof Array) { type = 'array'; } // Handle been presented with a single undefined argument if (arguments.length === 1 && type === 'undefined') { break; } // Add the type to the argument types array arr.push(type); } lookup = arr.join(','); // Check for an exact lookup match if (def[lookup]) { return self.callExtend(this, '$main', def, def[lookup], arguments); } else { for (index = arr.length; index >= 0; index // Get the closest match lookup = arr.slice(0, index).join(','); if (def[lookup + ',...']) { // Matched against arguments + "any other" return 
self.callExtend(this, '$main', def, def[lookup + ',...'], arguments); } } } } name = typeof this.name === 'function' ? this.name() : 'Unknown'; console.log('Overload: ', def); throw('ForerunnerDB.Overload "' + name + '": Overloaded method does not have a matching signature for the passed arguments: ' + this.jStringify(arr)); }; } return function () {}; }; /** * Generates an array of all the different definition signatures that can be * created from the passed string with a catch-all wildcard *. E.g. it will * convert the signature: string,*,string to all potentials: * string,string,string * string,number,string * string,object,string, * string,function,string, * string,undefined,string * * @param {String} str Signature string with a wildcard in it. * @returns {Array} An array of signature strings that are generated. */ Overload.prototype.<API key> = function (str) { var signatures = [], newSignature, types = ['string', 'object', 'number', 'function', 'undefined'], index; if (str.indexOf('*') > -1) { // There is at least one "any" type, break out into multiple keys // We could do this at query time with regular expressions but // would be significantly slower for (index = 0; index < types.length; index++) { newSignature = str.replace('*', types[index]); signatures = signatures.concat(this.<API key>(newSignature)); } } else { signatures.push(str); } return signatures; }; Overload.prototype.callExtend = function (context, prop, propContext, func, args) { var tmp, ret; if (context && propContext[prop]) { tmp = context[prop]; context[prop] = propContext[prop]; ret = func.apply(context, args); context[prop] = tmp; return ret; } else { return func.apply(context, args); } }; module.exports = Overload; },{}],23:[function(_dereq_,module,exports){ "use strict"; var Shared = _dereq_('./Shared'); /** * Path object used to resolve object paths and retrieve data from * objects by using paths. * @param {String=} path The path to assign. 
 * @constructor
 */
var Path = function (path) {
	// Delegate to init() so construction behaviour can be shared/overridden
	this.init.apply(this, arguments);
};

Path.prototype.init = function (path) {
	if (path) {
		this.path(path);
	}
};

// Register with the shared module registry and mix in the common and
// chain-reactor behaviour used across ForerunnerDB components.
Shared.addModule('Path', Path);
Shared.mixin(Path.prototype, 'Mixin.Common');
Shared.mixin(Path.prototype, 'Mixin.ChainReactor');

/**
 * Gets / sets the given path for the Path instance.
 * @param {String=} path The path to assign.
 * @returns {*} This instance when setting (for chaining), otherwise the
 * currently assigned path string.
 */
Path.prototype.path = function (path) {
	if (path !== undefined) {
		// Cache both the cleaned path and its dot-split parts for later walks
		this._path = this.clean(path);
		this._pathParts = this._path.split('.');
		return this;
	}

	return this._path;
};

/**
 * Tests if the passed object has the paths that are specified and that
 * a value exists in those paths.
 * @param {Object} testKeys The object describing the paths to test for.
 * @param {Object} testObj The object to test paths against.
 * @returns {Boolean} True if the object paths exist.
 */
Path.prototype.hasObjectPaths = function (testKeys, testObj) {
	var result = true,
		i;

	for (i in testKeys) {
		if (testKeys.hasOwnProperty(i)) {
			// A missing key in the target means the path cannot exist
			if (testObj[i] === undefined) {
				return false;
			}

			if (typeof testKeys[i] === 'object') {
				// Recurse object
				result = this.hasObjectPaths(testKeys[i], testObj[i]);

				// Should we exit early?
				if (!result) {
					return false;
				}
			}
		}
	}

	return result;
};

/**
 * Counts the total number of key endpoints in the passed object.
 * An endpoint is any defined, non-object value (leaf of the key tree).
 * @param {Object} testObj The object to count key endpoints for.
 * @returns {Number} The number of endpoints.
 */
Path.prototype.countKeys = function (testObj) {
	var totalKeys = 0,
		i;

	for (i in testObj) {
		if (testObj.hasOwnProperty(i)) {
			if (testObj[i] !== undefined) {
				if (typeof testObj[i] !== 'object') {
					// Leaf value: counts as one endpoint
					totalKeys++;
				} else {
					// Sub-object: contributes the endpoints of its own subtree
					totalKeys += this.countKeys(testObj[i]);
				}
			}
		}
	}

	return totalKeys;
};

/**
 * Tests if the passed object has the paths that are specified and that
 * a value exists in those paths and if so returns the number matched.
 * @param {Object} testKeys The object describing the paths to test for.
 * @param {Object} testObj The object to test paths against.
 * @returns {Object} Stats on the matched keys
 */
Path.prototype.countObjectPaths = function (testKeys, testObj) {
	var matchData,
		matchedKeys = {},
		matchedKeyCount = 0,
		totalKeyCount = 0,
		i;

	for (i in testObj) {
		if (testObj.hasOwnProperty(i)) {
			if (typeof testObj[i] === 'object') {
				// The test / query object key is an object, recurse
				matchData = this.countObjectPaths(testKeys[i], testObj[i]);

				matchedKeys[i] = matchData.matchedKeys;
				totalKeyCount += matchData.totalKeyCount;
				matchedKeyCount += matchData.matchedKeyCount;
			} else {
				// The test / query object has a property that is not an object so add it as a key
				totalKeyCount++;

				// Check if the test keys also have this key and it is also not an object
				if (testKeys && testKeys[i] && typeof testKeys[i] !== 'object') {
					matchedKeys[i] = true;
					matchedKeyCount++;
				} else {
					matchedKeys[i] = false;
				}
			}
		}
	}

	return {
		matchedKeys: matchedKeys,
		matchedKeyCount: matchedKeyCount,
		totalKeyCount: totalKeyCount
	};
};

/**
 * Takes a non-recursive object and converts the object hierarchy into
 * a path string.
 * @param {Object} obj The object to parse.
 * @param {Boolean=} withValue If true will include a 'value' key in the returned
 * object that represents the value the object path points to.
 * @returns {Object}
 */
Path.prototype.parse = function (obj, withValue) {
	var paths = [],
		path = '',
		resultData,
		i, k;

	for (i in obj) {
		if (obj.hasOwnProperty(i)) {
			// Set the path to the key
			path = i;

			if (typeof(obj[i]) === 'object') {
				if (withValue) {
					// Recurse and prefix each child path with this key
					resultData = this.parse(obj[i], withValue);

					for (k = 0; k < resultData.length; k++) {
						paths.push({
							path: path + '.' + resultData[k].path,
							value: resultData[k].value
						});
					}
				} else {
					resultData = this.parse(obj[i]);

					for (k = 0; k < resultData.length; k++) {
						paths.push({
							path: path + '.' + resultData[k].path
						});
					}
				}
			} else {
				// Leaf value: emit the path (and optionally the value)
				if (withValue) {
					paths.push({
						path: path,
						value: obj[i]
					});
				} else {
					paths.push({
						path: path
					});
				}
			}
		}
	}

	return paths;
};

/**
 * Takes a non-recursive object and converts the object hierarchy into
 * an array of path strings that allow you to target all possible paths
 * in an object.
 *
 * The options object accepts an "ignore" field with a regular expression
 * as the value. If any key matches the expression it is not included in
 * the results.
 *
 * The options object accepts a boolean "verbose" field. If set to true
 * the results will include all paths leading up to endpoints as well as
 * the endpoints themselves.
 *
 * @returns {Array}
 */
Path.prototype.parseArr = function (obj, options) {
	options = options || {};
	return this._parseArr(obj, '', [], options);
};

/**
 * Recursive worker for parseArr(); accumulates dotted path strings into
 * the shared paths array.
 * @param {Object} obj The (sub-)object being walked.
 * @param {String} path The dotted path accumulated so far ('' at the root).
 * @param {Array} paths The shared accumulator array (mutated in place).
 * @param {Object} options Same options object as parseArr().
 * @returns {Array} The paths accumulator.
 * @private
 */
Path.prototype._parseArr = function (obj, path, paths, options) {
	var i,
		newPath = '';

	path = path || '';
	paths = paths || [];

	for (i in obj) {
		if (obj.hasOwnProperty(i)) {
			// Skip keys matched by the optional ignore regex
			if (!options.ignore || (options.ignore && !options.ignore.test(i))) {
				if (path) {
					newPath = path + '.' + i;
				} else {
					newPath = i;
				}

				if (typeof(obj[i]) === 'object') {
					// In verbose mode, intermediate (non-endpoint) paths are
					// recorded too
					if (options.verbose) {
						paths.push(newPath);
					}

					this._parseArr(obj[i], newPath, paths, options);
				} else {
					paths.push(newPath);
				}
			}
		}
	}

	return paths;
};

/**
 * Convenience wrapper around value() that returns only the first result.
 * @param {Object} obj The object to evaluate the path against.
 * @param {String} path The path to resolve.
 * @returns {*} The first value found at the path.
 */
Path.prototype.valueOne = function (obj, path) {
	return this.value(obj, path)[0];
};

/**
 * Gets the value(s) that the object contains for the currently assigned path string.
 * @param {Object} obj The object to evaluate the path against.
 * @param {String=} path A path to use instead of the existing one passed in path().
 * @param {Object=} options An optional options object.
 * @returns {Array} An array of values for the given path.
 */
Path.prototype.value = function (obj, path, options) {
	var pathParts,
		arr,
		arrCount,
		objPart,
		objPartParent,
		valuesArr,
		returnArr,
		i, k;

	if (obj !== undefined && typeof obj === 'object') {
		if (!options || options && !options.skipArrCheck) {
			// Check if we were passed an array of objects and if so,
			// iterate over the array and return the value from each
			// array item
			if (obj instanceof Array) {
				returnArr = [];

				for (i = 0; i < obj.length; i++) {
					returnArr.push(this.valueOne(obj[i], path));
				}

				return returnArr;
			}
		}

		valuesArr = [];

		if (path !== undefined) {
			path = this.clean(path);
			pathParts = path.split('.');
		}

		// Fall back to the instance's pre-assigned path when none was given
		arr = pathParts || this._pathParts;
		arrCount = arr.length;
		objPart = obj;

		for (i = 0; i < arrCount; i++) {
			objPart = objPart[arr[i]];

			// objPartParent is undefined on the first iteration, so this
			// array branch can only trigger from the second path segment on
			if (objPartParent instanceof Array) {
				// Search inside the array for the next key
				for (k = 0; k < objPartParent.length; k++) {
					valuesArr = valuesArr.concat(this.value(objPartParent, k + '.' + arr[i], {skipArrCheck: true}));
				}

				return valuesArr;
			} else {
				// Stop walking once we hit a falsy or non-object value
				if (!objPart || typeof(objPart) !== 'object') {
					break;
				}
			}

			objPartParent = objPart;
		}

		return [objPart];
	} else {
		return [];
	}
};

/**
 * Sets a value on an object for the specified path.
 * @param {Object} obj The object to update.
 * @param {String} path The path to update.
 * @param {*} val The value to set the object path to.
 * @returns {*} The passed obj, for chaining.
 */
Path.prototype.set = function (obj, path, val) {
	if (obj !== undefined && path !== undefined) {
		var pathParts,
			part;

		path = this.clean(path);
		pathParts = path.split('.');
		part = pathParts.shift();

		if (pathParts.length) {
			// Generate the path part in the object if it does not already exist
			obj[part] = obj[part] || {};

			// Recurse
			this.set(obj[part], pathParts.join('.'), val);
		} else {
			// Set the value
			obj[part] = val;
		}
	}

	return obj;
};

// Alias-style accessor: first value found at the path (same as valueOne)
Path.prototype.get = function (obj, path) {
	return this.value(obj, path)[0];
};

/**
 * Push a value to an array on an object for the specified path.
 * @param {Object} obj The object to update.
 * @param {String} path The path to the array to push to.
 * @param {*} val The value to push to the array at the object path.
 * @returns {*} The passed obj, for chaining.
 */
Path.prototype.push = function (obj, path, val) {
	if (obj !== undefined && path !== undefined) {
		var pathParts,
			part;

		path = this.clean(path);
		pathParts = path.split('.');
		part = pathParts.shift();

		if (pathParts.length) {
			// Generate the path part in the object if it does not already exist
			obj[part] = obj[part] || {};

			// Recurse
			// NOTE(review): this recurses via set(), not push(), so for a
			// nested path the endpoint is ASSIGNED val rather than having
			// val pushed onto an array there — looks suspect; confirm the
			// intended behaviour against callers.
			this.set(obj[part], pathParts.join('.'), val);
		} else {
			// Set the value
			obj[part] = obj[part] || [];

			if (obj[part] instanceof Array) {
				obj[part].push(val);
			} else {
				throw('ForerunnerDB.Path: Cannot push to a path whose endpoint is not an array!');
			}
		}
	}

	return obj;
};

/**
 * Gets the value(s) that the object contains for the currently assigned path string
 * with their associated keys.
 * @param {Object} obj The object to evaluate the path against.
 * @param {String=} path A path to use instead of the existing one passed in path().
 * @returns {Array} An array of values for the given path with the associated key.
 */
Path.prototype.keyValue = function (obj, path) {
	var pathParts,
		arr,
		arrCount,
		objPart,
		objPartParent,
		objPartHash,
		i;

	if (path !== undefined) {
		path = this.clean(path);
		pathParts = path.split('.');
	}

	arr = pathParts || this._pathParts;
	arrCount = arr.length;
	objPart = obj;

	for (i = 0; i < arrCount; i++) {
		objPart = objPart[arr[i]];

		if (!objPart || typeof(objPart) !== 'object') {
			// Leaf (or missing) value reached: record it as "key:value"
			objPartHash = arr[i] + ':' + objPart;
			break;
		}

		objPartParent = objPart;
	}

	// NOTE(review): objPartHash remains undefined when the whole path
	// resolves to an object (the loop never breaks), and objPartParent is
	// assigned but never read here — confirm whether that is intentional.
	return objPartHash;
};

/**
 * Sets a value on an object for the specified path.
 * @param {Object} obj The object to update.
 * @param {String} path The path to update.
 * @param {*} val The value to set the object path to.
* @returns {*} */ Path.prototype.set = function (obj, path, val) { if (obj !== undefined && path !== undefined) { var pathParts, part; path = this.clean(path); pathParts = path.split('.'); part = pathParts.shift(); if (pathParts.length) { // Generate the path part in the object if it does not already exist obj[part] = obj[part] || {}; // Recurse this.set(obj[part], pathParts.join('.'), val); } else { // Set the value obj[part] = val; } } return obj; }; /** * Removes leading period (.) from string and returns it. * @param {String} str The string to clean. * @returns {*} */ Path.prototype.clean = function (str) { if (str.substr(0, 1) === '.') { str = str.substr(1, str.length -1); } return str; }; Shared.finishModule('Path'); module.exports = Path; },{"./Shared":26}],24:[function(_dereq_,module,exports){ "use strict"; var Shared = _dereq_('./Shared'); /** * Provides chain reactor node linking so that a chain reaction can propagate * down a node tree. Effectively creates a chain link between the reactorIn and * reactorOut objects where a chain reaction from the reactorIn is passed through * the reactorProcess before being passed to the reactorOut object. Reactor * packets are only passed through to the reactorOut if the reactor IO method * chainSend is used. * @param {*} reactorIn An object that has the Mixin.ChainReactor methods mixed * in to it. Chain reactions that occur inside this object will be passed through * to the reactorOut object. * @param {*} reactorOut An object that has the Mixin.ChainReactor methods mixed * in to it. Chain reactions that occur in the reactorIn object will be passed * through to this object. * @param {Function} reactorProcess The processing method to use when chain * reactions occur. 
* @constructor */ var ReactorIO = function (reactorIn, reactorOut, reactorProcess) { if (reactorIn && reactorOut && reactorProcess) { this._reactorIn = reactorIn; this._reactorOut = reactorOut; this._chainHandler = reactorProcess; if (!reactorIn.chain || !reactorOut.chainReceive) { throw('ForerunnerDB.ReactorIO: ReactorIO requires passed in and out objects to implement the ChainReactor mixin!'); } // Register the reactorIO with the input reactorIn.chain(this); // Register the output with the reactorIO this.chain(reactorOut); } else { throw('ForerunnerDB.ReactorIO: ReactorIO requires in, out and process arguments to instantiate!'); } }; Shared.addModule('ReactorIO', ReactorIO); /** * Drop a reactor IO object, breaking the reactor link between the in and out * reactor nodes. * @returns {boolean} */ ReactorIO.prototype.drop = function () { if (!this.isDropped()) { this._state = 'dropped'; // Remove links if (this._reactorIn) { this._reactorIn.unChain(this); } if (this._reactorOut) { this.unChain(this._reactorOut); } delete this._reactorIn; delete this._reactorOut; delete this._chainHandler; this.emit('drop', this); delete this._listeners; } return true; }; /** * Gets / sets the current state. * @param {String=} val The name of the state to set. * @returns {*} */ Shared.synthesize(ReactorIO.prototype, 'state'); Shared.mixin(ReactorIO.prototype, 'Mixin.Common'); Shared.mixin(ReactorIO.prototype, 'Mixin.ChainReactor'); Shared.mixin(ReactorIO.prototype, 'Mixin.Events'); Shared.finishModule('ReactorIO'); module.exports = ReactorIO; },{"./Shared":26}],25:[function(_dereq_,module,exports){ "use strict"; /** * Provides functionality to encode and decode JavaScript objects to strings * and back again. This differs from JSON.stringify and JSON.parse in that * special objects such as dates can be encoded to strings and back again * so that the reconstituted version of the string still contains a JavaScript * date object. 
* @constructor */ var Serialiser = function () { this.init.apply(this, arguments); }; Serialiser.prototype.init = function () { this._encoder = []; this._decoder = {}; // Register our handlers this.registerEncoder('$date', function (data) { if (data instanceof Date) { return data.toISOString(); } }); this.registerDecoder('$date', function (data) { return new Date(data); }); }; /** * Register an encoder that can handle encoding for a particular * object type. * @param {String} handles The name of the handler e.g. $date. * @param {Function} method The encoder method. */ Serialiser.prototype.registerEncoder = function (handles, method) { this._encoder.push(function (data) { var methodVal = method(data), returnObj; if (methodVal !== undefined) { returnObj = {}; returnObj[handles] = methodVal; } return returnObj; }); }; /** * Register a decoder that can handle decoding for a particular * object type. * @param {String} handles The name of the handler e.g. $date. When an object * has a field matching this handler name then this decode will be invoked * to provide a decoded version of the data that was previously encoded by * it's counterpart encoder method. * @param {Function} method The decoder method. */ Serialiser.prototype.registerDecoder = function (handles, method) { this._decoder[handles] = method; }; /** * Loops the encoders and asks each one if it wants to handle encoding for * the passed data object. If no value is returned (undefined) then the data * will be passed to the next encoder and so on. If a value is returned the * loop will break and the encoded data will be used. * @param {Object} data The data object to handle. * @returns {*} The encoded data. * @private */ Serialiser.prototype._encode = function (data) { // Loop the encoders and if a return value is given by an encoder // the loop will exit and return that value. 
var count = this._encoder.length, retVal; while (count-- && !retVal) { retVal = this._encoder[count](data); } return retVal; }; /** * Converts a previously encoded string back into an object. * @param {String} data The string to convert to an object. * @returns {Object} The reconstituted object. */ Serialiser.prototype.parse = function (data) { return this._parse(JSON.parse(data)); }; /** * Handles restoring an object with special data markers back into * it's original format. * @param {Object} data The object to recurse. * @param {Object=} target The target object to restore data to. * @returns {Object} The final restored object. * @private */ Serialiser.prototype._parse = function (data, target) { var i; if (typeof data === 'object' && data !== null) { if (data instanceof Array) { target = target || []; } else { target = target || {}; } // Iterate through the object's keys and handle // special object types and restore them for (i in data) { if (data.hasOwnProperty(i)) { if (i.substr(0, 1) === '$' && this._decoder[i]) { // This is a special object type and a handler // exists, restore it return this._decoder[i](data[i]); } // Not a special object or no handler, recurse as normal target[i] = this._parse(data[i], target[i]); } } } else { target = data; } // The data is a basic type return target; }; /** * Converts an object to a encoded string representation. * @param {Object} data The object to encode. */ Serialiser.prototype.stringify = function (data) { return JSON.stringify(this._stringify(data)); }; /** * Recurse down an object and encode special objects so they can be * stringified and later restored. * @param {Object} data The object to parse. * @param {Object=} target The target object to store converted data to. * @returns {Object} The converted object. 
 * @private
 */
Serialiser.prototype._stringify = function (data, target) {
	var handledData,
		i;

	if (typeof data === 'object' && data !== null) {
		// Handle special object types so they can be encoded with
		// a special marker and later restored by a decoder counterpart
		handledData = this._encode(data);

		if (handledData) {
			// An encoder handled this object type so return it now
			return handledData;
		}

		if (data instanceof Array) {
			target = target || [];
		} else {
			target = target || {};
		}

		// Iterate through the object's keys and serialise
		for (i in data) {
			if (data.hasOwnProperty(i)) {
				target[i] = this._stringify(data[i], target[i]);
			}
		}
	} else {
		target = data;
	}

	// The data is a basic type
	return target;
};

module.exports = Serialiser;
},{}],26:[function(_dereq_,module,exports){
"use strict";

var Overload = _dereq_('./Overload');

/**
 * A shared object that can be used to store arbitrary data between class
 * instances, and access helper methods.
 * @mixin
 */
var Shared = {
	version: '1.3.505',
	modules: {},
	plugins: {},

	// Cache of synthesized getter/setter functions keyed by property name
	_synth: {},

	/**
	 * Adds a module to ForerunnerDB.
	 * @memberof Shared
	 * @param {String} name The name of the module.
	 * @param {Function} module The module class.
	 */
	addModule: function (name, module) {
		// Store the module in the module registry
		this.modules[name] = module;

		// Tell the universe we are loading this module
		this.emit('moduleLoad', [name, module]);
	},

	/**
	 * Called by the module once all processing has been completed. Used to determine
	 * if the module is ready for use by other modules.
	 * @memberof Shared
	 * @param {String} name The name of the module.
	 */
	finishModule: function (name) {
		if (this.modules[name]) {
			// Set the finished loading flag to true
			this.modules[name]._fdbFinished = true;

			// Assign the module name to itself so it knows what it
			// is called
			if (this.modules[name].prototype) {
				this.modules[name].prototype.className = name;
			} else {
				this.modules[name].className = name;
			}

			this.emit('moduleFinished', [name, this.modules[name]]);
		} else {
			throw('ForerunnerDB.Shared: finishModule called on a module that has not been registered with addModule(): ' + name);
		}
	},

	/**
	 * Will call your callback method when the specified module has loaded. If the module
	 * is already loaded the callback is called immediately.
	 * @memberof Shared
	 * @param {String} name The name of the module.
	 * @param {Function} callback The callback method to call when the module is loaded.
	 */
	moduleFinished: function (name, callback) {
		if (this.modules[name] && this.modules[name]._fdbFinished) {
			if (callback) {
				callback(name, this.modules[name]);
			}
		} else {
			// NOTE: the callback is registered for ALL moduleFinished events,
			// not just the named module — it fires for every later module too
			this.on('moduleFinished', callback);
		}
	},

	/**
	 * Determines if a module has been added to ForerunnerDB or not.
	 * @memberof Shared
	 * @param {String} name The name of the module.
	 * @returns {Boolean} True if the module exists or false if not.
	 */
	moduleExists: function (name) {
		return Boolean(this.modules[name]);
	},

	/**
	 * Adds the properties and methods defined in the mixin to the passed object.
	 * @memberof Shared
	 * @param {Object} obj The target object to add mixin key/values to.
	 * @param {String} mixinName The name of the mixin to add to the object.
	 */
	mixin: new Overload({
		'object, string': function (obj, mixinName) {
			var mixinObj;

			if (typeof mixinName === 'string') {
				mixinObj = this.mixins[mixinName];

				if (!mixinObj) {
					throw('ForerunnerDB.Shared: Cannot find mixin named: ' + mixinName);
				}
			}

			return this.$main.call(this, obj, mixinObj);
		},

		'object, *': function (obj, mixinObj) {
			return this.$main.call(this, obj, mixinObj);
		},

		'$main': function (obj, mixinObj) {
			// Shallow-copy every own key of the mixin onto the target
			if (mixinObj && typeof mixinObj === 'object') {
				for (var i in mixinObj) {
					if (mixinObj.hasOwnProperty(i)) {
						obj[i] = mixinObj[i];
					}
				}
			}

			return obj;
		}
	}),

	/**
	 * Generates a generic getter/setter method for the passed method name.
	 * @memberof Shared
	 * @param {Object} obj The object to add the getter/setter to.
	 * @param {String} name The name of the getter/setter to generate.
	 * @param {Function=} extend A method to call before executing the getter/setter.
	 * The existing getter/setter can be accessed from the extend method via the
	 * $super e.g. this.$super();
	 */
	synthesize: function (obj, name, extend) {
		this._synth[name] = this._synth[name] || function (val) {
			if (val !== undefined) {
				this['_' + name] = val;
				return this;
			}

			return this['_' + name];
		};

		if (extend) {
			var self = this;

			obj[name] = function () {
				// Temporarily expose the base accessor as this.$super so the
				// extend method can delegate to it, then restore the old value
				var tmp = this.$super,
					ret;

				this.$super = self._synth[name];
				ret = extend.apply(this, arguments);
				this.$super = tmp;

				return ret;
			};
		} else {
			obj[name] = this._synth[name];
		}
	},

	/**
	 * Allows a method to be overloaded.
	 * @memberof Shared
	 * @param arr
	 * @returns {Function}
	 * @constructor
	 */
	overload: Overload,

	/**
	 * Define the mixins that other modules can use as required.
	 * @memberof Shared
	 */
	mixins: {
		'Mixin.Common': _dereq_('./Mixin.Common'),
		'Mixin.Events': _dereq_('./Mixin.Events'),
		'Mixin.ChainReactor': _dereq_('./Mixin.ChainReactor'),
		'Mixin.CRUD': _dereq_('./Mixin.CRUD'),
		'Mixin.Constants': _dereq_('./Mixin.Constants'),
		'Mixin.Triggers': _dereq_('./Mixin.Triggers'),
		'Mixin.Sorting': _dereq_('./Mixin.Sorting'),
		'Mixin.Matching': _dereq_('./Mixin.Matching'),
		'Mixin.Updating': _dereq_('./Mixin.Updating'),
		'Mixin.Tags': _dereq_('./Mixin.Tags')
	}
};

// Add event handling to shared
Shared.mixin(Shared, 'Mixin.Events');

module.exports = Shared;
},{"./Mixin.CRUD":11,"./Mixin.ChainReactor":12,"./Mixin.Common":13,"./Mixin.Constants":14,"./Mixin.Events":15,"./Mixin.Matching":16,"./Mixin.Sorting":17,"./Mixin.Tags":18,"./Mixin.Triggers":19,"./Mixin.Updating":20,"./Overload":22}],27:[function(_dereq_,module,exports){
/* jshint strict:false */

// ES5 Array.prototype.filter polyfill for legacy environments
if (!Array.prototype.filter) {
	Array.prototype.filter = function(fun/*, thisArg*/) {
		if (this === void 0 || this === null) {
			throw new TypeError();
		}

		var t = Object(this);
		var len = t.length >>> 0; // jshint ignore:line
		if (typeof fun !== 'function') {
			throw new TypeError();
		}

		var res = [];
		var thisArg = arguments.length >= 2 ? arguments[1] : void 0;
		for (var i = 0; i < len; i++) {
			if (i in t) {
				var val = t[i];

				// NOTE: Technically this should Object.defineProperty at
				//       the next index, as push can be affected by
				//       properties on Object.prototype and Array.prototype.
				//       But that method's new, and collisions should be
				//       rare, so use the more-compatible alternative.
				if (fun.call(thisArg, val, i, t)) {
					res.push(val);
				}
			}
		}

		return res;
	};
}

// ES5 Object.create polyfill (single-argument form only)
if (typeof Object.create !== 'function') {
	Object.create = (function() {
		var Temp = function() {};
		return function (prototype) {
			if (arguments.length > 1) {
				throw Error('Second argument not supported');
			}
			if (typeof prototype !== 'object') {
				throw TypeError('Argument must be an object');
			}
			Temp.prototype = prototype;
			var result = new Temp();
			// Reset so the Temp constructor does not retain the prototype
			Temp.prototype = null;
			return result;
		};
	})();
}

// Production steps of ECMA-262, Edition 5, 15.4.4.14
// Reference: http://es5.github.io/#x15.4.4.14e
if (!Array.prototype.indexOf) {
	Array.prototype.indexOf = function(searchElement, fromIndex) {
		var k;

		// 1. Let O be the result of calling ToObject passing
		//    the this value as the argument.
		if (this === null) {
			throw new TypeError('"this" is null or not defined');
		}

		var O = Object(this);

		// 2. Let lenValue be the result of calling the Get
		//    internal method of O with the argument "length".
		// 3. Let len be ToUint32(lenValue).
		var len = O.length >>> 0; // jshint ignore:line

		// 4. If len is 0, return -1.
		if (len === 0) {
			return -1;
		}

		// 5. If argument fromIndex was passed let n be
		//    ToInteger(fromIndex); else let n be 0.
		var n = +fromIndex || 0;

		if (Math.abs(n) === Infinity) {
			n = 0;
		}

		// 6. If n >= len, return -1.
		if (n >= len) {
			return -1;
		}

		// 7. If n >= 0, then Let k be n.
		// 8. Else, n<0, Let k be len - abs(n).
		//    If k is less than 0, then let k be 0.
		k = Math.max(n >= 0 ? n : len - Math.abs(n), 0);

		// 9. Repeat, while k < len
		while (k < len) {
			// a. Let Pk be ToString(k).
			//    This is implicit for LHS operands of the in operator
			// b. Let kPresent be the result of calling the
			//    HasProperty internal method of O with argument Pk.
			//    This step can be combined with c
			// c. If kPresent is true, then
			//    i.  Let elementK be the result of calling the Get
			//        internal method of O with the argument ToString(k).
			//    ii. Let same be the result of applying the
			//        Strict Equality Comparison Algorithm to
			//        searchElement and elementK.
			//    iii. If same is true, return k.
			if (k in O && O[k] === searchElement) {
				return k;
			}
			k++;
		}
		return -1;
	};
}

module.exports = {};
},{}]},{},[1]);
<?php
/**
 * Minileven functions and definitions
 *
 * Sets up the theme and provides some helper functions. Some helper functions
 * are used in the theme as custom template tags. Others are attached to action and
 * filter hooks in WordPress to change core functionality.
 *
 * The first function, minileven_setup(), sets up the theme by registering support
 * for various features in WordPress, such as post thumbnails, navigation menus, and the like.
 *
 * @package Minileven
 */

/**
 * Set the content width based on the theme's design and stylesheet.
 */
if ( ! isset( $content_width ) )
	$content_width = 584;

/**
 * Tell WordPress to run minileven_setup() when the 'after_setup_theme' hook is run.
 */
add_action( 'after_setup_theme', 'minileven_setup' );

if ( ! function_exists( 'minileven_setup' ) ):
/**
 * Sets up theme defaults and registers support for various WordPress features.
 */
function minileven_setup() {
	global $wp_version;

	/**
	 * Custom template tags for this theme.
	 */
	require( <API key>() . '/inc/template-tags.php' );

	/**
	 * Custom functions that act independently of the theme templates
	 */
	require( <API key>() . '/inc/tweaks.php' );

	/**
	 * Implement the Custom Header functions
	 */
	require( <API key>() . '/inc/custom-header.php' );

	/* Make Minileven available for translation.
	 * Translations can be added to the /languages/ directory.
	 * If you're building a theme based on Minileven, use a find and replace
	 * to change 'minileven' to the name of your theme in all the template files.
	 */
	/* Don't load a minileven textdomain, as it uses the Jetpack textdomain.
	<API key>( 'minileven', <API key>() . '/languages' );
	*/

	// Add default posts and comments RSS feed links to <head>.
	add_theme_support( '<API key>' );

	// This theme uses wp_nav_menu() in one location.
	register_nav_menu( 'primary', __( 'Primary Menu', 'jetpack' ) );

	// Add support for a variety of post formats
	add_theme_support( 'post-formats', array( 'gallery' ) );

	// Add support for custom backgrounds
	add_theme_support( 'custom-background' );

	// Add support for post thumbnails
	add_theme_support( 'post-thumbnails' );
}
endif; // minileven_setup

/**
 * Enqueue scripts and styles
 */
function minileven_scripts() {
	global $post;

	wp_enqueue_style( 'style', get_stylesheet_uri() );

	wp_enqueue_script( 'small-menu', <API key>() . '/js/small-menu.js', array( 'jquery' ), '20120206', true );

	// Threaded-comment reply script is only needed on singular views
	if ( is_singular() && comments_open() && get_option( 'thread_comments' ) ) {
		wp_enqueue_script( 'comment-reply' );
	}
}
add_action( 'wp_enqueue_scripts', 'minileven_scripts' );

/**
 * Register the Open Sans webfont stylesheet, honouring the translator
 * switches that can disable the font or add language subsets.
 */
function minileven_fonts() {
	/* translators: If there are characters in your language that are not supported
	   by Open Sans, translate this to 'off'. Do not translate into your own language. */
	if ( 'off' !== _x( 'on', 'Open Sans font: on or off', 'jetpack' ) ) {

		$opensans_subsets = 'latin,latin-ext';

		/* translators: To add an additional Open Sans character subset specific to your language, translate
		   this to 'greek', 'cyrillic' or 'vietnamese'. Do not translate into your own language. */
		$opensans_subset = _x( 'no-subset', 'Open Sans font: add new subset (greek, cyrillic, vietnamese)', 'jetpack' );

		if ( 'cyrillic' == $opensans_subset )
			$opensans_subsets .= ',cyrillic,cyrillic-ext';
		elseif ( 'greek' == $opensans_subset )
			$opensans_subsets .= ',greek,greek-ext';
		elseif ( 'vietnamese' == $opensans_subset )
			$opensans_subsets .= ',vietnamese';

		$opensans_query_args = array(
			'family' => 'Open+Sans:200,200italic,300,300italic,400,400italic,600,600italic,700,700italic',
			'subset' => $opensans_subsets,
		);

		// Protocol-relative URL so the font loads over both http and https
		wp_register_style( 'minileven-open-sans', add_query_arg( $opensans_query_args, "//fonts.googleapis.com/css" ), array(), null );
	}
}
add_action( 'init', 'minileven_fonts' );

/**
 * Register our sidebars and widgetized areas.
 * @since Minileven 1.0
 */
function <API key>() {
	register_sidebar( array(
		'name' => __( 'Main Sidebar', 'jetpack' ),
		'id' => 'sidebar-1',
		'before_widget' => '<aside id="%1$s" class="widget %2$s">',
		'after_widget' => "</aside>",
		'before_title' => '<h3 class="widget-title">',
		'after_title' => '</h3>',
	) );
}
add_action( 'widgets_init', '<API key>' );

// Force five posts per page on the mobile theme via the attached filter below
function <API key>() {
	return 5;
}
add_filter('<API key>', '<API key>');

/**
 * Determine the currently active theme.
 */
function <API key>() {
	// Temporarily remove our own option filter so we read the real,
	// unfiltered stylesheet option, then restore the filter afterwards
	$removed = remove_action( 'option_stylesheet', '<API key>' );
	$stylesheet = get_option( 'stylesheet' );
	if ( $removed )
		add_action( 'option_stylesheet', '<API key>' );

	return $stylesheet;
}

/* This function grabs the location of the custom menus from the current theme. If no menu is set in a location
 * it will return a boolean "false". This function helps Minileven know which custom menu to display. */
function <API key>() {
	$theme_slug = <API key>();
	$mods = get_option( "theme_mods_{$theme_slug}" );

	if ( has_filter( '<API key>' ) ) {
		/**
		 * Filter the menu displayed in the Mobile Theme.
		 *
		 * @since 3.4.0
		 *
		 * @param int $menu_id ID of the menu to display.
		 */
		// NOTE(review): $menu_id is never defined in this scope, so the filter
		// receives an undefined variable (PHP notice) — confirm against the
		// upstream source whether a lookup was dropped here.
		return array( 'primary' => apply_filters( '<API key>', $menu_id ) );
	}

	if ( isset( $mods['nav_menu_locations'] ) && ! empty( $mods['nav_menu_locations'] ) )
		return $mods['nav_menu_locations'];

	return false;
}

/* This function grabs the custom background image from the user's current theme so that Minileven can display it. */
function <API key>() {
	$theme_slug = <API key>();
	$mods = get_option( "theme_mods_$theme_slug" );

	if ( ! empty( $mods ) ) {
		// Missing mod entries are normalised to null so callers can rely
		// on every key being present
		return array(
			'color' => isset( $mods['background_color'] ) ? $mods['background_color'] : null,
			'image' => isset( $mods['background_image'] ) ? $mods['background_image'] : null,
			'repeat' => isset( $mods['background_repeat'] ) ? $mods['background_repeat'] : null,
			'position' => isset( $mods['<API key>'] ) ? $mods['<API key>'] : null,
			'attachment' => isset( $mods['attachment'] ) ? $mods['attachment'] : null,
		);
	}
	return false;
}

/**
 * If the user has set a static front page, show all posts on the front page, instead of a static page.
 */
if ( '1' == get_option( '<API key>' ) )
	add_filter( '<API key>', '__return_zero' );

/**
 * Retrieves the IDs for images in a gallery.
 *
 * @uses get_post_galleries() first, if available. Falls back to shortcode parsing,
 * then as last option uses a get_posts() call.
 *
 * @return array List of image IDs from the post gallery.
 */
function <API key>() {
	$images = array();

	if ( function_exists( 'get_post_galleries' ) ) {
		$galleries = get_post_galleries( get_the_ID(), false );
		if ( isset( $galleries[0]['ids'] ) )
			$images = explode( ',', $galleries[0]['ids'] );
	} else {
		// Legacy fallback: pull the ids attribute out of the first
		// [gallery] shortcode found in the post content
		$pattern = get_shortcode_regex();
		preg_match( "/$pattern/s", get_the_content(), $match );
		$atts = <API key>( $match[3] );
		if ( isset( $atts['ids'] ) )
			$images = explode( ',', $atts['ids'] );
	}

	// Last resort: any image attachments parented to this post
	if ( ! $images ) {
		$images = get_posts( array(
			'fields'         => 'ids',
			'numberposts'    => 999,
			'order'          => 'ASC',
			'orderby'        => 'menu_order',
			'post_mime_type' => 'image',
			'post_parent'    => get_the_ID(),
			'post_type'      => 'attachment',
		) );
	}

	return $images;
}

/**
 * Allow plugins to filter where Featured Images are displayed.
 * Default has Featured Images disabled on single view and pages.
 *
 * @uses is_search()
 * @uses apply_filters()
 * @return bool
 */
function <API key>() {
	$enabled = ( is_home() || is_search() || is_archive() ) ? true : false;

	/**
	 * Filter where featured images are displayed in the Mobile Theme.
	 *
	 * By setting $enabled to true or false using functions like is_home() or
	 * is_archive(), you can control where featured images are be displayed.
	 *
	 * @since 3.2.0
	 *
	 * @param bool $enabled True if featured images should be displayed, false if not.
	 */
	return (bool) apply_filters( '<API key>', $enabled );
}
<html>
<head>
<META http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<title>
CIP JavaScript documentation for sequence " pfcPoint3D"
</title>
<link href="cipdoc_default.css" type="text/css" rel="stylesheet">
<script language="JAVASCRIPT">
function findProperNode () {
top.frames [0].document.apiwizard.api ( 2, 410 );
}
</script>
</head>
<body onLoad="findProperNode()" class="frame-entity">
<table cellspacing="0" width="100%" border="0" class="toolbar">
<tr>
<td class="toolbar-navigation"><a href="library.html">Library</a>&nbsp;|&nbsp;<a href="m-pfcBase.html">Module</a></td><td class="toolbar-lib-name"> </td>
</tr>
</table>
<hr>
<h2>Class&nbsp;pfcPoint3D</h2>
<hr>
<br>
<b>Description</b>
<br>
<br>A 3x1 array that stores a three-dimensional point.
<br>
<br>The array provides methods for accessing its elements by index (or indices) and for modifying its elements. The array object does not know its own dimensions.<br>
<br>
<hr>
<br>
<br>
<b>Array Dimensions: [3] </b>
<br>
<hr>
<br>
<b>Method Summary</b>
<br>
<br>
<table cellpadding="3" border="0" class="method-summary">
<tr>
<td></td>
</tr>
<tr>
<td class="return-type"><tt>number</tt></td><td class="func-signature"><tt><a class="method" href="#Item">Item</a> ( integer&nbsp;Index ) </tt></td>
</tr>
<tr>
<td></td><td colspan="2" class="entities-text">Accesses an item by its index in the sequence.</td>
</tr>
<tr>
<td class="return-type"><tt>void</tt></td><td class="func-signature"><tt><a class="method" href="#Set">Set</a> ( integer&nbsp;Index ,&nbsp;number&nbsp;Item ) </tt></td>
</tr>
<tr>
<td></td><td colspan="2" class="entities-text">Assigns an item to the designated index in the sequence.</td>
</tr>
<tr>
<td></td>
</tr>
</table>
<br>
<hr>
<br>
<b>Method Detail</b>
<br>
<hr>
<br>
<a name="Item">
<table cellpadding="5" cellspacing="0" border="0" class="method-def">
<tr>
<td class="return-type"><tt>number</tt></td><td class="func-name"><tt>Item</tt></td><td class="func-params"><tt> ( integer&nbsp;Index ) </tt></td>
</tr> </table> </a> <br>Accesses an item by its index in the sequence.<br> <br> <hr> <br> <a name="Set"> <table cellpadding="5" cellspacing="0" border="0" class="method-def"> <tr> <td class="return-type"><tt>void</tt></td><td class="func-name"><tt>Set</tt></td><td class="func-params"><tt> ( integer&nbsp;Index ,&nbsp;number&nbsp;Item ) </tt></td> </tr> </table> </a> <br>Assigns an item to the designated index in the sequence.</body> </html>
#ifndef _SOM_H #define _SOM_H #include "libhppa.h" /* We want reloc.h to provide PA 2.0 defines. */ #define PA_2_0 #include "som/aout.h" #include "som/lst.h" #include "som/internal.h" /* The SOM BFD backend doesn't currently use anything from these two include files, but it's likely to need them in the future. */ #ifdef R_DLT_REL #include <shl.h> #include <dl.h> #endif #if defined (HOST_HPPABSD) || defined (HOST_HPPAOSF) /* BSD uses a completely different scheme for object file identification. so for now, define _PA_RISC_ID to accept any random value for a model number. */ #undef _PA_RISC_ID #define _PA_RISC_ID(__m_num) 1 #endif /* HOST_HPPABSD */ typedef struct som_symbol { asymbol symbol; unsigned int som_type; /* Structured like the ELF tc_data union. Allows more code sharing in GAS this way. */ union { struct { unsigned int hppa_arg_reloc; unsigned int hppa_priv_level; } ap; void * any; } tc_data; /* Index of this symbol in the symbol table. Only used when building relocation streams for incomplete objects. */ int index; /* How many times this symbol is used in a relocation. By sorting the symbols from most used to least used we can significantly reduce the size of the relocation stream for incomplete objects. */ int reloc_count; /* During object file writing, the offset of the name of this symbol in the SOM string table. */ int stringtab_offset; } som_symbol_type; /* A structure containing all the magic information stored in a BFD's private data which needs to be copied during an objcopy/strip run. */ struct som_exec_data { /* Sort-of a magic number. BSD uses it to distinguish between native executables and hpux executables. */ short system_id; /* Magic exec flags. These control things like whether or not null pointer dereferencing is allowed and the like. */ long exec_flags; /* We must preserve the version identifier too. Some versions of the HP linker do not grok NEW_VERSION_ID for reasons unknown. 
*/ unsigned int version_id; /* Add more stuff here as needed. Good examples of information we might want to pass would be presumed_dp, entry_* and maybe others from the file header. */ }; struct somdata { /* All the magic information about an executable which lives in the private BFD structure and needs to be copied from the input bfd to the output bfd during an objcopy/strip. */ struct som_exec_data *exec_data; /* These three fields are only used when writing files and are generated from scratch. They need not be copied for objcopy or strip to work. */ struct som_header *file_hdr; struct som_string_auxhdr *copyright_aux_hdr; struct som_string_auxhdr *version_aux_hdr; struct som_exec_auxhdr *exec_hdr; struct <API key> *comp_unit; /* Pointers to a saved copy of the symbol and string tables. These need not be copied for objcopy or strip to work. */ som_symbol_type *symtab; char *stringtab; asymbol **sorted_syms; /* We remember these offsets so that after check_file_format, we have no dependencies on the particular format of the exec_hdr. These offsets need not be copied for objcopy or strip to work. */ file_ptr sym_filepos; file_ptr str_filepos; file_ptr reloc_filepos; unsigned stringtab_size; void * line_info; }; struct som_data_struct { struct somdata a; }; /* Substructure of <API key> used to hold information which can't be represented by the generic BFD section structure, but which must be copied during objcopy or strip. */ struct <API key> { /* Various fields in space and subspace headers that we need to pass around. */ unsigned int sort_key : 8; unsigned int access_control_bits : 7; unsigned int is_defined : 1; unsigned int is_private : 1; unsigned int quadrant : 2; unsigned int is_comdat : 1; unsigned int is_common : 1; unsigned int dup_common : 1; /* For subspaces, this points to the section which represents the space in which the subspace is contained. For spaces it points back to the section for this space. 
*/ asection *container; /* The user-specified space number. It is wrong to use this as an index since duplicates and holes are allowed. */ int space_number; /* Add more stuff here as needed. Good examples of information we might want to pass would be initialization pointers, and the many subspace flags we do not represent yet. */ }; /* Used to keep extra SOM specific information for a given section. reloc_size holds the size of the relocation stream, note this is very different from the number of relocations as SOM relocations are variable length. reloc_stream is the actual stream of relocation entries. */ struct <API key> { struct <API key> *copy_data; unsigned int reloc_size; unsigned char *reloc_stream; struct <API key> *space_dict; struct <API key> *subspace_dict; }; #define somdata(bfd) ((bfd)->tdata.som_data->a) #define obj_som_exec_data(bfd) (somdata (bfd).exec_data) #define obj_som_file_hdr(bfd) (somdata (bfd).file_hdr) #define obj_som_exec_hdr(bfd) (somdata (bfd).exec_hdr) #define <API key>(bfd) (somdata (bfd).copyright_aux_hdr) #define obj_som_version_hdr(bfd) (somdata (bfd).version_aux_hdr) #define <API key>(bfd) (somdata (bfd).comp_unit) #define obj_som_symtab(bfd) (somdata (bfd).symtab) #define obj_som_stringtab(bfd) (somdata (bfd).stringtab) #define obj_som_sym_filepos(bfd) (somdata (bfd).sym_filepos) #define obj_som_str_filepos(bfd) (somdata (bfd).str_filepos) #define <API key>(bfd) (somdata (bfd).stringtab_size) #define <API key>(bfd) (somdata (bfd).reloc_filepos) #define obj_som_sorted_syms(bfd) (somdata (bfd).sorted_syms) #define som_section_data(sec) ((struct <API key> *) sec->used_by_bfd) #define som_symbol_data(symbol) ((som_symbol_type *) symbol) #define R_HPPA_NONE R_NO_RELOCATION #define R_HPPA R_CODE_ONE_SYMBOL #define R_HPPA_PCREL_CALL R_PCREL_CALL #define R_HPPA_ABS_CALL R_ABS_CALL #define R_HPPA_GOTOFF R_DP_RELATIVE #define R_HPPA_ENTRY R_ENTRY #define R_HPPA_EXIT R_EXIT #define R_HPPA_COMPLEX R_COMP1 #define R_HPPA_BEGIN_BRTAB 
R_BEGIN_BRTAB #define R_HPPA_END_BRTAB R_END_BRTAB #define R_HPPA_BEGIN_TRY R_BEGIN_TRY #define R_HPPA_END_TRY R_END_TRY /* Exported functions, mostly for use by GAS. */ bfd_boolean <API key> (asection *, int, int, unsigned int, int); bfd_boolean <API key> (asection *, asection *, int, unsigned int, int, int, int, int); void <API key> (asymbol *, unsigned int); bfd_boolean <API key> (bfd *, int, char *); int ** <API key> (bfd *, int, int, enum <API key>, int, asymbol *); bfd_boolean <API key> (bfd *, const char *, const char *, const char *, const char *); #endif /* _SOM_H */
<?php defined('_JEXEC') or die; /** * HTML View class for the Newsfeeds component * * @package Joomla.Site * @subpackage com_newsfeeds * @since 1.0 */ class <API key> extends JViewLegacy { /** * @var object * @since 1.6 */ protected $state; /** * @var object * @since 1.6 */ protected $item; /** * @var boolean * @since 1.6 */ protected $print; /** * @since 1.6 */ public function display($tpl = null) { $app = JFactory::getApplication(); $user = JFactory::getUser(); // Get view related request variables. $print = $app->input->getBool('print'); // Get model data. $state = $this->get('State'); $item = $this->get('Item'); if ($item) { // Get Category Model data $categoryModel = JModelLegacy::getInstance('Category', 'NewsfeedsModel', array('ignore_request' => true)); $categoryModel->setState('category.id', $item->catid); $categoryModel->setState('list.ordering', 'a.name'); $categoryModel->setState('list.direction', 'asc'); // TODO: $items is not used. Remove this line? $items = $categoryModel->getItems(); } // Check for errors. // @TODO Maybe this could go into JComponentHelper::raiseErrors($this->get('Errors')) if (count($errors = $this->get('Errors'))) { JError::raiseWarning(500, implode("\n", $errors)); return false; } // Add router helpers. $item->slug = $item->alias ? ($item->id . ':' . $item->alias) : $item->id; $item->catslug = $item->category_alias ? ($item->catid . ':' . $item->category_alias) : $item->catid; $item->parent_slug = $item->category_alias ? ($item->parent_id . ':' . $item->parent_alias) : $item->parent_id; // check if cache directory is writeable $cacheDir = JPATH_CACHE . '/'; if (!is_writable($cacheDir)) { JError::raiseNotice('0', JText::_('<API key>')); return; } // Merge newsfeed params. 
If this is single-newsfeed view, menu params override newsfeed params // Otherwise, newsfeed params override menu item params $params = $state->get('params'); $newsfeed_params = clone $item->params; $active = $app->getMenu()->getActive(); $temp = clone ($params); // Check to see which parameters should take priority if ($active) { $currentLink = $active->link; // If the current view is the active item and an newsfeed view for this feed, then the menu item params take priority if (strpos($currentLink, 'view=newsfeed') && (strpos($currentLink, '&id='.(string) $item->id))) { // $item->params are the newsfeed params, $temp are the menu item params // Merge so that the menu item params take priority $newsfeed_params->merge($temp); $item->params = $newsfeed_params; // Load layout from active query (in case it is an alternative menu item) if (isset($active->query['layout'])) { $this->setLayout($active->query['layout']); } } else { // Current view is not a single newsfeed, so the newsfeed params take priority here // Merge the menu item params with the newsfeed params so that the newsfeed params take priority $temp->merge($newsfeed_params); $item->params = $temp; // Check for alternative layouts (since we are not in a single-newsfeed menu item) if ($layout = $item->params->get('newsfeed_layout')) { $this->setLayout($layout); } } } else { // Merge so that newsfeed params take priority $temp->merge($newsfeed_params); $item->params = $temp; // Check for alternative layouts (since we are not in a single-newsfeed menu item) if ($layout = $item->params->get('newsfeed_layout')) { $this->setLayout($layout); } } // Check the access to the newsfeed $levels = $user->get<API key>(); if (!in_array($item->access, $levels) or ((in_array($item->access, $levels) and (!in_array($item->category_access, $levels))))) { JError::raiseWarning(403, JText::_('<API key>')); return; } // Get the current menu item $params = $app->getParams(); // Get the newsfeed $newsfeed = $item; $temp = new 
JRegistry; $temp->loadString($item->params); $params->merge($temp); try { $feed = new JFeedFactory; $this->rssDoc = $feed->getFeed($newsfeed->link); } catch (<API key> $e) { $msg = JText::_('<API key>'); } catch (RunTimeException $e) { $msg = JText::_('<API key>'); } if (empty($this->rssDoc)) { $msg = JText::_('<API key>'); } $feed_display_order = $params->get('feed_display_order', 'des'); if ($feed_display_order == 'asc') { $newsfeed->items = array_reverse($newsfeed->items); } //Escape strings for HTML output $this->pageclass_sfx = htmlspecialchars($params->get('pageclass_sfx')); $this->assignRef('params', $params); $this->assignRef('newsfeed', $newsfeed); $this->assignRef('state', $state); $this->assignRef('item', $item); $this->assignRef('user', $user); if (!empty($msg)) { $this->assignRef('msg', $msg); } $this->print = $print; $item->tags = new JHelperTags; $item->tags->getItemTags('com_newsfeeds.newsfeed', $item->id); $this->_prepareDocument(); parent::display($tpl); } /** * Prepares the document * * @return void * @since 1.6 */ protected function _prepareDocument() { $app = JFactory::getApplication(); $menus = $app->getMenu(); $pathway = $app->getPathway(); $title = null; // Because the application sets a default page title, // we need to get it from the menu item itself $menu = $menus->getActive(); if ($menu) { $this->params->def('page_heading', $this->params->get('page_title', $menu->title)); } else { $this->params->def('page_heading', JText::_('<API key>')); } $title = $this->params->get('page_title', ''); $id = (int) @$menu->query['id']; // if the menu item does not concern this newsfeed if ($menu && ($menu->query['option'] != 'com_newsfeeds' || $menu->query['view'] != 'newsfeed' || $id != $this->item->id)) { // If this is not a single newsfeed menu item, set the page title to the newsfeed title if ($this->item->name) { $title = $this->item->name; } $path = array(array('title' => $this->item->name, 'link' => '')); $category = 
JCategories::getInstance('Newsfeeds')->get($this->item->catid); while (($menu->query['option'] != 'com_newsfeeds' || $menu->query['view'] == 'newsfeed' || $id != $category->id) && $category->id > 1) { $path[] = array('title' => $category->title, 'link' => <API key>::getCategoryRoute($category->id)); $category = $category->getParent(); } $path = array_reverse($path); foreach ($path as $item) { $pathway->addItem($item['title'], $item['link']); } } if (empty($title)) { $title = $app->getCfg('sitename'); } elseif ($app->getCfg('sitename_pagetitles', 0) == 1) { $title = JText::sprintf('JPAGETITLE', $app->getCfg('sitename'), $title); } elseif ($app->getCfg('sitename_pagetitles', 0) == 2) { $title = JText::sprintf('JPAGETITLE', $title, $app->getCfg('sitename')); } if (empty($title)) { $title = $this->item->name; } $this->document->setTitle($title); if ($this->item->metadesc) { $this->document->setDescription($this->item->metadesc); } elseif (!$this->item->metadesc && $this->params->get('<API key>')) { $this->document->setDescription($this->params->get('<API key>')); } if ($this->item->metakey) { $this->document->setMetadata('keywords', $this->item->metakey); } elseif (!$this->item->metakey && $this->params->get('menu-meta_keywords')) { $this->document->setMetadata('keywords', $this->params->get('menu-meta_keywords')); } if ($this->params->get('robots')) { $this->document->setMetadata('robots', $this->params->get('robots')); } if ($app->getCfg('MetaTitle') == '1') { $this->document->setMetaData('title', $this->item->name); } if ($app->getCfg('MetaAuthor') == '1') { $this->document->setMetaData('author', $this->item->author); } $mdata = $this->item->metadata->toArray(); foreach ($mdata as $k => $v) { if ($v) { $this->document->setMetadata($k, $v); } } } }
// file at the top-level directory of this distribution and at // option. This file may not be copied, modified, or distributed // except according to those terms. type an_int = int; fn cmp(x: Option<an_int>, y: Option<int>) -> bool { x == y } pub fn main() { assert!(!cmp(Some(3), None)); assert!(!cmp(Some(3), Some(4))); assert!(cmp(Some(3), Some(3))); assert!(cmp(None, None)); }
cask 'midi-monitor' do version '1.3.2' sha256 '<SHA256-like>' url "https://www.snoize.com/MIDIMonitor/MIDIMonitor_#{version.dots_to_underscores}.zip" appcast 'https: checkpoint: '<SHA256-like>' name 'MIDI Monitor' homepage 'https: depends_on macos: '>= :lion' app 'MIDI Monitor.app' uninstall quit: [ 'com.snoize.MIDIMonitor', 'com.snoize.MIDIMonitorDriver', 'com.snoize.MIDISpyFramework', 'com.snoize.SnoizeMIDI', ] zap delete: [ '~/Library/Preferences/com.snoize.MIDIMonitor.plist', '~/Library/Caches/com.snoize.MIDIMonitor', '~/Library/Saved Application State/com.snoize.MIDIMonitor.savedState', ] end
layout: docs title: Plugins permalink: /docs/plugins/ Jekyll has a plugin system with hooks that allow you to create custom generated content specific to your site. You can run custom code for your site without having to modify the Jekyll source itself. <div class="note info"> <h5>Plugins on GitHub Pages</h5> <p> <a href="http://pages.github.com/">GitHub Pages</a> is powered by Jekyll. However, all Pages sites are generated using the <code>--safe</code> option to disable custom plugins for security reasons. Unfortunately, this means your plugins won’t work if you’re deploying to GitHub Pages.<br><br> You can still use GitHub Pages to publish your site, but you’ll need to convert the site locally and push the generated static files to your GitHub repository instead of the Jekyll source files. </p> </div> ## Installing a plugin You have 3 options for installing plugins: 1. In your site source root, make a `_plugins` directory. Place your plugins here. Any file ending in `*.rb` inside this directory will be loaded before Jekyll generates your site. 2. In your `_config.yml` file, add a new array with the key `gems` and the values of the gem names of the plugins you'd like to use. An example: gems: [jekyll-test-plugin, jekyll-jsonify, jekyll-assets] # This will require each of these gems automatically. 3. Add the relevant plugins to a Bundler group in your `Gemfile`. An example: group :jekyll_plugins do gem "my-jekyll-plugin" end <div class="note info"> <h5> <code>_plugins</code>, <code>_config.yml</code> and <code>Gemfile</code> can be used simultaneously </h5> <p> You may use any of the aforementioned plugin options simultaneously in the same site if you so choose. Use of one does not restrict the use of the others. </p> </div> In general, plugins you make will fall into one of four categories: 1. [Generators](#generators) 2. [Converters](#converters) 3. [Commands](#commands) 4. 
[Tags](#tags) ## Generators You can create a generator when you need Jekyll to create additional content based on your own rules. A generator is a subclass of `Jekyll::Generator` that defines a `generate` method, which receives an instance of [`Jekyll::Site`]({{ site.repository }}/blob/master/lib/jekyll/site.rb). The return value of `generate` is ignored. Generators run after Jekyll has made an inventory of the existing content, and before the site is generated. Pages with YAML Front Matters are stored as instances of [`Jekyll::Page`]({{ site.repository }}/blob/master/lib/jekyll/page.rb) and are available via `site.pages`. Static files become instances of [`Jekyll::StaticFile`]({{ site.repository }}/blob/master/lib/jekyll/static_file.rb) and are available via `site.static_files`. See [the Variables documentation page](/docs/variables/) and [`Jekyll::Site`]({{ site.repository }}/blob/master/lib/jekyll/site.rb) for more details. For instance, a generator can inject values computed at build time for template variables. In the following example the template `reading.html` has two variables `ongoing` and `done` that we fill in the generator: {% highlight ruby %} module Reading class Generator < Jekyll::Generator def generate(site) ongoing, done = Book.all.partition(&:ongoing?) reading = site.pages.detect {|page| page.name == 'reading.html'} reading.data['ongoing'] = ongoing reading.data['done'] = done end end end {% endhighlight %} This is a more complex generator that generates new pages: {% highlight ruby %} module Jekyll class CategoryPage < Page def initialize(site, base, dir, category) @site = site @base = base @dir = dir @name = 'index.html' self.process(@name) self.read_yaml(File.join(base, '_layouts'), 'category_index.html') self.data['category'] = category <API key> = site.config['<API key>'] || 'Category: ' self.data['title'] = "#{<API key>}#{category}" end end class <API key> < Generator safe true def generate(site) if site.layouts.key? 
'category_index' dir = site.config['category_dir'] || 'categories' site.categories.each_key do |category| site.pages << CategoryPage.new(site, site.source, File.join(dir, category), category) end end end end end {% endhighlight %} In this example, our generator will create a series of files under the `categories` directory for each category, listing the posts in each category using the `category_index.html` layout. Generators are only required to implement one method: <div class="<API key>"> <table> <thead> <tr> <th>Method</th> <th>Description</th> </tr> </thead> <tbody> <tr> <td> <p><code>generate</code></p> </td> <td> <p>Generates content as a side-effect.</p> </td> </tr> </tbody> </table> </div> ## Converters If you have a new markup language you’d like to use with your site, you can include it by implementing your own converter. Both the Markdown and [Textile](https://github.com/jekyll/<API key>) markup languages are implemented using this method. <div class="note info"> <h5>Remember your YAML Front Matter</h5> <p> Jekyll will only convert files that have a YAML header at the top, even for converters you add using a plugin. </p> </div> Below is a converter that will take all posts ending in `.upcase` and process them using the `UpcaseConverter`: {% highlight ruby %} module Jekyll class UpcaseConverter < Converter safe true priority :low def matches(ext) ext =~ /^\.upcase$/i end def output_ext(ext) ".html" end def convert(content) content.upcase end end end {% endhighlight %} Converters should implement at a minimum 3 methods: <div class="<API key>"> <table> <thead> <tr> <th>Method</th> <th>Description</th> </tr> </thead> <tbody> <tr> <td> <p><code>matches</code></p> </td> <td><p> Does the given extension match this converter’s list of acceptable extensions? Takes one argument: the file’s extension (including the dot). Must return <code>true</code> if it matches, <code>false</code> otherwise. 
</p></td> </tr> <tr> <td> <p><code>output_ext</code></p> </td> <td><p> The extension to be given to the output file (including the dot). Usually this will be <code>".html"</code>. </p></td> </tr> <tr> <td> <p><code>convert</code></p> </td> <td><p> Logic to do the content conversion. Takes one argument: the raw content of the file (without YAML Front Matter). Must return a String. </p></td> </tr> </tbody> </table> </div> In our example, `UpcaseConverter#matches` checks if our filename extension is `.upcase`, and will render using the converter if it is. It will call `UpcaseConverter simply uppercasing the entire content string. Finally, when it saves the page, it will do so with a `.html` extension. ## Commands As of version 2.5.0, Jekyll can be extended with plugins which provide subcommands for the `jekyll` executable. This is possible by including the relevant plugins in a `Gemfile` group called `:jekyll_plugins`: {% highlight ruby %} group :jekyll_plugins do gem "<API key>" end {% endhighlight %} Each `Command` must be a subclass of the `Jekyll::Command` class and must contain one class method: `init_with_program`. An example: {% highlight ruby %} class MyNewCommand < Jekyll::Command class << self def init_with_program(prog) prog.command(:new) do |c| c.syntax "new [options]" c.description 'Create a new Jekyll site.' c.option 'dest', '-d DEST', 'Where the site should go.' c.action do |args, options| Jekyll::Site.new_site_at(options['dest']) end end end end end {% endhighlight %} Commands should implement this single class method: <div class="<API key>"> <table> <thead> <tr> <th>Method</th> <th>Description</th> </tr> </thead> <tbody> <tr> <td> <p><code>init_with_program</code></p> </td> <td><p> This method accepts one parameter, the <code><a href="http://github.com/jekyll/mercenary#readme">Mercenary::Program</a></code> instance, which is the Jekyll program itself. Upon the program, commands may be created using the above syntax. 
For more details, visit the Mercenary repository on GitHub.com. </p></td> </tr> </tbody> </table> </div> ## Tags If you’d like to include custom liquid tags in your site, you can do so by hooking into the tagging system. Built-in examples added by Jekyll include the `highlight` and `include` tags. Below is an example of a custom liquid tag that will output the time the page was rendered: {% highlight ruby %} module Jekyll class RenderTimeTag < Liquid::Tag def initialize(tag_name, text, tokens) super @text = text end def render(context) "#{@text} #{Time.now}" end end end Liquid::Template.register_tag('render_time', Jekyll::RenderTimeTag) {% endhighlight %} At a minimum, liquid tags must implement: <div class="<API key>"> <table> <thead> <tr> <th>Method</th> <th>Description</th> </tr> </thead> <tbody> <tr> <td> <p><code>render</code></p> </td> <td> <p>Outputs the content of the tag.</p> </td> </tr> </tbody> </table> </div> You must also register the custom tag with the Liquid template engine as follows: {% highlight ruby %} Liquid::Template.register_tag('render_time', Jekyll::RenderTimeTag) {% endhighlight %} In the example above, we can place the following tag anywhere in one of our pages: {% highlight ruby %} {% raw %} <p>{% render_time page rendered at: %}</p> {% endraw %} {% endhighlight %} And we would get something like this on the page: {% highlight html %} <p>page rendered at: Tue June 22 23:38:47 –0500 2010</p> {% endhighlight %} Liquid filters You can add your own filters to the Liquid template system much like you can add tags above. Filters are simply modules that export their methods to liquid. All methods will have to take at least one parameter which represents the input of the filter. The return value will be the output of the filter. 
{% highlight ruby %} module Jekyll module AssetFilter def asset_url(input) "http://www.example.com/#{input}?#{Time.now.to_i}" end end end Liquid::Template.register_filter(Jekyll::AssetFilter) {% endhighlight %} <div class="note"> <h5>ProTip™: Access the site object using Liquid</h5> <p> Jekyll lets you access the <code>site</code> object through the <code>context.registers</code> feature of Liquid at <code>context.registers[:site]</code>. For example, you can access the global configuration file <code>_config.yml</code> using <code>context.registers[:site].config</code>. </p> </div> Flags There are two flags to be aware of when writing a plugin: <div class="<API key>"> <table> <thead> <tr> <th>Flag</th> <th>Description</th> </tr> </thead> <tbody> <tr> <td> <p><code>safe</code></p> </td> <td> <p> A boolean flag that informs Jekyll whether this plugin may be safely executed in an environment where arbitrary code execution is not allowed. This is used by GitHub Pages to determine which core plugins may be used, and which are unsafe to run. If your plugin does not allow for arbitrary code execution, set this to <code>true</code>. GitHub Pages still won’t load your plugin, but if you submit it for inclusion in core, it’s best for this to be correct! </p> </td> </tr> <tr> <td> <p><code>priority</code></p> </td> <td> <p> This flag determines what order the plugin is loaded in. Valid values are: <code>:lowest</code>, <code>:low</code>, <code>:normal</code>, <code>:high</code>, and <code>:highest</code>. Highest priority matches are applied first, lowest priority are applied last. 
</p> </td> </tr> </tbody> </table> </div> To use one of the example plugins above as an illustration, here is how you’d specify these two flags: {% highlight ruby %} module Jekyll class UpcaseConverter < Converter safe true priority :low end end {% endhighlight %} ## Hooks <div class="note unreleased"> <h5>Support for hooks is currently unreleased.</h5> <p> In order to use this feature, <a href="/docs/installation/#pre-releases"> install the latest development version of Jekyll</a>. </p> </div> Using hooks, your plugin can exercise fine-grained control over various aspects of the build process. If your plugin defines any hooks, Jekyll will call them at pre-defined points. Hooks are registered to a container and an event name. To register one, you call Jekyll::Hooks.register, and pass the container, event name, and code to call whenever the hook is triggered. For example, if you want to execute some custom functionality every time Jekyll renders a post, you could register a hook like this: {% highlight ruby %} Jekyll::Hooks.register :post, :post_render do |post| # code to call after Jekyll renders a post end {% endhighlight %} Jekyll provides hooks for <code>:site</code>, <code>:page</code>, <code>:post</code>, and <code>:document</code>. In all cases, Jekyll calls your hooks with the container object as the first callback parameter. But in the case of <code>:pre_render</code>, your hook will also receive a payload hash as a second parameter which allows you full control over the variables that are available while rendering. 
The complete list of available hooks is below: <div class="<API key>"> <table> <thead> <tr> <th>Container</th> <th>Event</th> <th>Called</th> </tr> </thead> <tbody> <tr> <td> <p><code>:site</code></p> </td> <td> <p><code>:after_reset</code></p> </td> <td> <p>Just after site reset</p> </td> </tr> <tr> <td> <p><code>:site</code></p> </td> <td> <p><code>:pre_render</code></p> </td> <td> <p>Just before rendering the whole site</p> </td> </tr> <tr> <td> <p><code>:site</code></p> </td> <td> <p><code>:post_render</code></p> </td> <td> <p>After rendering the whole site, but before writing any files</p> </td> </tr> <tr> <td> <p><code>:site</code></p> </td> <td> <p><code>:post_write</code></p> </td> <td> <p>After writing the whole site to disk</p> </td> </tr> <tr> <td> <p><code>:page</code></p> </td> <td> <p><code>:post_init</code></p> </td> <td> <p>Whenever a page is initialized</p> </td> </tr> <tr> <td> <p><code>:page</code></p> </td> <td> <p><code>:pre_render</code></p> </td> <td> <p>Just before rendering a page</p> </td> </tr> <tr> <td> <p><code>:page</code></p> </td> <td> <p><code>:post_render</code></p> </td> <td> <p>After rendering a page, but before writing it to disk</p> </td> </tr> <tr> <td> <p><code>:page</code></p> </td> <td> <p><code>:post_write</code></p> </td> <td> <p>After writing a page to disk</p> </td> </tr> <tr> <td> <p><code>:post</code></p> </td> <td> <p><code>:post_init</code></p> </td> <td> <p>Whenever a post is initialized</p> </td> </tr> <tr> <td> <p><code>:post</code></p> </td> <td> <p><code>:pre_render</code></p> </td> <td> <p>Just before rendering a post</p> </td> </tr> <tr> <td> <p><code>:post</code></p> </td> <td> <p><code>:post_render</code></p> </td> <td> <p>After rendering a post, but before writing it to disk</p> </td> </tr> <tr> <td> <p><code>:post</code></p> </td> <td> <p><code>:post_write</code></p> </td> <td> <p>After writing a post to disk</p> </td> </tr> <tr> <td> <p><code>:document</code></p> </td> <td> 
<p><code>:pre_render</code></p> </td> <td> <p>Just before rendering a document</p> </td> </tr> <tr> <td> <p><code>:document</code></p> </td> <td> <p><code>:post_render</code></p> </td> <td> <p>After rendering a document, but before writing it to disk</p> </td> </tr> <tr> <td> <p><code>:document</code></p> </td> <td> <p><code>:post_write</code></p> </td> <td> <p>After writing a document to disk</p> </td> </tr> </tbody> </table> </div> ## Available Plugins You can find a few useful plugins at the following locations: # Generators - [ArchiveGenerator by Ilkka Laukkanen](https: - [LESS.js Generator by Andy Fowler](https://gist.github.com/642739): Renders LESS.js files during generation. - [Version Reporter by Blake Smith](https://gist.github.com/449491): Creates a version.html file containing the Jekyll version. - [Sitemap.xml Generator by Michael Levin](https://github.com/kinnetica/jekyll-plugins): Generates a sitemap.xml file by traversing all of the available posts and pages. - [Full-text search by Pascal Widdershoven](https://github.com/PascalW/jekyll_indextank): Adds full-text search to your Jekyll site with a plugin and a bit of JavaScript. - [AliasGenerator by Thomas Mango](https://github.com/tsmango/<API key>): Generates redirect pages for posts when an alias is specified in the YAML Front Matter. - [Pageless Redirect Generator by Nick Quinlan](https://github.com/nquinlan/<API key>): Generates redirects based on files in the Jekyll root, with support for htaccess style redirects. - [RssGenerator by Assaf Gelber](https://github.com/agelber/jekyll-rss): Automatically creates an RSS 2.0 feed from your posts. - [Monthly archive generator by Shigeya Suzuki](https://github.com/shigeya/<API key>): Generator and template which renders monthly archive like MovableType style, based on the work by Ilkka Laukkanen and others above. 
- [Category archive generator by Shigeya Suzuki](https://github.com/shigeya/<API key>): Generator and template which renders category archive like MovableType style, based on Monthly archive generator. - [Emoji for Jekyll](https://github.com/yihangho/emoji-for-jekyll): Seamlessly enable emoji for all posts and pages. - [Compass integration for Jekyll](https://github.com/mscharley/jekyll-compass): Easily integrate Compass and Sass with your Jekyll website. - [Pages Directory by Ben Baker-Smith](https://github.com/bbakersmith/<API key>): Defines a `_pages` directory for page files which routes its output relative to the project root. - [Page Collections by Jeff Kolesky](https://github.com/jeffkole/<API key>): Generates collections of pages with functionality that resembles posts. - [Windows 8.1 Live Tile Generation by Matt Sheehan](https://github.com/sheehamj13/jekyll-live-tiles): Generates Internet Explorer 11 config.xml file and Tile Templates for pinning your site to Windows 8.1. - [Typescript Generator by Matt Sheehan](https://github.com/sheehamj13/jekyll_ts): Generate Javascript on build from your Typescript. - [Jekyll::AutolinkEmail by Ivan Tse](https://github.com/ivantsepp/<API key>): Autolink your emails. - [Jekyll::GitMetadata by Ivan Tse](https://github.com/ivantsepp/jekyll-git_metadata): Expose Git metadata for your templates. - [Jekyll Http Basic Auth Plugin](https://gist.github.com/snrbrnjna/<API key>): Plugin to manage http basic auth for jekyll generated pages and directories. - [Jekyll Auto Image by Merlos](https: - [Jekyll Portfolio Generator by Shannon Babincsak](https://github.com/codeinpink/<API key>): Generates project pages and computes related projects out of project data files. - [Jekyll-Umlauts by Arne Gockeln](https: # Converters - [Textile converter](https://github.com/jekyll/<API key>): Convert `.textile` files into HTML. Also includes the `textilize` Liquid filter. 
- [Slim plugin](https://github.com/slim-template/jekyll-slim): Slim converter and includes for Jekyll with support for Liquid tags. - [Jade plugin by John Papandriopoulos](https://github.com/snappylabs/jade-jekyll-plugin): Jade converter for Jekyll. - [HAML plugin by Sam Z](https://gist.github.com/517556): HAML converter for Jekyll. - [HAML-Sass Converter by Adam Pearson](https: - [Sass SCSS Converter by Mark Wolfe](https: - [LESS Converter by Jason Graham](https://gist.github.com/639920): Convert LESS files to CSS. - [LESS Converter by Josh Brown](https://gist.github.com/760265): Simple LESS converter. - [Upcase Converter by Blake Smith](https://gist.github.com/449463): An example Jekyll converter. - [CoffeeScript Converter by phaer](https: - [Markdown References by Olov Lassus](https://github.com/olov/jekyll-references): Keep all your markdown reference-style link definitions in one \_references.md file. - [Stylus Converter](https://gist.github.com/988201): Convert .styl to .css. - [ReStructuredText Converter](https://github.com/xdissent/jekyll-rst): Converts ReST documents to HTML with Pygments syntax highlighting. - [<API key>](https://github.com/dsanson/<API key>): Use pandoc for rendering markdown. - [<API key>](https: - [Transform Layouts](https://gist.github.com/1472645): Allows HAML layouts (you need a HAML Converter plugin for this to work). - [Org-mode Converter](https://gist.github.com/abhiyerra/7377603): Org-mode converter for Jekyll. - [Customized Kramdown Converter](https://github.com/mvdbos/<API key>): Enable Pygments syntax highlighting for Kramdown-parsed fenced code blocks. - [Bigfootnotes Plugin](https://github.com/TheFox/jekyll-bigfootnotes): Enables big footnotes for Kramdown. - [AsciiDoc Plugin](https: # Filters - [Truncate HTML](https: - [Domain Name Filter by Lawrence Woodman](https://github.com/LawrenceWoodman/<API key>): Filters the input text so that just the domain name is left. 
- [Summarize Filter by Mathieu Arnold](https://gist.github.com/731597): Remove markup after a `<div id="extended">` tag. - [i18n_filter](https://github.com/gacha/gacha.id.lv/blob/master/_plugins/i18n_filter.rb): Liquid filter to use I18n localization. - [Smilify](https: - [Read in X Minutes](https: - [Jekyll-timeago](https://github.com/markets/jekyll-timeago): Converts a time value to the time ago in words. - [pluralize](https: - [reading_time](https://github.com/bdesham/reading_time): Count words and estimate reading time for a piece of text, ignoring HTML elements that are unlikely to contain running text. - [Table of Content Generator](https://github.com/dafi/<API key>): Generate the HTML code containing a table of content (TOC), the TOC can be customized in many way, for example you can decide which pages can be without TOC. - [jekyll-humanize](https://github.com/23maverick23/jekyll-humanize): This is a port of the Django app humanize which adds a "human touch" to data. Each method represents a Fluid type filter that can be used in your Jekyll site templates. Given that Jekyll produces static sites, some of the original methods do not make logical sense to port (e.g. naturaltime). - [Jekyll-Ordinal](https://github.com/PatrickC8t/Jekyll-Ordinal): Jekyll liquid filter to output a date ordinal such as "st", "nd", "rd", or "th". - [Deprecated articles keeper](https: - [Jekyll-jalali](https: - [Jekyll Thumbnail Filter](https://github.com/matallo/<API key>): Related posts thumbnail filter. - [Jekyll-Smartify](https://github.com/pathawks/jekyll-smartify): SmartyPants filter. Make &quot;quotes&quot; &ldquo;curly&rdquo; - [liquid-md5](https://github.com/pathawks/liquid-md5): Returns an MD5 hash. Helpful for generating Gravatars in templates. # Tags - [Asset Path Tag](https: - [Delicious Plugin by Christian Hellsten](https://github.com/christianhellsten/jekyll-plugins): Fetches and renders bookmarks from delicious.com. 
- [Ultraviolet Plugin by Steve Alex](https: - [Tag Cloud Plugin by Ilkka Laukkanen](https://gist.github.com/710577): Generate a tag cloud that links to tag pages. - [GIT Tag by Alexandre Girard](https://gist.github.com/730347): Add Git activity inside a list. - [MathJax Liquid Tags by Jessy Cowan-Sharp](https://gist.github.com/834610): Simple liquid tags for Jekyll that convert inline math and block equations to the appropriate MathJax script tags. - [Non-JS Gist Tag by Brandon Tilley](https://gist.github.com/1027674) A Liquid tag that embeds Gists and shows code for non-JavaScript enabled browsers and readers. - [Render Time Tag by Blake Smith](https://gist.github.com/449509): Displays the time a Jekyll page was generated. - [Status.net/OStatus Tag by phaer](https://gist.github.com/912466): Displays the notices in a given status.net/ostatus feed. - [Embed.ly client by Robert Böhnke](https://github.com/robb/<API key>): Autogenerate embeds from URLs using oEmbed. - [Logarithmic Tag Cloud](https://gist.github.com/2290195): Flexible. Logarithmic distribution. Documentation inline. - [oEmbed Tag by Tammo van Lessen](https://gist.github.com/1455726): Enables easy content embedding (e.g. from YouTube, Flickr, Slideshare) via oEmbed. - [FlickrSetTag by Thomas Mango](https://github.com/tsmango/<API key>): Generates image galleries from Flickr sets. - [Tweet Tag by Scott W. Bradley](https: - [Jekyll Twitter Plugin](https: - [<API key>](https://github.com/rustygeldmacher/<API key>): Lets you use Rails-like content_for tags in your templates, for passing content from your posts up to your layouts. - [Generate YouTube Embed](https: - [Jekyll-beastiepress](https://github.com/okeeblow/jekyll-beastiepress): FreeBSD utility tags for Jekyll sites. - [Jsonball](https://gist.github.com/1895282): Reads json files and produces maps for use in Jekyll files. 
- [Bibjekyll](https://github.com/pablooliveira/bibjekyll): Render BibTeX-formatted bibliographies/citations included in posts and pages using bibtex2html. - [Jekyll-citation](https://github.com/archome/jekyll-citation): Render BibTeX-formatted bibliographies/citations included in posts and pages (pure Ruby). - [Jekyll Dribbble Set Tag](https://github.com/ericdfields/<API key>): Builds Dribbble image galleries from any user. - [Debbugs](https://gist.github.com/2218470): Allows posting links to Debian BTS easily. - [Refheap_tag](https: - [Jekyll-devonly_tag](https://gist.github.com/2403522): A block tag for including markup only during development. - [JekyllGalleryTag](https: - [Youku and Tudou Embed](https://gist.github.com/Yexiaoxing/5891929): Liquid plugin for embedding Youku and Tudou videos. - [Jekyll-swfobject](https: - [Jekyll Picture Tag](https://github.com/robwierzbowski/jekyll-picture-tag): Easy responsive images for Jekyll. Based on the proposed [`<picture>`](https://html.spec.whatwg.org/multipage/embedded-content.html - [Jekyll Image Tag](https://github.com/robwierzbowski/jekyll-image-tag): Better images for Jekyll. Save image presets, generate resized images, and add classes, alt text, and other attributes. - [Ditaa Tag](https: - [Jekyll Suggested Tweet](https: - [Jekyll Date Chart](https: - [Jekyll Image Encode](https: - [Jekyll Quick Man](https: - [jekyll-font-awesome](https://gist.github.com/23maverick23/8532525): Quickly and easily add Font Awesome icons to your posts. - [Lychee Gallery Tag](https: - [Image Set/Gallery Tag](https: - [jekyll_figure](https://github.com/lmullen/jekyll_figure): Generate figures and captions with links to the figure in a variety of formats - [Jekyll Github Sample Tag](https://github.com/bwillis/<API key>): A liquid tag to include a sample of a github repo file in your Jekyll site. 
- [Jekyll Project Version Tag](https://github.com/rob-murray/<API key>): A Liquid tag plugin that renders a version identifier for your Jekyll site sourced from the git repository containing your code.
- [Piwigo Gallery](https:
- [mathml.rb](https://github.com/tmthrgd/jekyll-plugins) by Tom Thorogood: A plugin to convert TeX mathematics into MathML for display.
- [webmention_io.rb](https:
- [Jekyll 500px Embed](https:
- [inline\_highlight](https://github.com/bdesham/inline_highlight): A tag for inline syntax highlighting.
- [jekyll-mermaid](https://github.com/jasonbellamy/jekyll-mermaid): Simplify the creation of mermaid diagrams and flowcharts in your posts and pages.
- [twa](https://github.com/Ezmyrelda/twa): Twemoji Awesome plugin for Jekyll. Liquid tag allowing you to use Twitter emoji in your Jekyll pages.
- [jekyll-files](https:
- [Fetch remote file content](https:
- [jekyll-asciinema](https:
- [Jekyll-Youtube](https://github.com/dommmel/jekyll-youtube): A Liquid tag that embeds YouTube videos. The default embed markup is responsive, but you can also specify your own by using an include/partial.

# Collections

- [Jekyll Plugins by Recursive Design](https://github.com/recurser/jekyll-plugins): Plugins to generate Project pages from GitHub readmes, a Category page, and a Sitemap generator.
- [Company website and blog plugins](https:
- [Jekyll plugins by Aucor](https://github.com/aucor/jekyll-plugins): Plugins for trimming unwanted newlines/whitespace and sorting pages by weight attribute.

# Other

- [ditaa-ditaa](https://github.com/tmthrgd/ditaa-ditaa) by Tom Thorogood: a drastic revision of jekyll-ditaa that renders diagrams drawn using ASCII art into PNG images.
- [Pygments Cache Path by Raimonds Simanovskis](https://github.com/rsim/blog.rayapps.com/blob/master/_plugins/<API key>.rb): Plugin to cache syntax-highlighted code from Pygments.
- [Draft/Publish Plugin by Michael Ivey](https://gist.github.com/49630): Save posts as drafts.
- [Growl Notification Generator by Tate Johnson](https://gist.github.com/490101): Send Jekyll notifications to Growl. - [Growl Notification Hook by Tate Johnson](https: - [Related Posts by Lawrence Woodman](https://github.com/LawrenceWoodman/<API key>): Overrides `site.related_posts` to use categories to assess relationship. - [Tiered Archives by Eli Naeher](https://gist.github.com/<API key>): Create tiered template variable that allows you to group archives by year and month. - [Jekyll-localization](https://github.com/blackwinter/jekyll-localization): Jekyll plugin that adds localization features to the rendering engine. - [Jekyll-rendering](https://github.com/blackwinter/jekyll-rendering): Jekyll plugin to provide alternative rendering engines. - [Jekyll-pagination](https://github.com/blackwinter/jekyll-pagination): Jekyll plugin to extend the pagination generator. - [Jekyll-tagging](https://github.com/pattex/jekyll-tagging): Jekyll plugin to automatically generate a tag cloud and tag pages. - [Jekyll-scholar](https://github.com/inukshuk/jekyll-scholar): Jekyll extensions for the blogging scholar. - [<API key>](https://github.com/moshen/<API key>): Bundles and minifies JavaScript and CSS. - [Jekyll-assets](http: - [JAPR](https://github.com/kitsched/japr): Jekyll Asset Pipeline Reborn - Powerful asset pipeline for Jekyll that collects, converts and compresses JavaScript and CSS assets. - [File compressor](https: - [Jekyll-minibundle](https://github.com/tkareine/jekyll-minibundle): Asset bundling and cache busting using external minification tool of your choice. No gem dependencies. - [Singlepage-jekyll](https: - [generator-jekyllrb](https: - [grunt-jekyll](https: - [jekyll-postfiles](https://github.com/indirect/jekyll-postfiles): Add `_postfiles` directory and {% raw %}`{{ postfile }}`{% endraw %} tag so the files a post refers to will always be right there inside your repo. 
- [A layout that compresses HTML](http://jch.penibelst.de/): Github Pages compatible, configurable way to compress HTML files on site build. - [Jekyll CO₂](https: - [remote-include](http: - [jekyll-minifier](https://github.com/digitalsparky/jekyll-minifier): Minifies HTML, XML, CSS, and Javascript both inline and as separate files utilising yui-compressor and htmlcompressor. - [Jekyll views router](https://bitbucket.org/nyufac/jekyll-views-router): Simple router between generator plugins and templates. # Editors - [sublime-jekyll](https: - [vim-jekyll](https://github.com/parkr/vim-jekyll): A vim plugin to generate new posts and run `jekyll build` all without leaving vim. - [markdown-writer](https://atom.io/packages/markdown-writer): An Atom package for Jekyll. It can create new posts/drafts, manage tags/categories, insert link/images and add many useful key mappings. <div class="note info"> <h5>Jekyll Plugins Wanted</h5> <p> If you have a Jekyll plugin that you would like to see added to this list, you should <a href="../contributing/">read the contributing page</a> to find out how to make that happen. </p> </div>
// #docregion import { Component } from '@angular/core'; import { DataService } from './data.service'; @Component({ selector: 'hero-di', template: `<h1>Hero: {{name}}</h1>` }) export class HeroComponent { name = ''; constructor(dataService: DataService) { this.name = dataService.getHeroName(); } } // #enddocregion
#!/bin/zsh

# WARP
# oh-my-zsh plugin
#
# @github.com/mfaerevaag/wd

# Entry point for the `wd` command: source the implementation script on each
# invocation so its state is always fresh.
#
# The path is quoted: an unquoted $ZSH would undergo word splitting and break
# for installations whose path contains spaces.
wd() {
  . "$ZSH/plugins/wd/wd.sh"
}
// Re-export a stylesheet module so it is reachable both as an ES default
// export and through the `__export__` binding.
import css from './source.css';

// NOTE(review): `__export__` is not declared in this module — presumably it
// is provided by the surrounding bundler/test harness that evaluates this
// file; confirm before relying on it (it would be an implicit global in a
// plain script context).
__export__ = css;

export default css;
require 'rails_helper'
require_dependency 'version'

# Specs for the admin-only endpoint that reports the installed and latest
# available Discourse versions.
describe Admin::VersionsController do
  before do
    # Stub out the remote version check so the specs are deterministic and
    # never hit the network.
    Jobs::VersionCheck.any_instance.stubs(:execute).returns(true)
    DiscourseUpdates.stubs(:updated_at).returns(2.hours.ago)
    DiscourseUpdates.stubs(:latest_version).returns('1.2.33')
    # NOTE(review): the stubbed predicate name below appears garbled in this
    # source (`<API key>?`) — verify the intended method name against the
    # DiscourseUpdates module before relying on this spec.
    DiscourseUpdates.stubs(:<API key>?).returns(false)
  end

  it "is a subclass of AdminController" do
    expect(Admin::VersionsController < Admin::AdminController).to eq(true)
  end

  context 'while logged in as an admin' do
    before do
      @user = log_in(:admin)
    end

    describe 'show' do
      # XHR GET so the controller renders JSON rather than HTML.
      subject { xhr :get, :show }

      it { is_expected.to be_success }

      it 'should return the currently available version' do
        json = JSON.parse(subject.body)
        expect(json['latest_version']).to eq('1.2.33')
      end

      it "should return the installed version" do
        json = JSON.parse(subject.body)
        expect(json['installed_version']).to eq(Discourse::VERSION::STRING)
      end
    end
  end
end
package fpinscala.testing.exhaustive

import language.implicitConversions
import language.postfixOps

/*
This source file contains the answers to the last two exercises in the section
"Test Case Minimization" of chapter 8 on property-based testing.

The Gen data type in this file incorporates exhaustive checking of finite
domains.
*/

import fpinscala.laziness.{Stream,Cons,Empty}
import fpinscala.state._
import fpinscala.parallelism._
import fpinscala.parallelism.Par.Par
import Gen._
import Prop._
import Status._
import java.util.concurrent.{Executors,ExecutorService}

// A property is a function from (maximum test size, test-case budget, random
// seed) to a pass/fail Result.
case class Prop(run: (MaxSize,TestCases,RNG) => Result) {
  // Conjunction: `p` runs only if this property did not fail; the test
  // counts of both runs are summed.
  def &&(p: Prop) = Prop {
    (max,n,rng) => run(max,n,rng) match {
      case Right((a,n)) => p.run(max,n,rng).right.map { case (s,m) => (s,n+m) }
      case l => l
    }
  }
  // Disjunction: `p` runs only if this property failed; its result is tagged
  // with this property's failure message.
  def ||(p: Prop) = Prop {
    (max,n,rng) => run(max,n,rng) match {
      case Left(msg) => p.tag(msg).run(max,n,rng)
      case r => r
    }
  }
  /* This is rather simplistic - in the event of failure, we simply prepend
   * the given message on a newline in front of the existing message.
   */
  def tag(msg: String) = Prop {
    (max,n,rng) => run(max,n,rng) match {
      case Left(e) => Left(msg + "\n" + e)
      case r => r
    }
  }
}

object Prop {
  type TestCases = Int
  type MaxSize = Int
  type FailedCase = String
  // Left is a failure description; Right is (status, number of tests run).
  type Result = Either[FailedCase,(Status,TestCases)]

  def forAll[A](a: Gen[A])(f: A => Boolean): Prop = Prop {
    (n,rng) => {
      // Walk a stream of optional test inputs from index `i` up to `j`;
      // `onEnd` decides the terminal result if the stream runs out first.
      def go(i: Int, j: Int, s: Stream[Option[A]], onEnd: Int => Result): Result =
        if (i == j) Right((Unfalsified, i))
        else s match {
          case Cons(h,t) => h() match {
            case Some(h) =>
              try { if (f(h)) go(i+1,j,t(),onEnd) else Left(h.toString) }
              catch { case e: Exception => Left(buildMsg(h, e)) }
            case None => Right((Unfalsified,i))
          }
          case _ => onEnd(i)
        }
      // Spend a third of the budget trying to exhaust the finite domain
      // (which can yield Proven); otherwise fall back to random sampling.
      go(0, n/3, a.exhaustive, i => Right((Proven, i))) match {
        case Right((Unfalsified,_)) =>
          val rands = randomStream(a)(rng).map(Some(_))
          go(n/3, n, rands, i => Right((Unfalsified, i)))
        case s => s // If proven or failed, stop immediately
      }
    }
  }

  // Formats a failing input together with the thrown exception and trace.
  def buildMsg[A](s: A, e: Exception): String =
    "test case: " + s + "\n" +
    "generated an exception: " + e.getMessage + "\n" +
    "stack trace:\n" + e.getStackTrace.mkString("\n")

  def apply(f: (TestCases,RNG) => Result): Prop =
    Prop { (_,n,rng) => f(n,rng) }

  /* We pattern match on the `SGen`, and delegate to our `Gen` version of `forAll`
   * if `g` is unsized; otherwise, we call the sized version of `forAll` (below).
   */
  def forAll[A](g: SGen[A])(f: A => Boolean): Prop = g match {
    case Unsized(g2) => forAll(g2)(f)
    case Sized(gs) => forAll(gs)(f)
  }

  /* The sized case of `forAll` is as before, though we convert from `Proven` to
   * `Exhausted`. A sized generator can never be proven, since there are always
   * larger-sized tests that were not run which may have failed.
   */
  def forAll[A](g: Int => Gen[A])(f: A => Boolean): Prop = Prop {
    (max,n,rng) =>
      val casesPerSize = n / max + 1
      val props: List[Prop] =
        Stream.from(0).take(max+1).map(i => forAll(g(i))(f)).toList
      val p: Prop = props.map(p => Prop((max,n,rng) => p.run(max,casesPerSize,rng))).
        reduceLeft(_ && _)
      p.run(max,n,rng).right.map {
        case (Proven,n) => (Exhausted,n)
        case x => x
      }
  }

  // Convenience runner: executes a property and prints a human-readable
  // summary of the outcome.
  def run(p: Prop,
          maxSize: Int = 100, // A default argument of `200`
          testCases: Int = 100,
          rng: RNG = RNG.Simple(System.currentTimeMillis)): Unit = {
    p.run(maxSize, testCases, rng) match {
      case Left(msg) => println("! test failed:\n" + msg)
      case Right((Unfalsified,n)) =>
        println("+ property unfalsified, ran " + n + " tests")
      case Right((Proven,n)) =>
        println("+ property proven, ran " + n + " tests")
      case Right((Exhausted,n)) =>
        println("+ property unfalsified up to max size, ran " + n + " tests")
    }
  }

  val ES: ExecutorService = Executors.newCachedThreadPool

  // Sample properties exercising the Par (parallelism) laws.
  val p1 = Prop.forAll(Gen.unit(Par.unit(1)))(i =>
    Par.map(i)(_ + 1)(ES).get == Par.unit(2)(ES).get)

  def check(p: => Boolean): Prop = // Note that we are non-strict here
    forAll(unit(()))(_ => p)

  val p2 = check {
    val p = Par.map(Par.unit(1))(_ + 1)
    val p2 = Par.unit(2)
    p(ES).get == p2(ES).get
  }

  def equal[A](p: Par[A], p2: Par[A]): Par[Boolean] =
    Par.map2(p,p2)(_ == _)

  val p3 = check {
    equal (
      Par.map(Par.unit(1))(_ + 1),
      Par.unit(2)
    ) (ES) get
  }

  // Generator of executor services, biased toward small fixed pools.
  val S = weighted(
    choose(1,4).map(Executors.newFixedThreadPool) -> .75,
    unit(Executors.newCachedThreadPool) -> .25) // `a -> b` is syntax sugar for `(a,b)`

  def forAllPar[A](g: Gen[A])(f: A => Par[Boolean]): Prop =
    forAll(S.map2(g)((_,_))) { case (s,a) => f(a)(s).get }

  def checkPar(p: Par[Boolean]): Prop =
    forAllPar(Gen.unit(()))(_ => p)

  def forAllPar2[A](g: Gen[A])(f: A => Par[Boolean]): Prop =
    forAll(S ** g) { case (s,a) => f(a)(s).get }

  // Same as forAllPar2, but destructuring via the `**` extractor object.
  def forAllPar3[A](g: Gen[A])(f: A => Par[Boolean]): Prop =
    forAll(S ** g) { case s ** a => f(a)(s).get }

  val pint = Gen.choose(0,10) map (Par.unit(_))
  val p4 = forAllPar(pint)(n => equal(Par.map(n)(y => y), n))

  val forkProp = Prop.forAllPar(pint2)(i => equal(Par.fork(i), i)) tag "fork"
}

// Outcome of a property run: see the three cases below.
sealed trait Status {}

object Status {
  case object Exhausted extends Status
  case object Proven extends Status
  case object Unfalsified extends Status
}

/* The `Gen` type now has a random generator as well as an exhaustive stream.
Infinite domains will simply generate infinite streams of None.
A finite domain is exhausted when the stream reaches empty.
*/
case class Gen[+A](sample: State[RNG,A], exhaustive: Stream[Option[A]]) {
  def map[B](f: A => B): Gen[B] =
    Gen(sample.map(f), exhaustive.map(_.map(f)))

  def map2[B,C](g: Gen[B])(f: (A,B) => C): Gen[C] =
    Gen(sample.map2(g.sample)(f),
        map2Stream(exhaustive,g.exhaustive)(map2Option(_,_)(f)))

  // A `None` in the exhaustive stream marks an unbounded domain, which
  // stays unbounded through flatMap.
  def flatMap[B](f: A => Gen[B]): Gen[B] =
    Gen(sample.flatMap(a => f(a).sample),
        exhaustive.flatMap {
          case None => unbounded
          case Some(a) => f(a).exhaustive
        })

  /* A method alias for the function we wrote earlier. */
  def listOfN(size: Int): Gen[List[A]] =
    Gen.listOfN(size, this)

  /* A version of `listOfN` that generates the size to use dynamically. */
  def listOfN(size: Gen[Int]): Gen[List[A]] =
    size flatMap (n => this.listOfN(n))

  def listOf: SGen[List[A]] = Gen.listOf(this)
  def listOf1: SGen[List[A]] = Gen.listOf1(this)

  def unsized = Unsized(this)

  def **[B](g: Gen[B]): Gen[(A,B)] =
    (this map2 g)((_,_))
}

object Gen {
  type Domain[+A] = Stream[Option[A]]

  def bounded[A](a: Stream[A]): Domain[A] = a map (Some(_))
  def unbounded: Domain[Nothing] = Stream(None)

  def unit[A](a: => A): Gen[A] =
    Gen(State.unit(a), bounded(Stream(a)))

  def boolean: Gen[Boolean] =
    Gen(State(RNG.boolean), bounded(Stream(true,false)))

  def choose(start: Int, stopExclusive: Int): Gen[Int] =
    Gen(State(RNG.nonNegativeInt).map(n => start + n % (stopExclusive-start)),
        bounded(Stream.from(start).take(stopExclusive-start)))

  /* This implementation is rather tricky, but almost impossible to get wrong
   * if you follow the types. It relies on several helper functions (see below).
   */
  def listOfN[A](n: Int, g: Gen[A]): Gen[List[A]] =
    Gen(State.sequence(List.fill(n)(g.sample)),
        cartesian(Stream.constant(g.exhaustive).take(n)).
        map(l => sequenceOption(l.toList)))

  /* `cartesian` generates all possible combinations of a `Stream[Stream[A]]`. For instance:
   *
   *    cartesian(Stream(Stream(1,2), Stream(3), Stream(4,5))) ==
   *    Stream(Stream(1,3,4), Stream(1,3,5), Stream(2,3,4), Stream(2,3,5))
   */
  def cartesian[A](s: Stream[Stream[A]]): Stream[Stream[A]] =
    s.foldRight(Stream(Stream[A]()))((hs,ts) => map2Stream(hs,ts)(Stream.cons(_,_)))

  /* `map2Option` and `map2Stream`. Notice the duplication! */
  def map2Option[A,B,C](oa: Option[A], ob: Option[B])(f: (A,B) => C): Option[C] =
    for { a <- oa; b <- ob } yield f(a,b)

  /* This is not the same as `zipWith`, a function we've implemented before.
   * We are generating all (A,B) combinations and using each to produce a `C`.
   * This implementation desugars to sa.flatMap(a => sb.map(b => f(a,b))).
   */
  def map2Stream[A,B,C](sa: Stream[A], sb: => Stream[B])(f: (A,=>B) => C): Stream[C] =
    for { a <- sa; b <- sb } yield f(a,b)

  /* This is a function we've implemented before. Unfortunately, it does not
   * exist in the standard library. This implementation is uses a foldLeft,
   * followed by a reverse, which is equivalent to a foldRight, but does not
   * use any stack space.
   */
  def sequenceOption[A](o: List[Option[A]]): Option[List[A]] =
    o.foldLeft[Option[List[A]]](Some(List()))(
      (t,h) => map2Option(h,t)(_ :: _)).map(_.reverse)

  /* Notice we are using the `unbounded` definition here, which is just
   * `Stream(None)` in our current representation of `exhaustive`.
   */
  def uniform: Gen[Double] = Gen(State(RNG.double), unbounded)

  def choose(i: Double, j: Double): Gen[Double] =
    Gen(State(RNG.double).map(d => i + d*(j-i)), unbounded)

  /* Basic idea is add 1 to the result of `choose` if it is of the wrong
   * parity, but we require some special handling to deal with the maximum
   * integer in the range.
   */
  def even(start: Int, stopExclusive: Int): Gen[Int] =
    choose(start, if (stopExclusive%2 == 0) stopExclusive - 1 else stopExclusive).
    map (n => if (n%2 != 0) n+1 else n)

  def odd(start: Int, stopExclusive: Int): Gen[Int] =
    choose(start, if (stopExclusive%2 != 0) stopExclusive - 1 else stopExclusive).
    map (n => if (n%2 == 0) n+1 else n)

  def sameParity(from: Int, to: Int): Gen[(Int,Int)] = for {
    i <- choose(from,to)
    j <- if (i%2 == 0) even(from,to) else odd(from,to)
  } yield (i,j)

  def listOfN_1[A](n: Int, g: Gen[A]): Gen[List[A]] =
    List.fill(n)(g).foldRight(unit(List[A]()))((a,b) => a.map2(b)(_ :: _))

  /* The simplest possible implementation. This will put all elements of one
   * `Gen` before the other in the exhaustive traversal. It might be nice to
   * interleave the two streams, so we get a more representative sample if we
   * don't get to examine the entire exhaustive stream.
   */
  def union_1[A](g1: Gen[A], g2: Gen[A]): Gen[A] =
    boolean.flatMap(b => if (b) g1 else g2)

  def union[A](g1: Gen[A], g2: Gen[A]): Gen[A] = Gen(
    State(RNG.boolean).flatMap(b => if (b) g1.sample else g2.sample),
    interleave(g1.exhaustive, g2.exhaustive)
  )

  def interleave[A](s1: Stream[A], s2: Stream[A]): Stream[A] =
    s1.zipAll(s2).flatMap { case (a,a2) => Stream((a.toList ++ a2.toList): _*) }

  /* The random case is simple - we generate a double and use this to choose between
   * the two random samplers. The exhaustive case is trickier if we want to try
   * to produce a stream that does a weighted interleave of the two exhaustive streams.
   */
  def weighted[A](g1: (Gen[A],Double), g2: (Gen[A],Double)): Gen[A] = {
    /* The probability we should pull from `g1`. */
    val g1Threshold = g1._2.abs / (g1._2.abs + g2._2.abs)

    /* Some random booleans to use for selecting between g1 and g2 in the exhaustive case.
     * Making up a seed locally is fine here, since we just want a deterministic schedule
     * with the right distribution. */
    def bools: Stream[Boolean] =
      randomStream(uniform.map(_ < g1Threshold))(RNG.Simple(302837L))

    Gen(State(RNG.double).flatMap(d =>
          if (d < g1Threshold) g1._1.sample else g2._1.sample),
        interleave(bools, g1._1.exhaustive, g2._1.exhaustive))
  }

  /* Produce an infinite random stream from a `Gen` and a starting `RNG`. */
  def randomStream[A](g: Gen[A])(rng: RNG): Stream[A] =
    Stream.unfold(rng)(rng => Some(g.sample.run(rng)))

  /* Interleave the two streams, using `b` to control which stream to pull from at each step.
   * A value of `true` attempts to pull from `s1`; `false` attempts to pull from `s1`.
   * When either stream is exhausted, insert all remaining elements from the other stream.
   */
  def interleave[A](b: Stream[Boolean], s1: Stream[A], s2: Stream[A]): Stream[A] =
    b.headOption map { hd =>
      if (hd) s1 match {
        case Cons(h, t) => Stream.cons(h(), interleave(b drop 1, t(), s2))
        case _ => s2
      }
      else s2 match {
        case Cons(h, t) => Stream.cons(h(), interleave(b drop 1, s1, t()))
        case _ => s1
      }
    } getOrElse Stream.empty

  def listOf[A](g: Gen[A]): SGen[List[A]] =
    Sized(n => g.listOfN(n))

  /* Not the most efficient implementation, but it's simple.
   * This generates ASCII strings.
   */
  def stringN(n: Int): Gen[String] =
    listOfN(n, choose(0,127)).map(_.map(_.toChar).mkString)

  def string: SGen[String] = Sized(stringN)

  case class Sized[+A](forSize: Int => Gen[A]) extends SGen[A]
  case class Unsized[+A](get: Gen[A]) extends SGen[A]

  // Lets a plain Gen be used wherever an SGen is expected.
  implicit def unsized[A](g: Gen[A]): SGen[A] = Unsized(g)

  val smallInt = Gen.choose(-10,10)

  val maxProp = forAll(listOf(smallInt)) { l =>
    val max = l.max
    !l.exists(_ > max) // No value greater than `max` should exist in `l`
  }

  // Non-empty list generator (size is clamped to at least 1).
  def listOf1[A](g: Gen[A]): SGen[List[A]] =
    Sized(n => g.listOfN(n max 1))

  val maxProp1 = forAll(listOf1(smallInt)) { l =>
    val max = l.max
    !l.exists(_ > max) // No value greater than `max` should exist in `l`
  }

  val sortedProp = forAll(listOf(smallInt)) { l =>
    val ls = l.sorted
    l.isEmpty || ls.tail.isEmpty || !l.zip(ls.tail).exists { case (a,b) => a > b }
  }

  // Extractor enabling `case a ** b` pattern syntax on pairs.
  object ** {
    def unapply[A,B](p: (A,B)) = Some(p)
  }

  /* A `Gen[Par[Int]]` generated from a list summation that spawns a new parallel
   * computation for each element of the input list summed to produce the final
   * result. This is not the most compelling example, but it provides at least some
   * variation in structure to use for testing.
   */
  lazy val pint2: Gen[Par[Int]] =
    choose(-100,100).listOfN(choose(0,20)).map(l =>
      l.foldLeft(Par.unit(0))((p,i) =>
        Par.fork { Par.map2(p, Par.unit(i))(_ + _) }))

  def genStringIntFn(g: Gen[Int]): Gen[String => Int] =
    g map (i => (s => i))
}

trait SGen[+A] {
  def map[B](f: A => B): SGen[B] = this match {
    case Sized(g) => Sized(g andThen (_ map f))
    case Unsized(g) => Unsized(g map f)
  }
  def flatMap[B](f: A => Gen[B]): SGen[B] = this match {
    case Sized(g) => Sized(g andThen (_ flatMap f))
    case Unsized(g) => Unsized(g flatMap f)
  }
  def **[B](s2: SGen[B]): SGen[(A,B)] = (this,s2) match {
    case (Sized(g), Sized(g2)) => Sized(n => g(n) ** g2(n))
    case (Unsized(g), Unsized(g2)) => Unsized(g ** g2)
    case (Sized(g), Unsized(g2)) => Sized(n => g(n) ** g2)
    case (Unsized(g), Sized(g2)) => Sized(n => g ** g2(n))
  }
}
// errorcheck

// Use of this source code is governed by a BSD-style

// issue 5358: incorrect error message when using f(g()) form on ... args.

package main

// f takes a fixed int followed by a variadic int parameter.
func f(x int, y ...int) {}

// g is declared without a body (implemented elsewhere). Its second result
// is a []int, which cannot be used to satisfy f's variadic ...int in the
// multi-value f(g()) call form; the compiler must report a sensible error.
func g() (int, []int)

func main() {
	// The ERROR comment below is an errorcheck directive: the compiler's
	// diagnostic for this line must match the given regexp.
	f(g()) // ERROR "as type int in|incompatible type"
}
;(function(window) {

  /** Backup possible window/global object */
  var oldWin = window,

  /** Possible global object */
  thisBinding = this,

  /** Detect free variable `exports` */
  freeExports = typeof exports == 'object' && exports,

  /** Detect free variable `global`; when found, it becomes the `window` used below */
  freeGlobal = typeof global == 'object' && global &&
    (global == global.global ? (window = global) : global),

  /** Used to check for own properties of an object */
  hasOwnProperty = {}.hasOwnProperty,

  /** Used to resolve a value's internal [[Class]] */
  toString = {}.toString,

  /** Detect Java environment */
  java = /Java/.test(getClassOf(window.java)) && window.java,

  /** A character to represent alpha */
  alpha = java ? 'a' : '\u03b1',

  /** A character to represent beta */
  beta = java ? 'b' : '\u03b2',

  /** Browser document object */
  doc = window.document || {},

  /** Browser navigator object */
  nav = window.navigator || {},

  /** Previous platform object */
  old = window.platform,

  /** Browser user agent string */
  userAgent = nav.userAgent || '',

  /** Opera Mini exposes `operamini`; desktop Opera exposes `opera` */
  opera = window.operamini || window.opera,

  /** Opera regexp */
  reOpera = /Opera/,

  /** Opera [[Class]]; `opera` is nulled out when its [[Class]] is not Opera-like */
  operaClass = reOpera.test(operaClass = getClassOf(opera)) ? operaClass : (opera = null);

  /**
   * Capitalizes a string value.
   * @private
   * @param {String} string The string to capitalize.
   * @returns {String} The capitalized string.
   */
  function capitalize(string) {
    string = String(string);
    return string.charAt(0).toUpperCase() + string.slice(1);
  }

  /**
   * An iteration utility for arrays and objects.
   * @private
   * @param {Array|Object} object The object to iterate over.
   * @param {Function} callback The function called per iteration.
   */
  function each(object, callback) {
    var index = -1,
        length = object.length;

    // `length == length >>> 0` holds only for valid array lengths
    // (non-negative integers); anything else is iterated as an object.
    if (length == length >>> 0) {
      while (++index < length) {
        callback(object[index], index, object);
      }
    } else {
      forOwn(object, callback);
    }
  }

  /**
   * Iterates over an object's own properties, executing the `callback` for each.
   * @private
   * @param {Object} object The object to iterate over.
   * @param {Function} callback The function executed per own property.
   */
  function forOwn(object, callback) {
    for (var key in object) {
      hasKey(object, key) && callback(object[key], key, object);
    }
  }

  /**
   * Trim and conditionally capitalize string values.
   * @private
   * @param {String} string The string to format.
   * @returns {String} The formatted string.
   */
  function format(string) {
    string = trim(string);
    // Names like "webOS", "iOS", and "iPhone"/"iPad"/"iPod" keep their casing.
    return /^(?:webOS|i(?:OS|P))/.test(string)
      ? string
      : capitalize(string);
  }

  /**
   * Gets the internal [[Class]] of a value.
   * @private
   * @param {Mixed} value The value.
   * @returns {String} The [[Class]].
   */
  function getClassOf(value) {
    return value == null
      ? capitalize(value)
      : toString.call(value).slice(8, -1);
  }

  /**
   * Checks if an object has the specified key as a direct property.
   * Lazily redefines itself on first call with the most accurate
   * implementation the environment supports.
   * @private
   * @param {Object} object The object to check.
   * @param {String} key The key to check for.
   * @returns {Boolean} Returns `true` if key is a direct property, else `false`.
   */
  function hasKey() {
    // lazy define for others (not as accurate)
    hasKey = function(object, key) {
      var parent = object != null && (object.constructor || Object).prototype;
      return !!parent && key in Object(object) && !(key in parent && object[key] === parent[key]);
    };
    // for modern browsers
    if (getClassOf(hasOwnProperty) == 'Function') {
      hasKey = function(object, key) {
        return object != null && hasOwnProperty.call(object, key);
      };
    }
    // for Safari 2
    else if ({}.__proto__ == Object.prototype) {
      hasKey = function(object, key) {
        var result = false;
        if (object != null) {
          object = Object(object);
          // Temporarily null the prototype so `in` only sees own keys.
          object.__proto__ = [object.__proto__, object.__proto__ = null, result = key in object][0];
        }
        return result;
      };
    }
    return hasKey.apply(this, arguments);
  }

  /**
   * Host objects can return type values that are different from their actual
   * data type. The objects we are concerned with usually return non-primitive
   * types of object, function, or unknown.
   * @private
   * @param {Mixed} object The owner of the property.
   * @param {String} property The property to check.
   * @returns {Boolean} Returns `true` if the property value is a non-primitive, else `false`.
   */
  function isHostType(object, property) {
    var type = object != null ? typeof object[property] : 'number';
    return !/^(?:boolean|number|string|undefined)$/.test(type) &&
      (type == 'object' ? !!object[property] : true);
  }

  /**
   * A bare-bones `Array#reduce` utility function.
   * @private
   * @param {Array} array The array to iterate over.
   * @param {Function} callback The function called per iteration.
   * @param {Mixed} accumulator Initial value of the accumulator.
   * @returns {Mixed} The accumulator.
   */
  function reduce(array, callback) {
    var accumulator = null;
    each(array, function(value, index) {
      accumulator = callback(accumulator, value, index, array);
    });
    return accumulator;
  }

  /**
   * Prepares a string for use in a RegExp constructor by making hyphens and spaces optional.
   * @private
   * @param {String} string The string to qualify.
   * @returns {String} The qualified string.
   */
  function qualify(string) {
    return String(string).replace(/([ -])(?!$)/g, '$1?');
  }

  /**
   * Removes leading and trailing whitespace from a string.
   * @private
   * @param {String} string The string to trim.
   * @returns {String} The trimmed string.
   */
  function trim(string) {
    return String(string).replace(/^ +| +$/g, '');
  }

  /**
   * Creates a new platform object.
   * @memberOf platform
   * @param {String} [ua = navigator.userAgent] The user agent string.
   * @returns {Object} A platform object.
*/ function parse(ua) { ua || (ua = userAgent); /** Temporary variable used over the script's lifetime */ var data, /** The CPU architecture */ arch = ua, /** Platform description array */ description = [], /** Platform alpha/beta indicator */ prerelease = null, /** A flag to indicate that environment features should be used to resolve the platform */ useFeatures = ua == userAgent, /** The browser/environment version */ version = useFeatures && opera && typeof opera.version == 'function' && opera.version(), /* Detectable layout engines (order is important) */ layout = getLayout([ { 'label': 'WebKit', 'pattern': 'AppleWebKit' }, 'iCab', 'Presto', 'NetFront', 'Tasman', 'Trident', 'KHTML', 'Gecko' ]), /* Detectable browser names (order is important) */ name = getName([ 'Adobe AIR', 'Arora', 'Avant Browser', 'Camino', 'Epiphany', 'Fennec', 'Flock', 'Galeon', 'GreenBrowser', 'iCab', 'Iceweasel', 'Iron', 'K-Meleon', 'Konqueror', 'Lunascape', 'Maxthon', 'Midori', 'Nook Browser', 'PhantomJS', 'Raven', 'Rekonq', 'RockMelt', 'SeaMonkey', { 'label': 'Silk', 'pattern': '(?:Cloud9|Silk)' }, 'Sleipnir', 'SlimBrowser', 'Sunrise', 'Swiftfox', 'WebPositive', 'Opera Mini', 'Opera', 'Chrome', { 'label': 'Firefox', 'pattern': '(?:Firefox|Minefield)' }, { 'label': 'IE', 'pattern': 'MSIE' }, 'Safari' ]), /* Detectable products (order is important) */ product = getProduct([ 'BlackBerry', { 'label': 'Galaxy S', 'pattern': 'GT-I9000' }, { 'label': 'Galaxy S2', 'pattern': 'GT-I9100' }, 'iPad', 'iPod', 'iPhone', 'Kindle', { 'label': 'Kindle Fire', 'pattern': '(?:Cloud9|Silk)' }, 'Nook', 'PlayBook', 'TouchPad', 'Transformer', 'Xoom' ]), /* Detectable manufacturers */ manufacturer = getManufacturer({ 'Apple': { 'iPad': 1, 'iPhone': 1, 'iPod': 1 }, 'Amazon': { 'Kindle': 1, 'Kindle Fire': 1 }, 'Asus': { 'Transformer': 1 }, 'Barnes & Noble': { 'Nook': 1 }, 'BlackBerry': { 'PlayBook': 1 }, 'HP': { 'TouchPad': 1 }, 'LG': { }, 'Motorola': { 'Xoom': 1 }, 'Nokia': { }, 'Samsung': { 'Galaxy S': 1, 
'Galaxy S2': 1 } }), /* Detectable OSes (order is important) */ os = getOS([ 'Android', 'CentOS', 'Debian', 'Fedora', 'FreeBSD', 'Gentoo', 'Haiku', 'Kubuntu', 'Linux Mint', 'Red Hat', 'SuSE', 'Ubuntu', 'Xubuntu', 'Cygwin', 'Symbian OS', 'hpwOS', 'webOS ', 'webOS', 'Tablet OS', 'Linux', 'Mac OS X', 'Macintosh', 'Mac', 'Windows 98;', 'Windows ' ]); /** * Picks the layout engine from an array of guesses. * @private * @param {Array} guesses An array of guesses. * @returns {String|Null} The detected layout engine. */ function getLayout(guesses) { return reduce(guesses, function(result, guess) { return result || RegExp('\\b' + ( guess.pattern || qualify(guess) ) + '\\b', 'i').exec(ua) && (guess.label || guess); }); } /** * Picks the manufacturer from an array of guesses. * @private * @param {Array} guesses An array of guesses. * @returns {String|Null} The detected manufacturer. */ function getManufacturer(guesses) { return reduce(guesses, function(result, value, key) { // lookup the manufacturer by product or scan the UA for the manufacturer return result || ( value[product] || value[0/*Opera 9.25 fix*/, /^[a-z]+/i.exec(product)] || RegExp('\\b' + (key.pattern || qualify(key)) + '(?:\\b|\\w*\\d)', 'i').exec(ua) ) && (key.label || key); }); } /** * Picks the browser name from an array of guesses. * @private * @param {Array} guesses An array of guesses. * @returns {String|Null} The detected browser name. */ function getName(guesses) { return reduce(guesses, function(result, guess) { return result || RegExp('\\b' + ( guess.pattern || qualify(guess) ) + '\\b', 'i').exec(ua) && (guess.label || guess); }); } /** * Picks the OS name from an array of guesses. * @private * @param {Array} guesses An array of guesses. * @returns {String|Null} The detected OS name. 
*/ function getOS(guesses) { return reduce(guesses, function(result, guess) { var pattern = guess.pattern || qualify(guess); if (!result && (result = RegExp('\\b' + pattern + '(?:/[\\d.]+|[ \\w.]*)', 'i').exec(ua))) { // platform tokens defined at data = { '6.2': '8', '6.1': 'Server 2008 R2 / 7', '6.0': 'Server 2008 / Vista', '5.2': 'Server 2003 / XP 64-bit', '5.1': 'XP', '5.01': '2000 SP1', '5.0': '2000', '4.0': 'NT', '4.90': 'ME' }; // detect Windows version from platform tokens if (/^Win/i.test(result) && (data = data[0/*Opera 9.25 fix*/, /[\d.]+$/.exec(result)])) { result = 'Windows ' + data; } // correct character case and cleanup result = format(String(result) .replace(RegExp(pattern, 'i'), guess.label || guess) .replace(/ ce$/i, ' CE') .replace(/hpw/i, 'web') .replace(/Macintosh/, 'Mac OS') .replace(/_PowerPC/i, ' OS') .replace(/(OS X) [^ \d]+/i, '$1') .replace(/\/(\d)/, ' $1') .replace(/_/g, '.') .replace(/(?: BePC|[ .]*fc[ \d.]+)$/i, '') .replace(/x86\.64/gi, 'x86_64') .split(' on ')[0]); } return result; }); } /** * Picks the product name from an array of guesses. * @private * @param {Array} guesses An array of guesses. * @returns {String|Null} The detected product name. 
*/ function getProduct(guesses) { return reduce(guesses, function(result, guess) { var pattern = guess.pattern || qualify(guess); if (!result && (result = RegExp('\\b' + pattern + ' *\\d+[.\\w_]*', 'i').exec(ua) || RegExp('\\b' + pattern + '(?:; *(?:[a-z]+[_-])?[a-z]+\\d+|[^ ();-]*)', 'i').exec(ua) )) { // split by forward slash and append product version if needed if ((result = String(guess.label || result).split('/'))[1] && !/[\d.]+/.test(result[0])) { result[0] += ' ' + result[1]; } // correct character case and cleanup guess = guess.label || guess; result = format(result[0] .replace(RegExp(pattern, 'i'), guess) .replace(RegExp('; *(?:' + guess + '[_-])?', 'i'), ' ') .replace(RegExp('(' + guess + ')(\\w)', 'i'), '$1 $2')); } return result; }); } /** * Resolves the version using an array of UA patterns. * @private * @param {Array} patterns An array of UA patterns. * @returns {String|Null} The detected version. */ function getVersion(patterns) { return reduce(patterns, function(result, pattern) { return result || (RegExp(pattern + '(?:-[\\d.]+/|(?: for [\\w-]+)?[ /-])([\\d.]+[^ ();/-]*)', 'i').exec(ua) || 0)[1] || null; }); } /** * Restores a previously overwritten platform object. * @memberOf platform * @type Function * @returns {Object} The current platform object. */ function noConflict() { window['platform'] = old; return this; } /** * Return platform description when the platform object is coerced to a string. * @name toString * @memberOf platform * @type Function * @returns {String} The platform description. */ function toStringPlatform() { return this.description || ''; } // convert layout to an array so we can add extra details layout && (layout = [layout]); // detect product names that contain their manufacturer's name if (manufacturer && !product) { product = getProduct([manufacturer]); } // detect simulators if (/\bSimulator\b/i.test(ua)) { product = (product ? 
product + ' ' : '') + 'Simulator'; } // detect iOS if (/^iP/.test(product)) { name || (name = 'Safari'); os = 'iOS' + ((data = / OS ([\d_]+)/i.exec(ua)) ? ' ' + data[1].replace(/_/g, '.') : ''); } // detect Kubuntu else if (name == 'Konqueror' && !/buntu/i.test(os)) { os = 'Kubuntu'; } // detect Android browsers else if (name == 'Chrome' && manufacturer) { name = 'Android Browser'; os = /Android/.test(os) ? os : 'Android'; } // detect false positives for Firefox/Safari else if (!name || (data = !/\bMinefield\b/i.test(ua) && /Firefox|Safari/.exec(name))) { // escape the `/` for Firefox 1 if (name && !product && /[\/,]|^[^(]+?\)/.test(ua.slice(ua.indexOf(data + '/') + 8))) { // clear name of false positives name = null; } // reassign a generic name if ((data = product || manufacturer || os) && (product || manufacturer || /Android|Symbian OS|Tablet OS|webOS/.test(os))) { name = /[a-z]+(?: Hat)?/i.exec(/Android/.test(os) ? os : data) + ' Browser'; } } // detect non-Opera versions (order is important) if (!version) { version = getVersion([ '(?:Cloud9|Opera ?Mini|Raven|Silk)', 'Version', qualify(name), '(?:Firefox|Minefield|NetFront)' ]); } // detect stubborn layout engines if (layout == 'iCab' && parseFloat(version) > 3) { layout = ['WebKit']; } else if (name == 'Konqueror' && /\bKHTML\b/i.test(ua)) { layout = ['KHTML']; } else if (data = /Opera/.test(name) && 'Presto' || /\b(?:Midori|Nook|Safari)\b/i.test(ua) && 'WebKit' || !layout && /\bMSIE\b/i.test(ua) && (/^Mac/.test(os) ? 
'Tasman' : 'Trident')) { layout = [data]; } // leverage environment features if (useFeatures) { // detect server-side environments // Rhino has a global function while others have a global object if (isHostType(thisBinding, 'global')) { if (java) { data = java.lang.System; arch = data.getProperty('os.arch'); os = os || data.getProperty('os.name') + ' ' + data.getProperty('os.version'); } if (typeof exports == 'object' && exports) { // if `thisBinding` is the [ModuleScope] if (thisBinding == oldWin && typeof system == 'object' && (data = [system])[0]) { os || (os = data[0].os || null); try { data[1] = require('ringo/engine').version; version = data[1].join('.'); name = 'RingoJS'; } catch(e) { if (data[0].global == freeGlobal) { name = 'Narwhal'; } } } else if (typeof process == 'object' && (data = process)) { name = 'Node.js'; arch = data.arch; os = data.platform; version = /[\d.]+/.exec(data.version)[0]; } } else if (getClassOf(window.environment) == 'Environment') { name = 'Rhino'; } } // detect Adobe AIR else if (getClassOf(data = window.runtime) == '<API key>') { name = 'Adobe AIR'; os = data.flash.system.Capabilities.os; } // detect PhantomJS else if (getClassOf(data = window.phantom) == 'RuntimeObject') { name = 'PhantomJS'; version = (data = data.version || null) && (data.major + '.' + data.minor + '.' + data.patch); } // detect IE compatibility modes else if (typeof doc.documentMode == 'number' && (data = /\bTrident\/(\d+)/i.exec(ua))) { // we're in compatibility mode when the Trident version + 4 doesn't // equal the document mode version = [version, doc.documentMode]; if ((data = +data[1] + 4) != version[1]) { description.push('IE ' + version[1] + ' mode'); layout[1] = ''; version[1] = data; } version = name == 'IE' ? 
String(version[1].toFixed(1)) : version[0]; } os = os && format(os); } // detect prerelease phases if (version && (data = /(?:[ab]|dp|pre|[ab]\d+pre)(?:\d+\+?)?$/i.exec(version) || /(?:alpha|beta)(?: ?\d)?/i.exec(ua + ';' + (useFeatures && nav.appMinorVersion)) || /\bMinefield\b/i.test(ua) && 'a')) { prerelease = /b/i.test(data) ? 'beta' : 'alpha'; version = version.replace(RegExp(data + '\\+?$'), '') + (prerelease == 'beta' ? beta : alpha) + (/\d+\+?/.exec(data) || ''); } // obscure Maxthon's unreliable version if (name == 'Maxthon' && version) { version = version.replace(/\.[\d.]+/, '.x'); } // detect Silk desktop/accelerated modes else if (name == 'Silk') { if (!/Mobi/i.test(ua)) { os = 'Android'; description.unshift('desktop mode'); } if (/Accelerated *= *true/i.test(ua)) { description.unshift('accelerated'); } } // detect Windows Phone desktop mode else if (name == 'IE' && (data = (/; *(?:XBLWP|ZuneWP)(\d+)/i.exec(ua) || 0)[1])) { name += ' Mobile'; os = 'Windows Phone OS ' + data + '.x'; description.unshift('desktop mode'); } // add mobile postfix else if ((name == 'IE' || name && !product && !/Browser/.test(name)) && (os == 'Windows CE' || /Mobi/i.test(ua))) { name += ' Mobile'; } // detect IE platform preview else if (name == 'IE' && useFeatures && typeof external == 'object' && !external) { description.unshift('platform preview'); } // detect BlackBerry OS version else if (/BlackBerry/.test(product) && (data = (RegExp(product.replace(/ +/g, ' *') + '/([.\\d]+)', 'i').exec(ua) || 0)[1] || version)) { os = 'Device Software ' + data; version = null; } // detect Opera identifying/masking itself as another browser else if (this != forOwn && ( (useFeatures && opera) || (/Opera/.test(name) && /\b(?:MSIE|Firefox)\b/i.test(ua)) || (name == 'Firefox' && /OS X (?:\d+\.){2,}/.test(os)) || (name == 'IE' && ( (os && !/^Win/.test(os) && version > 5.5) || /Windows XP/.test(os) && version > 8 || version == 8 && !/Trident/.test(ua) )) ) && !reOpera.test(data = 
parse.call(forOwn, ua.replace(reOpera, '') + ';')) && data.name) { // when "indentifying" the UA contains both Opera and the other browser's name data = 'ing as ' + data.name + ((data = data.version) ? ' ' + data : ''); if (reOpera.test(name)) { if (/IE/.test(data) && os == 'Mac OS') { os = null; } data = 'identify' + data; } // when "masking" the UA contains only the other browser's name else { data = 'mask' + data; if (operaClass) { name = format(operaClass.replace(/([a-z])([A-Z])/g, '$1 $2')); } else { name = 'Opera'; } if (/IE/.test(data)) { os = null; } if (!useFeatures) { version = null; } } layout = ['Presto']; description.push(data); } // detect WebKit Nightly and approximate Chrome/Safari versions if ((data = (/AppleWebKit\/([\d.]+\+?)/i.exec(ua) || 0)[1])) { // nightly builds are postfixed with a `+` data = [parseFloat(data), data]; if (name == 'Safari' && data[1].slice(-1) == '+') { name = 'WebKit Nightly'; prerelease = 'alpha'; version = data[1].slice(0, -1); } // clear incorrect browser versions else if (version == data[1] || version == (/Safari\/([\d.]+\+?)/i.exec(ua) || 0)[1]) { version = null; } // use the full Chrome version when available data = [data[0], (/Chrome\/([\d.]+)/i.exec(ua) || 0)[1]]; // detect JavaScriptCore if (!useFeatures || (/internal|\n/i.test(toString.toString()) && !data[1])) { layout[1] = 'like Safari'; data = (data = data[0], data < 400 ? 1 : data < 500 ? 2 : data < 526 ? 3 : data < 533 ? 4 : data < 534 ? '4+' : data < 535 ? 5 : '5'); } else { layout[1] = 'like Chrome'; data = data[1] || (data = data[0], data < 530 ? 1 : data < 532 ? 2 : data < 532.5 ? 3 : data < 533 ? 4 : data < 534.3 ? 5 : data < 534.7 ? 6 : data < 534.1 ? 7 : data < 534.13 ? 8 : data < 534.16 ? 9 : data < 534.24 ? 10 : data < 534.3 ? 11 : data < 535.1 ? 12 : data < 535.2 ? '13+' : data < 535.5 ? 15 : data < 535.7 ? 16 : '17'); } // add the postfix of ".x" or "+" for approximate versions layout[1] += ' ' + (data += typeof data == 'number' ? 
'.x' : /[.+]/.test(data) ? '' : '+'); // obscure version for some Safari 1-2 releases if (name == 'Safari' && (!version || parseInt(version) > 45)) { version = data; } } // strip incorrect OS versions if (version && version.indexOf(data = /[\d.]+$/.exec(os)) == 0 && ua.indexOf('/' + data + '-') > -1) { os = trim(os.replace(data, '')); } // add layout engine if (layout && !/Avant|Nook/.test(name) && ( /Browser|Lunascape|Maxthon/.test(name) || /^(?:Adobe|Arora|Midori|Phantom|Rekonq|Rock|Sleipnir|Web)/.test(name) && layout[1])) { // don't add layout details to description if they are falsey (data = layout[layout.length - 1]) && description.push(data); } // combine contextual information if (description.length) { description = ['(' + description.join('; ') + ')']; } // append manufacturer if (manufacturer && product && product.indexOf(manufacturer) < 0) { description.push('on ' + manufacturer); } // append product if (product) { description.push((/^on /.test(description[description.length -1]) ? '' : 'on ') + product); } // add browser/OS architecture if ((data = /\b(?:AMD|IA|Win|WOW|x86_|x)64\b/i).test(arch) && !/\bi686\b/i.test(arch)) { os = os && os + (data.test(os) ? '' : ' 64-bit'); if (name && (/WOW64/i.test(ua) || (useFeatures && /\w(?:86|32)$/.test(nav.cpuClass || nav.platform)))) { description.unshift('32-bit'); } } ua || (ua = null); /** * The platform object. * @name platform * @type Object */ return { /** * The browser/environment version. * @memberOf platform * @type String|Null */ 'version': name && version && (description.unshift(version), version), /** * The name of the browser/environment. * @memberOf platform * @type String|Null */ 'name': name && (description.unshift(name), name), /** * The name of the operating system. * @memberOf platform * @type String|Null */ 'os': os && (name && !(os == os.split(' ')[0] && (os == name.split(' ')[0] || product)) && description.push(product ? '(' + os + ')' : 'on ' + os), os), /** * The platform description. 
* @memberOf platform * @type String|Null */ 'description': description.length ? description.join(' ') : ua, /** * The name of the browser layout engine. * @memberOf platform * @type String|Null */ 'layout': layout && layout[0], /** * The name of the product's manufacturer. * @memberOf platform * @type String|Null */ 'manufacturer': manufacturer, /** * The alpha/beta release indicator. * @memberOf platform * @type String|Null */ 'prerelease': prerelease, /** * The name of the product hosting the browser. * @memberOf platform * @type String|Null */ 'product': product, /** * The browser's user agent string. * @memberOf platform * @type String|Null */ 'ua': ua, // avoid platform object conflicts in browsers 'noConflict': noConflict, // parses a user agent string into a platform object 'parse': parse, // returns the platform description 'toString': toStringPlatform }; } // expose platform // in Narwhal, Node.js, or RingoJS if (freeExports) { forOwn(parse(), function(value, key) { freeExports[key] = value; }); } // via curl.js or RequireJS else if (typeof define == 'function' && typeof define.amd == 'object' && define.amd) { define('platform', function() { return parse(); }); } // in a browser or Rhino else { // use square bracket notation so Closure Compiler won't munge `platform` // http://code.google.com/closure/compiler/docs/api-tutorial3.html#export window['platform'] = parse(); } }(this));
'use strict';

// tad-style test module: `t` is the function under test (an "assert a valid
// object-coercible value" helper), `a` is the assertion callable.
// Contract being verified: `t(x)` throws TypeError for values that cannot be
// represented as objects (falsy primitives, null, undefined) and returns any
// object-like argument unchanged (identity).
module.exports = function (t, a) {
	var x;
	// Falsy primitives must be rejected with TypeError.
	a.throws(function () { t(0); }, TypeError, "0");
	a.throws(function () { t(false); }, TypeError, "false");
	a.throws(function () { t(''); }, TypeError, "''");
	// Objects (including functions and wrapper objects) pass through as-is.
	a(t(x = {}), x, "Object");
	a(t(x = function () {}), x, "Function");
	// `new String(...)` is intentional here: a String *object* (not a string
	// primitive) must be accepted.
	a(t(x = new String('raz')), x, "String object"); //jslint: ignore
	a(t(x = new Date()), x, "Date");
	// Missing and null arguments must also be rejected.
	a.throws(function () { t(); }, TypeError, "Undefined");
	a.throws(function () { t(null); }, TypeError, "null");
};
#ifndef _ROUTE_H #define _ROUTE_H #include <net/dst.h> #include <net/inetpeer.h> #include <net/flow.h> #include <net/inet_sock.h> #include <linux/in_route.h> #include <linux/rtnetlink.h> #include <linux/rcupdate.h> #include <linux/route.h> #include <linux/ip.h> #include <linux/cache.h> #include <linux/security.h> #define RTO_ONLINK 0x01 #define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE)) struct fib_nh; struct fib_info; struct rtable { struct dst_entry dst; int rt_genid; unsigned int rt_flags; __u16 rt_type; __u8 rt_is_input; __u8 rt_uses_gateway; int rt_iif; /* Info on neighbour */ __be32 rt_gateway; /* Miscellaneous cached information */ u32 rt_pmtu; struct list_head rt_uncached; }; static inline bool rt_is_input_route(const struct rtable *rt) { return rt->rt_is_input != 0; } static inline bool rt_is_output_route(const struct rtable *rt) { return rt->rt_is_input == 0; } static inline __be32 rt_nexthop(const struct rtable *rt, __be32 daddr) { if (rt->rt_gateway) return rt->rt_gateway; return daddr; } struct ip_rt_acct { __u32 o_bytes; __u32 o_packets; __u32 i_bytes; __u32 i_packets; }; struct rt_cache_stat { unsigned int in_hit; unsigned int in_slow_tot; unsigned int in_slow_mc; unsigned int in_no_route; unsigned int in_brd; unsigned int in_martian_dst; unsigned int in_martian_src; unsigned int out_hit; unsigned int out_slow_tot; unsigned int out_slow_mc; unsigned int gc_total; unsigned int gc_ignored; unsigned int gc_goal_miss; unsigned int gc_dst_overflow; unsigned int in_hlist_search; unsigned int out_hlist_search; }; extern struct ip_rt_acct __percpu *ip_rt_acct; struct in_device; extern int ip_rt_init(void); extern void rt_cache_flush(struct net *net); extern void rt_flush_dev(struct net_device *dev); extern struct rtable *<API key>(struct net *, struct flowi4 *flp); extern struct rtable *<API key>(struct net *, struct flowi4 *flp, struct sock *sk); extern struct dst_entry *<API key>(struct net *net, struct dst_entry 
*dst_orig); static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 *flp) { return <API key>(net, flp, NULL); } static inline struct rtable *ip_route_output(struct net *net, __be32 daddr, __be32 saddr, u8 tos, int oif) { struct flowi4 fl4 = { .flowi4_oif = oif, .flowi4_tos = tos, .daddr = daddr, .saddr = saddr, }; return ip_route_output_key(net, &fl4); } static inline struct rtable *<API key>(struct net *net, struct flowi4 *fl4, struct sock *sk, __be32 daddr, __be32 saddr, __be16 dport, __be16 sport, __u8 proto, __u8 tos, int oif) { flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos, RT_SCOPE_UNIVERSE, proto, sk ? inet_sk_flowi_flags(sk) : 0, daddr, saddr, dport, sport, sock_net_uid(net, sk)); if (sk) <API key>(sk, flowi4_to_flowi(fl4)); return <API key>(net, fl4, sk); } static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 *fl4, __be32 daddr, __be32 saddr, __be32 gre_key, __u8 tos, int oif) { memset(fl4, 0, sizeof(*fl4)); fl4->flowi4_oif = oif; fl4->daddr = daddr; fl4->saddr = saddr; fl4->flowi4_tos = tos; fl4->flowi4_proto = IPPROTO_GRE; fl4->fl4_gre_key = gre_key; return ip_route_output_key(net, fl4); } extern int <API key>(struct sk_buff *skb, __be32 dst, __be32 src, u8 tos, struct net_device *devin); static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, u8 tos, struct net_device *devin) { int err; rcu_read_lock(); err = <API key>(skb, dst, src, tos, devin); if (!err) skb_dst_force(skb); rcu_read_unlock(); return err; } extern void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif, u32 mark, u8 protocol, int flow_flags); extern void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu); extern void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, u8 protocol, int flow_flags); extern void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk); extern void ip_rt_send_redirect(struct sk_buff *skb); extern unsigned int 
inet_addr_type(struct net *net, __be32 addr); extern unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr); extern void <API key>(struct in_device *); extern int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); extern void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); extern int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb); struct in_ifaddr; extern void fib_add_ifaddr(struct in_ifaddr *); extern void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *); static inline void ip_rt_put(struct rtable *rt) { /* dst_release() accepts a NULL parameter. * We rely on dst being first structure in struct rtable */ BUILD_BUG_ON(offsetof(struct rtable, dst) != 0); dst_release(&rt->dst); } #define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3) extern const __u8 ip_tos2prio[16]; static inline char rt_tos2priority(u8 tos) { return ip_tos2prio[IPTOS_TOS(tos)>>1]; } /* ip_route_connect() and ip_route_newports() work in tandem whilst * binding a socket for a new outgoing connection. * * In order to use IPSEC properly, we must, in the end, have a * route that was looked up using all available keys including source * and destination ports. * * However, if a source port needs to be allocated (the user specified * a wildcard source port) we need to obtain addressing information * in order to perform that allocation. * * So ip_route_connect() looks up a route using wildcarded source and * destination ports in the key, simply so that we can get a pair of * addresses to use for port allocation. * * Later, once the ports are allocated, ip_route_newports() will make * another route lookup if needed to make sure we catch any IPSEC * rules keyed on the port information. * * The callers allocate the flow key on their stack, and must pass in * the same flowi4 object to both the ip_route_connect() and the * ip_route_newports() calls. 
*/ static inline void <API key>(struct flowi4 *fl4, __be32 dst, __be32 src, u32 tos, int oif, u8 protocol, __be16 sport, __be16 dport, struct sock *sk, bool can_sleep) { __u8 flow_flags = 0; if (inet_sk(sk)->transparent) flow_flags |= FLOWI_FLAG_ANYSRC; if (can_sleep) flow_flags |= <API key>; flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, protocol, flow_flags, dst, src, dport, sport, sk->sk_uid); } static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst, __be32 src, u32 tos, int oif, u8 protocol, __be16 sport, __be16 dport, struct sock *sk, bool can_sleep) { struct net *net = sock_net(sk); struct rtable *rt; <API key>(fl4, dst, src, tos, oif, protocol, sport, dport, sk, can_sleep); if (!dst || !src) { rt = <API key>(net, fl4); if (IS_ERR(rt)) return rt; ip_rt_put(rt); <API key>(fl4, oif, tos, fl4->daddr, fl4->saddr); } <API key>(sk, flowi4_to_flowi(fl4)); return <API key>(net, fl4, sk); } static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable *rt, __be16 orig_sport, __be16 orig_dport, __be16 sport, __be16 dport, struct sock *sk) { if (sport != orig_sport || dport != orig_dport) { fl4->fl4_dport = dport; fl4->fl4_sport = sport; ip_rt_put(rt); <API key>(fl4, sk->sk_bound_dev_if, RT_CONN_FLAGS(sk), fl4->daddr, fl4->saddr); <API key>(sk, flowi4_to_flowi(fl4)); return <API key>(sock_net(sk), fl4, sk); } return rt; } static inline int inet_iif(const struct sk_buff *skb) { int iif = skb_rtable(skb)->rt_iif; if (iif) return iif; return skb->skb_iif; } extern int <API key>; static inline int ip4_dst_hoplimit(const struct dst_entry *dst) { int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); if (hoplimit == 0) hoplimit = <API key>; return hoplimit; } #endif /* _ROUTE_H */
#ifndef _NM256_H_ #define _NM256_H_ #include <linux/spinlock.h> #include <linux/interrupt.h> #include "ac97.h" /* The revisions that we currently handle. */ enum nm256rev { REV_NM256AV, REV_NM256ZX }; /* Per-card structure. */ struct nm256_info { /* Magic number used to verify that this struct is valid. */ #define NM_MAGIC_SIG 0x55aa00ff int magsig; /* Revision number */ enum nm256rev rev; struct ac97_hwint mdev; /* Our audio device numbers. */ int dev[2]; /* The # of times each device has been opened. (Should only be 0 or 1). */ int opencnt[2]; /* We use two devices, because we can do simultaneous play and record. This keeps track of which device is being used for what purpose; these are the actual device numbers. */ int dev_for_play; int dev_for_record; spinlock_t lock; /* The mixer device. */ int mixer_oss_dev; /* * Can only be opened once for each operation. These aren't set * until an actual I/O operation is performed; this allows one * device to be open for read/write without inhibiting I/O to * the other device. */ int is_open_play; int is_open_record; /* Non-zero if we're currently playing a sample. */ int playing; /* Ditto for recording a sample. */ int recording; /* The two memory ports. */ struct nm256_ports { /* Physical address of the port. */ u32 physaddr; /* Our mapped-in pointer. */ char __iomem *ptr; /* PTR's offset within the physical port. */ u32 start_offset; /* And the offset of the end of the buffer. */ u32 end_offset; } port[2]; /* The following are offsets within memory port 1. */ u32 coeffBuf; u32 allCoeffBuf; /* Record and playback buffers. */ u32 abuf1, abuf2; /* Offset of the AC97 mixer in memory port 2. */ u32 mixer; /* Offset of the mixer status register in memory port 2. */ u32 mixer_status_offset; /* Non-zero if we have written initial values to the mixer. */ u8 mixer_values_init; /* * Status mask bit; (*mixer_status_loc & mixer_status_mask) == 0 means * it's ready. 
*/ u16 mixer_status_mask; /* The sizes of the playback and record ring buffers. */ u32 playbackBufferSize; u32 recordBufferSize; /* Are the coefficient values in the memory cache current? */ u8 coeffsCurrent; /* For writes, the amount we last wrote. */ u32 requested_amt; /* The start of the block currently playing. */ u32 curPlayPos; /* The amount of data we were requested to record. */ u32 requestedRecAmt; /* The offset of the currently-recording block. */ u32 curRecPos; /* The destination buffer. */ char *recBuf; /* Our IRQ number. */ int irq; /* A flag indicating how many times we've grabbed the IRQ. */ int has_irq; /* The card interrupt service routine. */ irq_handler_t introutine; /* Current audio config, cached. */ struct sinfo { u32 samplerate; u8 bits; u8 stereo; } sinfo[2]; /* goes with each device */ /* The cards are stored in a chain; this is the next card. */ struct nm256_info *next_card; }; /* The BIOS signature. */ #define NM_SIGNATURE 0x4e4d0000 /* Signature mask. */ #define NM_SIG_MASK 0xffff0000 /* Size of the second memory area. */ #define NM_PORT2_SIZE 4096 /* The base offset of the mixer in the second memory area. */ #define NM_MIXER_OFFSET 0x600 /* The maximum size of a coefficient entry. */ #define NM_MAX_COEFFICIENT 0x5000 /* The interrupt register. */ #define NM_INT_REG 0xa04 /* And its bits. */ #define NM_PLAYBACK_INT 0x40 #define NM_RECORD_INT 0x100 #define NM_MISC_INT_1 0x4000 #define NM_MISC_INT_2 0x1 #define NM_ACK_INT(CARD, X) nm256_writePort16((CARD), 2, NM_INT_REG, (X) << 1) /* The AV's "mixer ready" status bit and location. */ #define <API key> 0xa04 #define NM_MIXER_READY_MASK 0x0800 #define NM_MIXER_PRESENCE 0xa06 #define NM_PRESENCE_MASK 0x0050 #define NM_PRESENCE_VALUE 0x0040 /* * For the ZX. It uses the same interrupt register, but it holds 32 * bits instead of 16. 
*/ #define NM2_PLAYBACK_INT 0x10000 #define NM2_RECORD_INT 0x80000 #define NM2_MISC_INT_1 0x8 #define NM2_MISC_INT_2 0x2 #define NM2_ACK_INT(CARD, X) nm256_writePort32((CARD), 2, NM_INT_REG, (X)) /* The ZX's "mixer ready" status bit and location. */ #define <API key> 0xa06 #define <API key> 0x0800 /* The playback registers start from here. */ #define <API key> 0x0 /* The record registers start from here. */ #define <API key> 0x200 /* The rate register is located 2 bytes from the start of the register area. */ #define NM_RATE_REG_OFFSET 2 /* Mono/stereo flag, number of bits on playback, and rate mask. */ #define NM_RATE_STEREO 1 #define NM_RATE_BITS_16 2 #define NM_RATE_MASK 0xf0 /* Playback enable register. */ #define <API key> (<API key> + 0x1) #define <API key> 1 #define NM_PLAYBACK_ONESHOT 2 #define NM_PLAYBACK_FREERUN 4 /* Mutes the audio output. */ #define NM_AUDIO_MUTE_REG (<API key> + 0x18) #define NM_AUDIO_MUTE_LEFT 0x8000 #define NM_AUDIO_MUTE_RIGHT 0x0080 /* Recording enable register. */ #define <API key> (<API key> + 0) #define <API key> 1 #define NM_RECORD_FREERUN 2 #define NM_RBUFFER_START (<API key> + 0x4) #define NM_RBUFFER_END (<API key> + 0x10) #define NM_RBUFFER_WMARK (<API key> + 0xc) #define NM_RBUFFER_CURRP (<API key> + 0x8) #define NM_PBUFFER_START (<API key> + 0x4) #define NM_PBUFFER_END (<API key> + 0x14) #define NM_PBUFFER_WMARK (<API key> + 0xc) #define NM_PBUFFER_CURRP (<API key> + 0x8) /* A few trivial routines to make it easier to work with the registers on the chip. */ /* This is a common code portion used to fix up the port offsets. 
*/ #define NM_FIX_PORT \ if (port < 1 || port > 2 || card == NULL) \ return -1; \ \ if (offset < card->port[port - 1].start_offset \ || offset >= card->port[port - 1].end_offset) { \ printk (KERN_ERR "Bad access: port %d, offset 0x%x\n", port, offset); \ return -1; \ } \ offset -= card->port[port - 1].start_offset; #define DEFwritePortX(X, func) \ static inline int nm256_writePort##X (struct nm256_info *card,\ int port, int offset, int value)\ {\ u##X __iomem *addr;\ \ if (nm256_debug > 1)\ printk (KERN_DEBUG "Writing 0x%x to %d:0x%x\n", value, port, offset);\ \ NM_FIX_PORT;\ \ addr = (u##X __iomem *)(card->port[port - 1].ptr + offset);\ func (value, addr);\ return 0;\ } DEFwritePortX (8, writeb) DEFwritePortX (16, writew) DEFwritePortX (32, writel) #define DEFreadPortX(X, func) \ static inline u##X nm256_readPort##X (struct nm256_info *card,\ int port, int offset)\ {\ u##X __iomem *addr;\ \ NM_FIX_PORT\ \ addr = (u##X __iomem *)(card->port[port - 1].ptr + offset);\ return func(addr);\ } DEFreadPortX (8, readb) DEFreadPortX (16, readw) DEFreadPortX (32, readl) static inline int nm256_writeBuffer8 (struct nm256_info *card, u8 *src, int port, int offset, int amt) { NM_FIX_PORT; memcpy_toio (card->port[port - 1].ptr + offset, src, amt); return 0; } static inline int nm256_readBuffer8 (struct nm256_info *card, u8 *dst, int port, int offset, int amt) { NM_FIX_PORT; memcpy_fromio (dst, card->port[port - 1].ptr + offset, amt); return 0; } /* Returns a non-zero value if we should use the coefficient cache. */ static int <API key> (struct nm256_info *card); #endif /* * Local variables: * c-basic-offset: 4 * End: */
Kodi is provided under <API key>: GPL-2.0-or-later Being under the terms of the GNU General Public License v2.0 or later, according with LICENSES/GPL-2.0-or-later In addition, other licenses may also apply. Please see LICENSES/README.md for more details.
/* * board/config.h - configuration options, board specific */ #ifndef _M54455EVB_H #define _M54455EVB_H /* * High Level Configuration Options * (easy to change) */ #define CONFIG_MCF5445x /* define processor family */ #define CONFIG_M54455 /* define processor type */ #define CONFIG_M54455EVB /* M54455EVB board */ #define CONFIG_MCFUART #define <API key> (0) #define CONFIG_BAUDRATE 115200 #define <API key> { 9600 , 19200 , 38400 , 57600, 115200 } #undef CONFIG_WATCHDOG #define CONFIG_TIMESTAMP /* Print image info with timestamp */ /* * BOOTP options */ #define <API key> #define <API key> #define <API key> #define <API key> /* Command line configuration */ #include <config_cmd_default.h> #define CONFIG_CMD_BOOTD #define CONFIG_CMD_CACHE #define CONFIG_CMD_DATE #define CONFIG_CMD_DHCP #define CONFIG_CMD_ELF #define CONFIG_CMD_EXT2 #define CONFIG_CMD_FAT #define CONFIG_CMD_FLASH #define CONFIG_CMD_I2C #define CONFIG_CMD_IDE #define CONFIG_CMD_JFFS2 #define CONFIG_CMD_MEMORY #define CONFIG_CMD_MISC #define CONFIG_CMD_MII #define CONFIG_CMD_NET #undef CONFIG_CMD_PCI #define CONFIG_CMD_PING #define CONFIG_CMD_REGINFO #define CONFIG_CMD_SPI #define CONFIG_CMD_SF #undef CONFIG_CMD_LOADB #undef CONFIG_CMD_LOADS /* Network configuration */ #define CONFIG_MCFFEC #ifdef CONFIG_MCFFEC # define CONFIG_NET_MULTI 1 # define CONFIG_MII 1 # define CONFIG_MII_INIT 1 # define <API key> # define <API key> 8 # define <API key> # define <API key> 0 # define <API key> 0 # define <API key> <API key> # define <API key> <API key> # define MCFFEC_TOUT_LOOP 50000 # define CONFIG_HAS_ETH1 # define CONFIG_BOOTDELAY 1 /* autoboot after 5 seconds */ # define CONFIG_BOOTARGS "root=/dev/mtdblock1 rw rootfstype=jffs2 ip=none mtdparts=physmap-flash.0:5M(kernel)ro,-(jffs2)" # define CONFIG_ETHADDR 00:e0:0c:bc:e5:60 # define CONFIG_ETH1ADDR 00:e0:0c:bc:e5:61 # define CONFIG_ETHPRIME "FEC0" # define CONFIG_IPADDR 192.162.1.2 # define CONFIG_NETMASK 255.255.255.0 # define CONFIG_SERVERIP 192.162.1.1 # 
define CONFIG_GATEWAYIP 192.162.1.1 # define <API key> /* If <API key> is not defined - hardcoded */ # ifndef <API key> # define FECDUPLEX FULL # define FECSPEED _100BASET # else # ifndef <API key> # define <API key> # endif # endif /* <API key> */ #endif #define CONFIG_HOSTNAME M54455EVB #ifdef <API key> /* ST Micro serial flash */ #define <API key> 0x40010013 #define <API key> \ "netdev=eth0\0" \ "inpclk=" MK_STR(<API key>) "\0" \ "loadaddr=0x40010000\0" \ "sbfhdr=sbfhdr.bin\0" \ "uboot=u-boot.bin\0" \ "load=tftp ${loadaddr} ${sbfhdr};" \ "tftp " MK_STR(<API key>) " ${uboot} \0" \ "upd=run load; run prog\0" \ "prog=sf probe 0:1 10000 1;" \ "sf erase 0 30000;" \ "sf write ${loadaddr} 0 0x30000;" \ "save\0" \ "" #else /* Atmel and Intel */ #ifdef <API key> # define <API key> 0x0403FFFF #elif defined(<API key>) # define <API key> 0x3FFFF #endif #define <API key> \ "netdev=eth0\0" \ "inpclk=" MK_STR(<API key>) "\0" \ "loadaddr=0x40010000\0" \ "uboot=u-boot.bin\0" \ "load=tftp ${loadaddr} ${uboot}\0" \ "upd=run load; run prog\0" \ "prog=prot off " MK_STR(<API key>) \ " " MK_STR(<API key>) ";" \ "era " MK_STR(<API key>) " " \ MK_STR(<API key>) ";" \ "cp.b ${loadaddr} " MK_STR(<API key>) \ " ${filesize}; save\0" \ "" #endif /* ATA configuration */ #define <API key> #define <API key> #define CONFIG_IDE_RESET 1 #define CONFIG_IDE_PREINIT 1 #define CONFIG_ATAPI #undef CONFIG_LBA48 #define <API key> 1 #define <API key> 2 #define <API key> 0x90000000 #define <API key> 0 #define <API key> 0xA0 /* Offset for data I/O */ #define <API key> 0xA0 /* Offset for normal register accesses */ #define <API key> 0xC0 /* Offset for alternate registers */ #define <API key> 4 /* Interval between registers */ /* Realtime clock */ #define CONFIG_MCFRTC #undef RTC_DEBUG #define <API key> (32 * CONFIG_SYS_HZ) /* Timer */ #define CONFIG_MCFTMR #undef CONFIG_MCFPIT /* I2c */ #define CONFIG_FSL_I2C #define CONFIG_HARD_I2C /* I2C with hardware support */ #undef CONFIG_SOFT_I2C /* I2C bit-banged */ 
#define <API key> 80000 /* I2C speed and slave address */ #define <API key> 0x7F #define <API key> 0x58000 #define CONFIG_SYS_IMMR CONFIG_SYS_MBAR /* DSPI and Serial Flash */ #define CONFIG_CF_SPI #define CONFIG_CF_DSPI #define CONFIG_HARD_SPI #define <API key> 0x13 #ifdef CONFIG_CMD_SPI # define CONFIG_SPI_FLASH # define <API key> # define <API key> (DSPI_CTAR_TRSZ(7) | \ <API key> | \ DSPI_CTAR_PASC(0) | \ DSPI_CTAR_PDT(0) | \ DSPI_CTAR_CSSCK(0) | \ DSPI_CTAR_ASC(0) | \ DSPI_CTAR_DT(1)) #endif /* PCI */ #ifdef CONFIG_CMD_PCI #define CONFIG_PCI 1 #define CONFIG_PCI_PNP 1 #define <API key> 1 #define <API key> 4 #define <API key> 0xA0000000 #define <API key> <API key> #define <API key> 0x10000000 #define <API key> 0xB1000000 #define <API key> <API key> #define <API key> 0x01000000 #define <API key> 0xB0000000 #define <API key> <API key> #define <API key> 0x01000000 #endif /* FPGA - Spartan 2 */ /* experiment #define CONFIG_FPGA CONFIG_SYS_SPARTAN3 #define CONFIG_FPGA_COUNT 1 #define <API key> #define <API key> */ /* Input, PCI, Flexbus, and VCO */ #define CONFIG_EXTRA_CLOCK #define CONFIG_PRAM 2048 /* 2048 KB */ #define CONFIG_SYS_PROMPT "-> " #define CONFIG_SYS_LONGHELP /* undef to save memory */ #if defined(CONFIG_CMD_KGDB) #define CONFIG_SYS_CBSIZE 1024 /* Console I/O Buffer Size */ #else #define CONFIG_SYS_CBSIZE 256 /* Console I/O Buffer Size */ #endif #define CONFIG_SYS_PBSIZE (CONFIG_SYS_CBSIZE+sizeof(CONFIG_SYS_PROMPT)+16) /* Print Buffer Size */ #define CONFIG_SYS_MAXARGS 16 /* max number of command args */ #define CONFIG_SYS_BARGSIZE CONFIG_SYS_CBSIZE /* Boot Argument Buffer Size */ #define <API key> (<API key> + 0x10000) #define CONFIG_SYS_HZ 1000 #define CONFIG_SYS_MBAR 0xFC000000 /* * Low Level Configuration Settings * (address mappings, register initial values, etc.) * You should know what you are doing if you make changes here. 
*/ #define <API key> 0x80000000 #define <API key> 0x8000 /* Size of used area in internal SRAM */ #define <API key> 0x221 #define <API key> ((<API key> - <API key>) - 32) #define <API key> <API key> #define <API key> (<API key> - 32) #define <API key> 0x40000000 #define <API key> 0x48000000 #define <API key> 256 /* SDRAM size in MB */ #define <API key> 0x65311610 #define <API key> 0x59670000 #define <API key> 0xEA0B2000 #define <API key> 0x40010000 #define <API key> 0x00010033 #define <API key> 0xAA #define <API key> <API key> + 0x400 #define <API key> ((<API key> - 3) << 20) #ifdef CONFIG_CF_SBF # define <API key> (<API key> + 0x400) #else # define <API key> (<API key> + 0x400) #endif #define <API key> 64*1024 #define <API key> (256 << 10) /* Reserve 256 kB for Monitor */ #define <API key> (128 << 10) /* Reserve 128 kB for malloc() */ /* * For booting Linux, the board info and command line data * have to be in the first 8 MB of memory, since this is * the maximum mapped by the Linux kernel during initialization ?? 
*/ /* Initial Memory map for Linux */ #define <API key> (<API key> + (<API key> << 20)) /* * Configuration for environment * Environment is embedded in u-boot in the second sector of the flash */ #ifdef CONFIG_CF_SBF # define <API key> # define CONFIG_ENV_SPI_CS 1 #else # define <API key> 1 #endif #undef <API key> #ifdef <API key> # define <API key> CONFIG_SYS_CS0_BASE # define <API key> CONFIG_SYS_CS1_BASE # define CONFIG_ENV_OFFSET 0x30000 # define CONFIG_ENV_SIZE 0x2000 # define <API key> 0x10000 #endif #ifdef <API key> # define <API key> CONFIG_SYS_CS0_BASE # define <API key> CONFIG_SYS_CS0_BASE # define <API key> CONFIG_SYS_CS1_BASE # define CONFIG_ENV_ADDR (<API key> + 0x4000) # define <API key> 0x2000 #endif #ifdef <API key> # define <API key> CONFIG_SYS_CS0_BASE # define <API key> CONFIG_SYS_CS0_BASE # define <API key> CONFIG_SYS_CS1_BASE # define CONFIG_ENV_ADDR (<API key> + 0x40000) # define CONFIG_ENV_SIZE 0x2000 # define <API key> 0x20000 #endif #define <API key> #ifdef <API key> # define <API key> 1 # define <API key> 1 # define <API key> 0x1000000 /* Max size that the board might have */ # define <API key> FLASH_CFI_8BIT # define <API key> 2 /* max number of memory banks */ # define <API key> 137 /* max number of sectors on one chip */ # define <API key> /* "Real" (hardware) sectors protection */ # define <API key> # define <API key> { CONFIG_SYS_CS0_BASE, CONFIG_SYS_CS1_BASE } # define <API key> #ifdef <API key> # define <API key> 4 # define <API key> 11 # define <API key> {1, 2, 1, 7} # define <API key> {0x4000, 0x2000, 0x8000, 0x10000} #endif #endif /* * This is setting for JFFS2 support in u-boot. * NOTE: Enable CONFIG_CMD_JFFS2 for JFFS2 support. 
*/ #ifdef CONFIG_CMD_JFFS2 #ifdef CF_STMICRO_BOOT # define CONFIG_JFFS2_DEV "nor1" # define <API key> 0x01000000 # define <API key> (<API key> + 0x500000) #endif #ifdef <API key> # define CONFIG_JFFS2_DEV "nor1" # define <API key> 0x01000000 # define <API key> (<API key> + 0x500000) #endif #ifdef <API key> # define CONFIG_JFFS2_DEV "nor0" # define <API key> (0x01000000 - 0x500000) # define <API key> (<API key> + 0x500000) #endif #endif #define <API key> 16 #define ICACHE_STATUS (<API key> + \ <API key> - 8) #define DCACHE_STATUS (<API key> + \ <API key> - 4) #define <API key> (CF_CACR_BCINVA + CF_CACR_ICINVA) #define <API key> (CF_CACR_DCINVA) #define <API key> (<API key> | \ CF_ADDRMASK(<API key>) | \ CF_ACR_EN | CF_ACR_SM_ALL) #define <API key> (CF_CACR_BEC | CF_CACR_IEC | \ CF_CACR_ICINVA | CF_CACR_EUSP) #define <API key> ((<API key> | \ CF_CACR_DEC | CF_CACR_DDCM_P | \ CF_CACR_DCINVA) & ~CF_CACR_ICINVA) /* * CS0 - NOR Flash 1, 2, 4, or 8MB * CS1 - CompactFlash and registers * CS2 - CPLD * CS3 - FPGA * CS4 - Available * CS5 - Available */ #if defined(<API key>) || defined(<API key>) /* Atmel Flash */ #define CONFIG_SYS_CS0_BASE 0x04000000 #define CONFIG_SYS_CS0_MASK 0x00070001 #define CONFIG_SYS_CS0_CTRL 0x00001140 /* Intel Flash */ #define CONFIG_SYS_CS1_BASE 0x00000000 #define CONFIG_SYS_CS1_MASK 0x01FF0001 #define CONFIG_SYS_CS1_CTRL 0x00000D60 #define <API key> CONFIG_SYS_CS0_BASE #else /* Intel Flash */ #define CONFIG_SYS_CS0_BASE 0x00000000 #define CONFIG_SYS_CS0_MASK 0x01FF0001 #define CONFIG_SYS_CS0_CTRL 0x00000D60 /* Atmel Flash */ #define CONFIG_SYS_CS1_BASE 0x04000000 #define CONFIG_SYS_CS1_MASK 0x00070001 #define CONFIG_SYS_CS1_CTRL 0x00001140 #define <API key> CONFIG_SYS_CS1_BASE #endif /* CPLD */ #define CONFIG_SYS_CS2_BASE 0x08000000 #define CONFIG_SYS_CS2_MASK 0x00070001 #define CONFIG_SYS_CS2_CTRL 0x003f1140 /* FPGA */ #define CONFIG_SYS_CS3_BASE 0x09000000 #define CONFIG_SYS_CS3_MASK 0x00070001 #define CONFIG_SYS_CS3_CTRL 0x00000020 #endif /* 
_M54455EVB_H */
<?php PHPExcel_Autoloader::register(); // As we always try to run the autoloader before anything else, we can use it to do a few // simple checks and initialisations //<API key>::register(); // check mbstring.func_overload if (ini_get('mbstring.func_overload') & 2) { throw new PHPExcel_Exception('Multibyte function overloading in PHP must be disabled for string functions (2).'); } <API key>::buildCharacterSets(); class PHPExcel_Autoloader { /** * Register the Autoloader with SPL * */ public static function register() { if (function_exists('__autoload')) { // Register any existing autoloader function with SPL, so we don't get any clashes <API key>('__autoload'); } // Register ourselves with SPL if (version_compare(PHP_VERSION, '5.3.0') >= 0) { return <API key>(array('PHPExcel_Autoloader', 'load'), true, true); } else { return <API key>(array('PHPExcel_Autoloader', 'load')); } } /** * Autoload a class identified by name * * @param string $pClassName Name of the object to load */ public static function load($pClassName) { if ((class_exists($pClassName, false)) || (strpos($pClassName, 'PHPExcel') !== 0)) { // Either already loaded, or not a PHPExcel class request return false; } $pClassFilePath = PHPEXCEL_ROOT . str_replace('_', DIRECTORY_SEPARATOR, $pClassName) . '.php'; if ((file_exists($pClassFilePath) === false) || (is_readable($pClassFilePath) === false)) { // Can't load return false; } require($pClassFilePath); } }
using Xunit; namespace System.Numerics.Tests { public class op_leftshiftTest { private static int s_samples = 10; private static Random s_random = new Random(100); [Fact] public static void RunLeftShiftTests() { byte[] tempByteArray1 = new byte[0]; byte[] tempByteArray2 = new byte[0]; // LeftShift Method - Large BigIntegers - large + Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = <API key>(s_random, 2); <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Large BigIntegers - small + Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = new byte[] { (byte)s_random.Next(1, 32) }; <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Large BigIntegers - 32 bit Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = new byte[] { (byte)32 }; <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Large BigIntegers - large - Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = <API key>(s_random, 2); <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Large BigIntegers - small - Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = new byte[] { unchecked((byte)s_random.Next(-31, 0)) }; <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Large BigIntegers - -32 bit Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = new byte[] { (byte)0xe0 }; <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Large BigIntegers - 0 bit Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random); tempByteArray2 = new 
byte[] { (byte)0 }; <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Small BigIntegers - large + Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = <API key>(s_random, 2); <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Small BigIntegers - small + Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = new byte[] { (byte)s_random.Next(1, 32) }; <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Small BigIntegers - 32 bit Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = new byte[] { (byte)32 }; <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Small BigIntegers - large - Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = <API key>(s_random, 2); <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Small BigIntegers - small - Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = new byte[] { unchecked((byte)s_random.Next(-31, 0)) }; <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Small BigIntegers - -32 bit Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = new byte[] { (byte)0xe0 }; <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Small BigIntegers - 0 bit Shift for (int i = 0; i < s_samples; i++) { tempByteArray1 = GetRandomByteArray(s_random, 2); tempByteArray2 = new byte[] { (byte)0 }; <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Positive BigIntegers - Shift to 0 for (int i = 0; i < s_samples; 
i++) { tempByteArray1 = <API key>(s_random, 100); tempByteArray2 = BitConverter.GetBytes(s_random.Next(-1000, -8 * tempByteArray1.Length)); <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } // LeftShift Method - Negative BigIntegers - Shift to -1 for (int i = 0; i < s_samples; i++) { tempByteArray1 = <API key>(s_random, 100); tempByteArray2 = BitConverter.GetBytes(s_random.Next(-1000, -8 * tempByteArray1.Length)); <API key>(Print(tempByteArray2) + Print(tempByteArray1) + "b<<"); } } private static void <API key>(string opstring) { StackCalc sc = new StackCalc(opstring); while (sc.DoNextOperation()) { Assert.Equal(sc.snCalc.Peek().ToString(), sc.myCalc.Peek().ToString()); } } private static byte[] GetRandomByteArray(Random random) { return GetRandomByteArray(random, random.Next(0, 1024)); } private static byte[] GetRandomByteArray(Random random, int size) { return MyBigIntImp.GetRandomByteArray(random, size); } private static Byte[] <API key>(Random random, int size) { byte[] value = new byte[size]; for (int i = 0; i < value.Length; ++i) { value[i] = (byte)random.Next(0, 256); } value[value.Length - 1] &= 0x7F; return value; } private static Byte[] <API key>(Random random, int size) { byte[] value = new byte[size]; for (int i = 0; i < value.Length; ++i) { value[i] = (byte)random.Next(0, 256); } value[value.Length - 1] |= 0x80; return value; } private static String Print(byte[] bytes) { return MyBigIntImp.Print(bytes); } } }
<?php namespace Symfony\Component\Form\Guess; /** * Base class for guesses made by <API key> implementation * * Each instance contains a confidence value about the correctness of the guess. * Thus an instance with confidence HIGH_CONFIDENCE is more likely to be * correct than an instance with confidence LOW_CONFIDENCE. * * @author Bernhard Schussek <bernhard.schussek@symfony.com> */ abstract class Guess { /** * Marks an instance with a value that is very likely to be correct * @var integer */ const HIGH_CONFIDENCE = 2; /** * Marks an instance with a value that is likely to be correct * @var integer */ const MEDIUM_CONFIDENCE = 1; /** * Marks an instance with a value that may be correct * @var integer */ const LOW_CONFIDENCE = 0; /** * The list of allowed confidence values * @var array */ private static $confidences = array( self::HIGH_CONFIDENCE, self::MEDIUM_CONFIDENCE, self::LOW_CONFIDENCE, ); /** * The confidence about the correctness of the value * * One of HIGH_CONFIDENCE, MEDIUM_CONFIDENCE and LOW_CONFIDENCE. * * @var integer */ private $confidence; /** * Returns the guess most likely to be correct from a list of guesses * * If there are multiple guesses with the same, highest confidence, the * returned guess is any of them. * * @param array $guesses A list of guesses * * @return Guess The guess with the highest confidence */ public static function getBestGuess(array $guesses) { usort($guesses, function ($a, $b) { return $b->getConfidence() - $a->getConfidence(); }); return count($guesses) > 0 ? 
$guesses[0] : null; } /** * Constructor * * @param integer $confidence The confidence */ public function __construct($confidence) { if (!in_array($confidence, self::$confidences)) { throw new \<API key>(sprintf('The confidence should be one of "%s"', implode('", "', self::$confidences))); } $this->confidence = $confidence; } /** * Returns the confidence that the guessed value is correct * * @return integer One of the constants HIGH_CONFIDENCE, MEDIUM_CONFIDENCE * and LOW_CONFIDENCE */ public function getConfidence() { return $this->confidence; } }
/* HORIZONTAL */ /* increase bottom margin to fit the pips */ .<API key>.ui-slider-pips { margin-bottom: 1.4em; } /* default hide the labels and pips that arnt visible */ /* we just use css to hide incase we want to show certain */ /* labels/pips individually later */ .ui-slider-pips .ui-slider-label, .ui-slider-pips .ui-slider-pip-hide { display: none; } /* now we show any labels that we've set to show in the options */ .ui-slider-pips .ui-slider-pip-label .ui-slider-label { display: block; } /* PIP/LABEL WRAPPER */ /* position each pip absolutely just below the default slider */ /* and also prevent accidental selection */ .ui-slider-pips .ui-slider-pip { width: 2em; height: 1em; line-height: 1em; position: absolute; font-size: 0.8em; color: #999; overflow: visible; text-align: center; top: 20px; left: 20px; margin-left: -1em; cursor: pointer; -<API key>: none; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } .ui-state-disabled.ui-slider-pips .ui-slider-pip { cursor: default; } /* little pip/line position & size */ .ui-slider-pips .ui-slider-line { background: #999; width: 1px; height: 3px; position: absolute; left: 50%; } /* the text label postion & size */ /* it overflows so no need for width to be accurate */ .ui-slider-pips .ui-slider-label { position: absolute; top: 5px; left: 50%; margin-left: -1em; width: 2em; } /* make it easy to see when we hover a label */ .ui-slider-pips:not(.ui-slider-disabled) .ui-slider-pip:hover .ui-slider-label { color: black; font-weight: bold; } /* VERTICAL */ /* vertical slider needs right-margin, not bottom */ .ui-slider-vertical.ui-slider-pips { margin-bottom: 1em; margin-right: 2em; } /* align vertical pips left and to right of the slider */ .ui-slider-vertical.ui-slider-pips .ui-slider-pip { text-align: left; top: auto; left: 20px; margin-left: 0; margin-bottom: -0.5em; } /* vertical line/pip should be horizontal instead */ 
.ui-slider-vertical.ui-slider-pips .ui-slider-line { width: 3px; height: 1px; position: absolute; top: 50%; left: 0; } .ui-slider-vertical.ui-slider-pips .ui-slider-label { top: 50%; left: 0.5em; margin-left: 0; margin-top: -0.5em; width: 2em; } /* FLOATING HORIZTONAL TOOLTIPS */ /* remove the godawful looking focus outline on handle and float */ .ui-slider-float .ui-slider-handle:focus, .ui-slider-float .ui-slider-handle.ui-state-focus .ui-slider-tip-label, .ui-slider-float .ui-slider-handle:focus .ui-slider-tip, .ui-slider-float .ui-slider-handle.ui-state-focus .ui-slider-tip-label, .ui-slider-float .ui-slider-handle:focus .ui-slider-tip-label .ui-slider-float .ui-slider-handle.ui-state-focus .ui-slider-tip-label { outline: none; } /* style tooltips on handles and on labels */ /* also has a nice transition */ .ui-slider-float .ui-slider-tip, .ui-slider-float .ui-slider-tip-label { position: absolute; visibility: hidden; top: -40px; display: block; width: 34px; margin-left: -18px; left: 50%; height: 20px; line-height: 20px; background: white; border-radius: 3px; border: 1px solid #888; text-align: center; font-size: 12px; opacity: 0; color: #333; -<API key>: opacity, top, visibility; -<API key>: opacity, top, visibility; -<API key>: opacity, top, visibility; transition-property: opacity, top, visibility; -<API key>: ease-in; -<API key>: ease-in; -<API key>: ease-in; <API key>: ease-in; -<API key>: 200ms, 200ms, 0ms; -<API key>: 200ms, 200ms, 0ms; -<API key>: 200ms, 200ms, 0ms; transition-duration: 200ms, 200ms, 0ms; -<API key>: 0ms, 0ms, 200ms; -<API key>: 0ms, 0ms, 200ms; -ms-transition-delay: 0ms, 0ms, 200ms; transition-delay: 0ms, 0ms, 200ms; } /* show the tooltip on hover or focus */ /* also switch transition delay around */ .ui-slider-float .ui-slider-handle:hover .ui-slider-tip, .ui-slider-float .ui-slider-handle.ui-state-hover .ui-slider-tip, .ui-slider-float .ui-slider-handle:focus .ui-slider-tip, .ui-slider-float .ui-slider-handle.ui-state-focus 
.ui-slider-tip, .ui-slider-float .ui-slider-handle.ui-state-active .ui-slider-tip, .ui-slider-float .ui-slider-pip:hover .ui-slider-tip-label { opacity: 1; top: -30px; visibility: visible; -<API key>: ease-out; -<API key>: ease-out; -<API key>: ease-out; <API key>: ease-out; -<API key>:200ms, 200ms, 0ms; -<API key>:200ms, 200ms, 0ms; -ms-transition-delay:200ms, 200ms, 0ms; transition-delay:200ms, 200ms, 0ms; } /* put label tooltips below slider */ .ui-slider-float .ui-slider-pip .ui-slider-tip-label { top: 42px; } .ui-slider-float .ui-slider-pip:hover .ui-slider-tip-label { top: 32px; font-weight: normal; } /* give the tooltip a css triangle arrow */ .ui-slider-float .ui-slider-tip:after, .ui-slider-float .ui-slider-pip .ui-slider-tip-label:after { content: " "; width: 0; height: 0; border: 5px solid rgba(255,255,255,0); border-top-color: rgba(255,255,255,1); position: absolute; bottom: -10px; left: 50%; margin-left: -5px; } /* put a 1px border on the tooltip arrow to match tooltip border */ .ui-slider-float .ui-slider-tip:before, .ui-slider-float .ui-slider-pip .ui-slider-tip-label:before { content: " "; width: 0; height: 0; border: 5px solid rgba(255,255,255,0); border-top-color: #888; position: absolute; bottom: -11px; left: 50%; margin-left: -5px; } /* switch the arrow to top on labels */ .ui-slider-float .ui-slider-pip .ui-slider-tip-label:after { border: 5px solid rgba(255,255,255,0); border-bottom-color: rgba(255,255,255,1); top: -10px; } .ui-slider-float .ui-slider-pip .ui-slider-tip-label:before { border: 5px solid rgba(255,255,255,0); border-bottom-color: #888; top: -11px; } /* FLOATING VERTICAL TOOLTIPS */ /* tooltip floats to left of handle */ .ui-slider-vertical.ui-slider-float .ui-slider-tip, .ui-slider-vertical.ui-slider-float .ui-slider-tip-label { top: 50%; margin-top: -11px; width: 34px; margin-left: 0px; left: -60px; color: #333; -<API key>: 200ms, 200ms, 0; -<API key>: 200ms, 200ms, 0; -<API key>: 200ms, 200ms, 0; transition-duration: 200ms, 
200ms, 0; -<API key>: opacity, left, visibility; -<API key>: opacity, left, visibility; -<API key>: opacity, left, visibility; transition-property: opacity, left, visibility; -<API key>: 0, 0, 200ms; -<API key>: 0, 0, 200ms; -ms-transition-delay: 0, 0, 200ms; transition-delay: 0, 0, 200ms; } .ui-slider-vertical.ui-slider-float .ui-slider-handle:hover .ui-slider-tip, .ui-slider-vertical.ui-slider-float .ui-slider-handle.ui-state-hover .ui-slider-tip, .ui-slider-vertical.ui-slider-float .ui-slider-handle:focus .ui-slider-tip, .ui-slider-vertical.ui-slider-float .ui-slider-handle.ui-state-focus .ui-slider-tip, .ui-slider-vertical.ui-slider-float .ui-slider-handle.ui-state-active .ui-slider-tip, .ui-slider-vertical.ui-slider-float .ui-slider-pip:hover .ui-slider-tip-label { top: 50%; margin-top: -11px; left: -50px; } /* put label tooltips to right of slider */ .ui-slider-vertical.ui-slider-float .ui-slider-pip .ui-slider-tip-label { left: 47px; } .ui-slider-vertical.ui-slider-float .ui-slider-pip:hover .ui-slider-tip-label { left: 37px; } /* give the tooltip a css triangle arrow */ .ui-slider-vertical.ui-slider-float .ui-slider-tip:after, .ui-slider-vertical.ui-slider-float .ui-slider-pip .ui-slider-tip-label:after { border: 5px solid rgba(255,255,255,0); border-left-color: rgba(255,255,255,1); border-top-color: transparent; position: absolute; bottom: 50%; margin-bottom: -5px; right: -10px; margin-left: 0; top: auto; left: auto; } .ui-slider-vertical.ui-slider-float .ui-slider-tip:before, .ui-slider-vertical.ui-slider-float .ui-slider-pip .ui-slider-tip-label:before { border: 5px solid rgba(255,255,255,0); border-left-color: #888; border-top-color: transparent; position: absolute; bottom: 50%; margin-bottom: -5px; right: -11px; margin-left: 0; top: auto; left: auto; } .ui-slider-vertical.ui-slider-float .ui-slider-pip .ui-slider-tip-label:after { border: 5px solid rgba(255,255,255,0); border-right-color: rgba(255,255,255,1); right: auto; left: -10px; } 
.ui-slider-vertical.ui-slider-float .ui-slider-pip .ui-slider-tip-label:before { border: 5px solid rgba(255,255,255,0); border-right-color: #888; right: auto; left: -11px; } /* SELECTED STATES */ /* Comment out this chuck of code if you don't want to have the new label colours shown */ .ui-slider-pips [class*=<API key>] { font-weight: bold; color: #14CA82; } .ui-slider-pips .<API key> { } .ui-slider-pips .<API key> { color: #1897C9; } .ui-slider-pips [class*=<API key>] { font-weight: bold; color: #FF7A00; } .ui-slider-pips .<API key> { color: black; } .ui-slider-pips .<API key> { } .ui-slider-pips .<API key> { color: #E70081; } .ui-slider-pips [class*=<API key>] .ui-slider-line, .ui-slider-pips .<API key> .ui-slider-line { background: black; }
// <API key>: GPL-2.0-only #include "qedi.h" #include "qedi_dbg.h" #include <linux/uaccess.h> #include <linux/debugfs.h> #include <linux/module.h> int qedi_do_not_recover; static struct dentry *qedi_dbg_root; void qedi_dbg_host_init(struct qedi_dbg_ctx *qedi, const struct qedi_debugfs_ops *dops, const struct file_operations *fops) { char host_dirname[32]; sprintf(host_dirname, "host%u", qedi->host_no); qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root); while (dops) { if (!(dops->name)) break; debugfs_create_file(dops->name, 0600, qedi->bdf_dentry, qedi, fops); dops++; fops++; } } void qedi_dbg_host_exit(struct qedi_dbg_ctx *qedi) { <API key>(qedi->bdf_dentry); qedi->bdf_dentry = NULL; } void qedi_dbg_init(char *drv_name) { qedi_dbg_root = debugfs_create_dir(drv_name, NULL); } void qedi_dbg_exit(void) { <API key>(qedi_dbg_root); qedi_dbg_root = NULL; } static ssize_t <API key>(struct qedi_dbg_ctx *qedi_dbg) { if (!qedi_do_not_recover) qedi_do_not_recover = 1; QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", qedi_do_not_recover); return 0; } static ssize_t <API key>(struct qedi_dbg_ctx *qedi_dbg) { if (qedi_do_not_recover) qedi_do_not_recover = 0; QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n", qedi_do_not_recover); return 0; } static struct qedi_list_of_funcs <API key>[] = { { "enable", <API key> }, { "disable", <API key> }, { NULL, NULL } }; const struct qedi_debugfs_ops qedi_debugfs_ops[] = { { "gbl_ctx", NULL }, { "do_not_recover", <API key>}, { "io_trace", NULL }, { NULL, NULL } }; static ssize_t <API key>(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { size_t cnt = 0; struct qedi_dbg_ctx *qedi_dbg = (struct qedi_dbg_ctx *)filp->private_data; struct qedi_list_of_funcs *lof = <API key>; if (*ppos) return 0; while (lof) { if (!(lof->oper_str)) break; if (!strncmp(lof->oper_str, buffer, strlen(lof->oper_str))) { cnt = lof->oper_func(qedi_dbg); break; } lof++; } return (count - cnt); } static 
ssize_t <API key>(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { size_t cnt = 0; if (*ppos) return 0; cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover); cnt = min_t(int, count, cnt - *ppos); *ppos += cnt; return cnt; } static int qedi_gbl_ctx_show(struct seq_file *s, void *unused) { struct qedi_fastpath *fp = NULL; struct qed_sb_info *sb_info = NULL; struct status_block_e4 *sb = NULL; struct global_queue *que = NULL; int id; u16 prod_idx; struct qedi_ctx *qedi = s->private; unsigned long flags; seq_puts(s, " DUMP CQ CONTEXT:\n"); for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { spin_lock_irqsave(&qedi->hba_lock, flags); seq_printf(s, "=========FAST CQ PATH [%d] ==========\n", id); fp = &qedi->fp_array[id]; sb_info = fp->sb_info; sb = sb_info->sb_virt; prod_idx = (sb->pi_array[<API key>] & <API key>); seq_printf(s, "SB PROD IDX: %d\n", prod_idx); que = qedi->global_queues[fp->sb_id]; seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx); seq_printf(s, "CQ complete host memory: %d\n", fp->sb_id); seq_puts(s, "=========== END ==================\n\n\n"); <API key>(&qedi->hba_lock, flags); } return 0; } static int <API key>(struct inode *inode, struct file *file) { struct qedi_dbg_ctx *qedi_dbg = inode->i_private; struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx, dbg_ctx); return single_open(file, qedi_gbl_ctx_show, qedi); } static int qedi_io_trace_show(struct seq_file *s, void *unused) { int id, idx = 0; struct qedi_ctx *qedi = s->private; struct qedi_io_log *io_log; unsigned long flags; seq_puts(s, " DUMP IO LOGS:\n"); spin_lock_irqsave(&qedi->io_trace_lock, flags); idx = qedi->io_trace_idx; for (id = 0; id < QEDI_IO_TRACE_SIZE; id++) { io_log = &qedi->io_trace_buf[idx]; seq_printf(s, "iodir-%d:", io_log->direction); seq_printf(s, "tid-0x%x:", io_log->task_id); seq_printf(s, "cid-0x%x:", io_log->cid); seq_printf(s, "lun-%d:", io_log->lun); seq_printf(s, "op-0x%02x:", io_log->op); seq_printf(s, 
"0x%02x%02x%02x%02x:", io_log->lba[0], io_log->lba[1], io_log->lba[2], io_log->lba[3]); seq_printf(s, "buflen-%d:", io_log->bufflen); seq_printf(s, "sgcnt-%d:", io_log->sg_count); seq_printf(s, "res-0x%08x:", io_log->result); seq_printf(s, "jif-%lu:", io_log->jiffies); seq_printf(s, "blk_req_cpu-%d:", io_log->blk_req_cpu); seq_printf(s, "req_cpu-%d:", io_log->req_cpu); seq_printf(s, "intr_cpu-%d:", io_log->intr_cpu); seq_printf(s, "blk_rsp_cpu-%d\n", io_log->blk_rsp_cpu); idx++; if (idx == QEDI_IO_TRACE_SIZE) idx = 0; } <API key>(&qedi->io_trace_lock, flags); return 0; } static int <API key>(struct inode *inode, struct file *file) { struct qedi_dbg_ctx *qedi_dbg = inode->i_private; struct qedi_ctx *qedi = container_of(qedi_dbg, struct qedi_ctx, dbg_ctx); return single_open(file, qedi_io_trace_show, qedi); } const struct file_operations qedi_dbg_fops[] = { <API key>(qedi, gbl_ctx), qedi_dbg_fileops(qedi, do_not_recover), <API key>(qedi, io_trace), { }, };
/* <API key>: GPL-2.0-or-later */ #ifndef __AD1836_H__ #define __AD1836_H__ #define AD1836_DAC_CTRL1 0 #define <API key> 2 #define <API key> 0xE0 #define <API key> (0x4 << 5) #define <API key> (0x5 << 5) #define <API key> 0x18 #define <API key> 3 #define AD1836_DAC_CTRL2 1 /* These macros are one-based. So AD183X_MUTE_LEFT(1) will return the mute bit * for the first ADC/DAC */ #define AD1836_MUTE_LEFT(x) (((x) * 2) - 2) #define AD1836_MUTE_RIGHT(x) (((x) * 2) - 1) #define AD1836_DAC_L_VOL(x) ((x) * 2) #define AD1836_DAC_R_VOL(x) (1 + ((x) * 2)) #define AD1836_ADC_CTRL1 12 #define <API key> 7 #define <API key> 8 #define AD1836_ADC_CTRL2 13 #define <API key> 0x30 #define <API key> 4 #define <API key> (7 << 6) #define <API key> (0x4 << 6) #define <API key> (0x5 << 6) #define AD1836_ADC_AUX (0x6 << 6) #define AD1836_ADC_CTRL3 14 #define AD1836_NUM_REGS 16 #define AD1836_WORD_LEN_24 0x0 #define AD1836_WORD_LEN_20 0x1 #define AD1836_WORD_LEN_16 0x2 #endif
// <API key>: GPL-2.0-or-later #include "cx88.h" #include "cx88-reg.h" #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/jiffies.h> #include <asm/div64.h> #define INT_PI ((s32)(3.141592653589 * 32768.0)) #define compat_remainder(a, b) \ ((float)(((s32)((a) * 100)) % ((s32)((b) * 100))) / 100.0) #define baseband_freq(carrier, srate, tone) ((s32)( \ (compat_remainder(carrier + tone, srate)) / srate * 2 * INT_PI)) /* * We calculate the baseband frequencies of the carrier and the pilot tones * based on the the sampling rate of the audio rds fifo. */ #define FREQ_A2_CARRIER baseband_freq(54687.5, 2689.36, 0.0) #define FREQ_A2_DUAL baseband_freq(54687.5, 2689.36, 274.1) #define FREQ_A2_STEREO baseband_freq(54687.5, 2689.36, 117.5) /* * The frequencies below are from the reference driver. They probably need * further adjustments, because they are not tested at all. You may even need * to play a bit with the registers of the chip to select the proper signal * for the input of the audio rds fifo, and measure it's sampling rate to * calculate the proper baseband frequencies... */ #define FREQ_A2M_CARRIER ((s32)(2.114516 * 32768.0)) #define FREQ_A2M_DUAL ((s32)(2.754916 * 32768.0)) #define FREQ_A2M_STEREO ((s32)(2.462326 * 32768.0)) #define FREQ_EIAJ_CARRIER ((s32)(1.963495 * 32768.0)) /* 5pi/8 */ #define FREQ_EIAJ_DUAL ((s32)(2.562118 * 32768.0)) #define FREQ_EIAJ_STEREO ((s32)(2.601053 * 32768.0)) #define FREQ_BTSC_DUAL ((s32)(1.963495 * 32768.0)) /* 5pi/8 */ #define FREQ_BTSC_DUAL_REF ((s32)(1.374446 * 32768.0)) /* 7pi/16 */ #define FREQ_BTSC_SAP ((s32)(2.471532 * 32768.0)) #define FREQ_BTSC_SAP_REF ((s32)(1.730072 * 32768.0)) /* The spectrum of the signal should be empty between these frequencies. 
*/ #define FREQ_NOISE_START ((s32)(0.100000 * 32768.0)) #define FREQ_NOISE_END ((s32)(1.200000 * 32768.0)) static unsigned int dsp_debug; module_param(dsp_debug, int, 0644); MODULE_PARM_DESC(dsp_debug, "enable audio dsp debug messages"); #define dprintk(level, fmt, arg...) do { \ if (dsp_debug >= level) \ printk(KERN_DEBUG pr_fmt("%s: dsp:" fmt), \ __func__, ##arg); \ } while (0) static s32 int_cos(u32 x) { u32 t2, t4, t6, t8; s32 ret; u16 period = x / INT_PI; if (period % 2) return -int_cos(x - INT_PI); x = x % INT_PI; if (x > INT_PI / 2) return -int_cos(INT_PI / 2 - (x % (INT_PI / 2))); /* * Now x is between 0 and INT_PI/2. * To calculate cos(x) we use it's Taylor polinom. */ t2 = x * x / 32768 / 2; t4 = t2 * x / 32768 * x / 32768 / 3 / 4; t6 = t4 * x / 32768 * x / 32768 / 5 / 6; t8 = t6 * x / 32768 * x / 32768 / 7 / 8; ret = 32768 - t2 + t4 - t6 + t8; return ret; } static u32 int_goertzel(s16 x[], u32 N, u32 freq) { /* * We use the Goertzel algorithm to determine the power of the * given frequency in the signal */ s32 s_prev = 0; s32 s_prev2 = 0; s32 coeff = 2 * int_cos(freq); u32 i; u64 tmp; u32 divisor; for (i = 0; i < N; i++) { s32 s = x[i] + ((s64)coeff * s_prev / 32768) - s_prev2; s_prev2 = s_prev; s_prev = s; } tmp = (s64)s_prev2 * s_prev2 + (s64)s_prev * s_prev - (s64)coeff * s_prev2 * s_prev / 32768; /* * XXX: N must be low enough so that N*N fits in s32. * Else we need two divisions. 
*/ divisor = N * N; do_div(tmp, divisor); return (u32)tmp; } static u32 freq_magnitude(s16 x[], u32 N, u32 freq) { u32 sum = int_goertzel(x, N, freq); return (u32)int_sqrt(sum); } static u32 noise_magnitude(s16 x[], u32 N, u32 freq_start, u32 freq_end) { int i; u32 sum = 0; u32 freq_step; int samples = 5; if (N > 192) { /* The last 192 samples are enough for noise detection */ x += (N - 192); N = 192; } freq_step = (freq_end - freq_start) / (samples - 1); for (i = 0; i < samples; i++) { sum += int_goertzel(x, N, freq_start); freq_start += freq_step; } return (u32)int_sqrt(sum / samples); } static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N) { s32 carrier, stereo, dual, noise; s32 carrier_freq, stereo_freq, dual_freq; s32 ret; switch (core->tvaudio) { case WW_BG: case WW_DK: carrier_freq = FREQ_A2_CARRIER; stereo_freq = FREQ_A2_STEREO; dual_freq = FREQ_A2_DUAL; break; case WW_M: carrier_freq = FREQ_A2M_CARRIER; stereo_freq = FREQ_A2M_STEREO; dual_freq = FREQ_A2M_DUAL; break; case WW_EIAJ: carrier_freq = FREQ_EIAJ_CARRIER; stereo_freq = FREQ_EIAJ_STEREO; dual_freq = FREQ_EIAJ_DUAL; break; default: pr_warn("unsupported audio mode %d for %s\n", core->tvaudio, __func__); return UNSET; } carrier = freq_magnitude(x, N, carrier_freq); stereo = freq_magnitude(x, N, stereo_freq); dual = freq_magnitude(x, N, dual_freq); noise = noise_magnitude(x, N, FREQ_NOISE_START, FREQ_NOISE_END); dprintk(1, "detect a2/a2m/eiaj: carrier=%d, stereo=%d, dual=%d, noise=%d\n", carrier, stereo, dual, noise); if (stereo > dual) ret = <API key>; else ret = <API key> | <API key>; if (core->tvaudio == WW_EIAJ) { /* EIAJ checks may need adjustments */ if ((carrier > max(stereo, dual) * 2) && (carrier < max(stereo, dual) * 6) && (carrier > 20 && carrier < 200) && (max(stereo, dual) > min(stereo, dual))) { /* * For EIAJ the carrier is always present, * so we probably don't need noise detection */ return ret; } } else { if ((carrier > max(stereo, dual) * 2) && (carrier < max(stereo, 
dual) * 8) && (carrier > 20 && carrier < 200) && (noise < 10) && (max(stereo, dual) > min(stereo, dual) * 2)) { return ret; } } return V4L2_TUNER_SUB_MONO; } static s32 detect_btsc(struct cx88_core *core, s16 x[], u32 N) { s32 sap_ref = freq_magnitude(x, N, FREQ_BTSC_SAP_REF); s32 sap = freq_magnitude(x, N, FREQ_BTSC_SAP); s32 dual_ref = freq_magnitude(x, N, FREQ_BTSC_DUAL_REF); s32 dual = freq_magnitude(x, N, FREQ_BTSC_DUAL); dprintk(1, "detect btsc: dual_ref=%d, dual=%d, sap_ref=%d, sap=%d\n", dual_ref, dual, sap_ref, sap); /* FIXME: Currently not supported */ return UNSET; } static s16 *read_rds_samples(struct cx88_core *core, u32 *N) { const struct sram_channel *srch = &cx88_sram_channels[SRAM_CH27]; s16 *samples; unsigned int i; unsigned int bpl = srch->fifo_size / AUD_RDS_LINES; unsigned int spl = bpl / 4; unsigned int sample_count = spl * (AUD_RDS_LINES - 1); u32 current_address = cx_read(srch->ptr1_reg); u32 offset = (current_address - srch->fifo_start + bpl); dprintk(1, "read RDS samples: current_address=%08x (offset=%08x), sample_count=%d, aud_intstat=%08x\n", current_address, current_address - srch->fifo_start, sample_count, cx_read(MO_AUD_INTSTAT)); samples = kmalloc_array(sample_count, sizeof(*samples), GFP_KERNEL); if (!samples) return NULL; *N = sample_count; for (i = 0; i < sample_count; i++) { offset = offset % (AUD_RDS_LINES * bpl); samples[i] = cx_read(srch->fifo_start + offset); offset += 4; } dprintk(2, "RDS samples dump: %*ph\n", sample_count, samples); return samples; } s32 <API key>(struct cx88_core *core) { s16 *samples; u32 N = 0; s32 ret = UNSET; /* If audio RDS fifo is disabled, we can't read the samples */ if (!(cx_read(MO_AUD_DMACNTRL) & 0x04)) return ret; if (!(cx_read(AUD_CTL) & EN_FMRADIO_EN_RDS)) return ret; /* Wait at least 500 ms after an audio standard change */ if (time_before(jiffies, core->last_change + msecs_to_jiffies(500))) return ret; samples = read_rds_samples(core, &N); if (!samples) return ret; switch (core->tvaudio) { 
case WW_BG: case WW_DK: case WW_EIAJ: case WW_M: ret = detect_a2_a2m_eiaj(core, samples, N); break; case WW_BTSC: ret = detect_btsc(core, samples, N); break; case WW_NONE: case WW_I: case WW_L: case WW_I2SPT: case WW_FM: case WW_I2SADC: break; } kfree(samples); if (ret != UNSET) dprintk(1, "stereo/sap detection result:%s%s%s\n", (ret & V4L2_TUNER_SUB_MONO) ? " mono" : "", (ret & <API key>) ? " stereo" : "", (ret & <API key>) ? " dual" : ""); return ret; } EXPORT_SYMBOL(<API key>);
// Boost.TypeErasure library #if !defined(<API key>) #ifndef <API key> #define <API key> #include <boost/utility/declval.hpp> #include <boost/mpl/vector.hpp> #include <boost/mpl/push_back.hpp> #include <boost/preprocessor/cat.hpp> #include <boost/preprocessor/dec.hpp> #include <boost/preprocessor/iteration/iterate.hpp> #include <boost/preprocessor/repetition/enum.hpp> #include <boost/preprocessor/repetition/enum_params.hpp> #include <boost/preprocessor/repetition/<API key>.hpp> #include <boost/preprocessor/repetition/enum_binary_params.hpp> #include <boost/preprocessor/repetition/<API key>.hpp> #include <boost/type_erasure/config.hpp> #include <boost/type_erasure/call.hpp> #include <boost/type_erasure/concept_interface.hpp> #include <boost/type_erasure/rebind_any.hpp> #include <boost/type_erasure/param.hpp> namespace boost { namespace type_erasure { template<class Sig, class F = _self> struct callable; namespace detail { template<class Sig> struct result_of_callable; } #if defined(<API key>) /** * The @ref callable concept allows an @ref any to hold function objects. * @c Sig is interpreted in the same way as for Boost.Function, except * that the arguments and return type are allowed to be placeholders. * @c F must be a @ref placeholder. * * Multiple instances of @ref callable can be used * simultaneously. Overload resolution works normally. * Note that unlike Boost.Function, @ref callable * does not provide result_type. It does, however, * support @c boost::result_of. */ template<class Sig, class F = _self> struct callable { /** * @c R is the result type of @c Sig and @c T is the argument * types of @c Sig. */ static R apply(F& f, T... arg); }; #elif !defined(<API key>) && !defined(<API key>) template<class R, class... T, class F> struct callable<R(T...), F> { static R apply(F& f, T... arg) { return f(std::forward<T>(arg)...); } }; template<class... T, class F> struct callable<void(T...), F> { static void apply(F& f, T... 
arg) { f(std::forward<T>(arg)...); } }; template<class R, class F, class Base, class Enable, class... T> struct concept_interface<callable<R(T...), F>, Base, F, Enable> : Base { template<class Sig> struct result : ::boost::type_erasure::detail::result_of_callable<Sig> {}; typedef void <API key>; typedef ::boost::mpl::vector<R> <API key>; typedef char (&<API key>)[1]; <API key> <API key>( typename ::boost::type_erasure::as_param<Base, T>::type...); typename ::boost::type_erasure::rebind_any<Base, R>::type operator()(typename ::boost::type_erasure::as_param<Base, T>::type... arg) { return ::boost::type_erasure::call(callable<R(T...), F>(), *this, ::std::forward<typename ::boost::type_erasure::as_param<Base, T>::type>(arg)...); } }; template<class R, class F, class Base, class Enable, class... T> struct concept_interface<callable<R(T...), const F>, Base, F, Enable> : Base { template<class Sig> struct result : ::boost::type_erasure::detail::result_of_callable<Sig> {}; typedef void <API key>; typedef ::boost::mpl::vector<R> <API key>; typedef char (&<API key>)[1]; <API key> <API key>( typename ::boost::type_erasure::as_param<Base, T>::type...) const; typename ::boost::type_erasure::rebind_any<Base, R>::type operator()( typename ::boost::type_erasure::as_param<Base, T>::type... arg) const { return ::boost::type_erasure::call(callable<R(T...), const F>(), *this, ::std::forward<typename ::boost::type_erasure::as_param<Base, T>::type>(arg)...); } }; template<class R, class F, class Base, class... 
T> struct concept_interface< callable<R(T...), F>, Base, F, typename Base::<API key> > : Base { typedef typename ::boost::mpl::push_back< typename Base::<API key>, R >::type <API key>; typedef char (&<API key>)[ ::boost::mpl::size<<API key>>::value]; using Base::<API key>; <API key> <API key>( typename ::boost::type_erasure::as_param<Base, T>::type...); using Base::operator(); typename ::boost::type_erasure::rebind_any<Base, R>::type operator()(typename ::boost::type_erasure::as_param<Base, T>::type... arg) { return ::boost::type_erasure::call(callable<R(T...), F>(), *this, ::std::forward<typename ::boost::type_erasure::as_param<Base, T>::type>(arg)...); } }; template<class R, class F, class Base, class... T> struct concept_interface< callable<R(T...), const F>, Base, F, typename Base::<API key> > : Base { typedef typename ::boost::mpl::push_back< typename Base::<API key>, R >::type <API key>; typedef char (&<API key>)[ ::boost::mpl::size<<API key>>::value]; using Base::<API key>; <API key> <API key>( typename ::boost::type_erasure::as_param<Base, T>::type...) const; using Base::operator(); typename ::boost::type_erasure::rebind_any<Base, R>::type operator()(typename ::boost::type_erasure::as_param<Base, T>::type... arg) const { return ::boost::type_erasure::call(callable<R(T...), const F>(), *this, ::std::forward<typename ::boost::type_erasure::as_param<Base, T>::type>(arg)...); } }; namespace detail { template<class This, class... T> struct result_of_callable<This(T...)> { typedef typename ::boost::mpl::at_c< typename This::<API key>, sizeof(::boost::declval<This>(). 
<API key>(::boost::declval<T>()...)) - 1 >::type type; }; } #else /** INTERNAL ONLY */ #define BOOST_PP_FILENAME_1 <boost/type_erasure/callable.hpp> /** INTERNAL ONLY */ #define <API key> (0, BOOST_PP_DEC(<API key>)) #include BOOST_PP_ITERATE() #endif } } #endif #else #define N BOOST_PP_ITERATION() #define <API key>(z, n, data) ::boost::declval<BOOST_PP_CAT(T, n)>() #define <API key>(z, n, data)\ typename ::boost::type_erasure::as_param<Base, BOOST_PP_CAT(T, n)>::type BOOST_PP_CAT(arg, n) #ifdef <API key> #define <API key>(z, n, data) BOOST_PP_CAT(BOOST_PP_TUPLE_ELEM(2, 1, data), n) #define <API key>(z, n, data) BOOST_PP_CAT(arg, n) #else #define <API key>(z, n, data) ::std::forward<BOOST_PP_CAT(BOOST_PP_TUPLE_ELEM(2, 0, data), n)>(BOOST_PP_CAT(BOOST_PP_TUPLE_ELEM(2, 1, data), n)) #define <API key>(z, n, data) ::std::forward<typename ::boost::type_erasure::as_param<Base, BOOST_PP_CAT(T, n)>::type>(BOOST_PP_CAT(arg, n)) #endif template<class R <API key>(N, class T), class F> struct callable<R(<API key>(N, T)), F> { static R apply(F& f <API key>(N, T, arg)) { return f(BOOST_PP_ENUM(N, <API key>, (T, arg))); } }; template<<API key>(N, class T) BOOST_PP_COMMA_IF(N) class F> struct callable<void(<API key>(N, T)), F> { static void apply(F& f <API key>(N, T, arg)) { f(BOOST_PP_ENUM(N, <API key>, (T, arg))); } }; template<class R <API key>(N, class T), class F, class Base, class Enable> struct concept_interface< callable<R(<API key>(N, T)), F>, Base, F, Enable > : Base { template<class Sig> struct result : ::boost::type_erasure::detail::result_of_callable<Sig> {}; typedef void <API key>; typedef ::boost::mpl::vector<R> <API key>; typedef char (&<API key>)[1]; <API key> <API key>( BOOST_PP_ENUM(N, <API key>, ~)); typename ::boost::type_erasure::rebind_any<Base, R>::type operator()(BOOST_PP_ENUM(N, <API key>, ~)) { return ::boost::type_erasure::call( callable<R(<API key>(N, T)), F>(), *this <API key>(N, <API key>, ~)); } }; template<class R <API key>(N, class T), class F, 
class Base, class Enable> struct concept_interface< callable<R(<API key>(N, T)), const F>, Base, F, Enable > : Base { template<class Sig> struct result : ::boost::type_erasure::detail::result_of_callable<Sig> {}; typedef void <API key>; typedef ::boost::mpl::vector<R> <API key>; typedef char (&<API key>)[1]; <API key> <API key>( BOOST_PP_ENUM(N, <API key>, ~)) const; typename ::boost::type_erasure::rebind_any<Base, R>::type operator()(BOOST_PP_ENUM(N, <API key>, ~)) const { return ::boost::type_erasure::call( callable<R(<API key>(N, T)), const F>(), *this <API key>(N, <API key>, ~)); } }; template<class R <API key>(N, class T), class F, class Base> struct concept_interface< callable<R(<API key>(N, T)), F>, Base, F, typename Base::<API key> > : Base { typedef typename ::boost::mpl::push_back< typename Base::<API key>, R >::type <API key>; typedef char (&<API key>)[ ::boost::mpl::size<<API key>>::value]; using Base::<API key>; <API key> <API key>( BOOST_PP_ENUM(N, <API key>, ~)); using Base::operator(); typename ::boost::type_erasure::rebind_any<Base, R>::type operator()(BOOST_PP_ENUM(N, <API key>, ~)) { return ::boost::type_erasure::call( callable<R(<API key>(N, T)), F>(), *this <API key>(N, <API key>, ~)); } }; template<class R <API key>(N, class T), class F, class Base> struct concept_interface< callable<R(<API key>(N, T)), const F>, Base, F, typename Base::<API key> > : Base { typedef typename ::boost::mpl::push_back< typename Base::<API key>, R >::type <API key>; typedef char (&<API key>)[ ::boost::mpl::size<<API key>>::value]; using Base::<API key>; <API key> <API key>( BOOST_PP_ENUM(N, <API key>, ~)) const; using Base::operator(); typename ::boost::type_erasure::rebind_any<Base, R>::type operator()(BOOST_PP_ENUM(N, <API key>, ~)) const { return ::boost::type_erasure::call( callable<R(<API key>(N, T)), const F>(), *this <API key>(N, <API key>, ~)); } }; namespace detail { template<class This <API key>(N, class T)> struct result_of_callable<This(<API key>(N, 
T))> { typedef typename ::boost::mpl::at_c< typename This::<API key>, sizeof(::boost::declval<This>(). <API key>( BOOST_PP_ENUM(N, <API key>, ~))) - 1 >::type type; }; } #undef <API key> #undef <API key> #undef N #endif
# cs.???? = currentstate, any variable on the status tab in the planner can be used. # Script = options are # Script.Sleep(ms) # Script.ChangeParam(name,value) # Script.GetParam(name) # Script.ChangeMode(mode) - same as displayed in mode setup screen 'AUTO' # Script.WaitFor(string,timeout) # Script.SendRC(channel,pwm,sendnow) print 'Start Script' for chan in range(1,9): Script.SendRC(chan,1500,False) Script.SendRC(3,Script.GetParam('RC3_MIN'),True) Script.Sleep(5000) while cs.lat == 0: print 'Waiting for GPS' Script.Sleep(1000) print 'Got GPS' jo = 10 * 13 print jo Script.SendRC(3,1000,False) Script.SendRC(4,2000,True) cs.messages.Clear() Script.WaitFor('ARMING MOTORS',30000) Script.SendRC(4,1500,True) print 'Motors Armed!' Script.SendRC(3,1700,True) while cs.alt < 50: Script.Sleep(50) Script.SendRC(5,2000,True) # acro Script.SendRC(1,2000,False) # roll Script.SendRC(3,1370,True) # throttle while cs.roll > -45: # top hald 0 - 180 Script.Sleep(5) while cs.roll < -45: # -180 - -45 Script.Sleep(5) Script.SendRC(5,1500,False) # stabalise Script.SendRC(1,1500,True) # level roll Script.Sleep(2000) # 2 sec to stabalise Script.SendRC(3,1300,True) # throttle back to land thro = 1350 # will decend while cs.alt > 0.1: Script.Sleep(300) Script.SendRC(3,1000,False) Script.SendRC(4,1000,True) Script.WaitFor('DISARMING MOTORS',30000) Script.SendRC(4,1500,True) print 'Roll complete'
#include <linux/kvm_host.h> #include <asm/kvm_mmio.h> #include <asm/kvm_emulate.h> #include <trace/events/kvm.h> #include "trace.h" static void mmio_write_buf(char *buf, unsigned int len, unsigned long data) { void *datap = NULL; union { u8 byte; u16 hword; u32 word; u64 dword; } tmp; switch (len) { case 1: tmp.byte = data; datap = &tmp.byte; break; case 2: tmp.hword = data; datap = &tmp.hword; break; case 4: tmp.word = data; datap = &tmp.word; break; case 8: tmp.dword = data; datap = &tmp.dword; break; } memcpy(buf, datap, len); } static unsigned long mmio_read_buf(char *buf, unsigned int len) { unsigned long data = 0; union { u16 hword; u32 word; u64 dword; } tmp; switch (len) { case 1: data = buf[0]; break; case 2: memcpy(&tmp.hword, buf, len); data = tmp.hword; break; case 4: memcpy(&tmp.word, buf, len); data = tmp.word; break; case 8: memcpy(&tmp.dword, buf, len); data = tmp.dword; break; } return data; } /** * <API key> -- Handle MMIO loads after user space emulation * @vcpu: The VCPU pointer * @run: The VCPU run struct containing the mmio data * * This should only be called after returning from userspace for MMIO load * emulation. 
*/ int <API key>(struct kvm_vcpu *vcpu, struct kvm_run *run) { unsigned long data; unsigned int len; int mask; if (!run->mmio.is_write) { len = run->mmio.len; if (len > sizeof(unsigned long)) return -EINVAL; data = mmio_read_buf(run->mmio.data, len); if (vcpu->arch.mmio_decode.sign_extend && len < sizeof(unsigned long)) { mask = 1U << ((len * 8) - 1); data = (data ^ mask) - mask; } trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, data); data = <API key>(vcpu, data, len); vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data); } return 0; } static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len) { unsigned long rt; int access_size; bool sign_extend; if (<API key>(vcpu)) { /* cache operation on I/O addr, tell guest unsupported */ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); return 1; } if (<API key>(vcpu)) { /* page table accesses IO mem: tell guest to fix its TTBR */ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); return 1; } access_size = <API key>(vcpu); if (unlikely(access_size < 0)) return access_size; *is_write = <API key>(vcpu); sign_extend = <API key>(vcpu); rt = <API key>(vcpu); *len = access_size; vcpu->arch.mmio_decode.sign_extend = sign_extend; vcpu->arch.mmio_decode.rt = rt; /* * The MMIO instruction is emulated and should not be re-executed * in the guest. */ kvm_skip_instr(vcpu, <API key>(vcpu)); return 0; } int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, phys_addr_t fault_ipa) { unsigned long data; unsigned long rt; int ret; bool is_write; int len; u8 data_buf[8]; /* * Prepare MMIO operation. First decode the syndrome data we get * from the CPU. Then try if some in-kernel emulation feels * responsible, otherwise let user space do its magic. 
*/ if (<API key>(vcpu)) { ret = decode_hsr(vcpu, &is_write, &len); if (ret) return ret; } else { kvm_err("load/store instruction decoding not implemented\n"); return -ENOSYS; } rt = vcpu->arch.mmio_decode.rt; if (is_write) { data = <API key>(vcpu, vcpu_get_reg(vcpu, rt), len); trace_kvm_mmio(<API key>, len, fault_ipa, data); mmio_write_buf(data_buf, len, data); ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len, data_buf); } else { trace_kvm_mmio(<API key>, len, fault_ipa, 0); ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len, data_buf); } /* Now prepare kvm_run for the potential return to userland. */ run->mmio.is_write = is_write; run->mmio.phys_addr = fault_ipa; run->mmio.len = len; if (is_write) memcpy(run->mmio.data, data_buf, len); if (!ret) { /* We handled the access successfully in the kernel. */ vcpu->stat.mmio_exit_kernel++; <API key>(vcpu, run); return 1; } else { vcpu->stat.mmio_exit_user++; } run->exit_reason = KVM_EXIT_MMIO; return 0; }
cr.define('cr.ui', function() { // require cr.ui.define // require cr.ui.limitInputWidth /** * The number of pixels to indent per level. * @type {number} * @const */ var INDENT = 20; /** * Returns the computed style for an element. * @param {!Element} el The element to get the computed style for. * @return {!CSSStyleDeclaration} The computed style. */ function getComputedStyle(el) { return el.ownerDocument.defaultView.getComputedStyle(el); } /** * Helper function that finds the first ancestor tree item. * @param {!Element} el The element to start searching from. * @return {cr.ui.TreeItem} The found tree item or null if not found. */ function findTreeItem(el) { while (el && !(el instanceof TreeItem)) { el = el.parentNode; } return el; } /** * Creates a new tree element. * @param {Object=} opt_propertyBag Optional properties. * @constructor * @extends {HTMLElement} */ var Tree = cr.ui.define('tree'); Tree.prototype = { __proto__: HTMLElement.prototype, /** * Initializes the element. */ decorate: function() { // Make list focusable if (!this.hasAttribute('tabindex')) this.tabIndex = 0; this.addEventListener('click', this.handleClick); this.addEventListener('mousedown', this.handleMouseDown); this.addEventListener('dblclick', this.handleDblClick); this.addEventListener('keydown', this.handleKeyDown); }, /** * Returns the tree item that are children of this tree. */ get items() { return this.children; }, /** * Adds a tree item to the tree. * @param {!cr.ui.TreeItem} treeItem The item to add. */ add: function(treeItem) { this.addAt(treeItem, 0xffffffff); }, /** * Adds a tree item at the given index. * @param {!cr.ui.TreeItem} treeItem The item to add. * @param {number} index The index where we want to add the item. */ addAt: function(treeItem, index) { this.insertBefore(treeItem, this.children[index]); treeItem.setDepth_(this.depth + 1); }, /** * Removes a tree item child. * @param {!cr.ui.TreeItem} treeItem The tree item to remove. 
*/ remove: function(treeItem) { this.removeChild(treeItem); }, /** * The depth of the node. This is 0 for the tree itself. * @type {number} */ get depth() { return 0; }, /** * Handles click events on the tree and forwards the event to the relevant * tree items as necesary. * @param {Event} e The click event object. */ handleClick: function(e) { var treeItem = findTreeItem(e.target); if (treeItem) treeItem.handleClick(e); }, handleMouseDown: function(e) { if (e.button == 2) // right this.handleClick(e); }, /** * Handles double click events on the tree. * @param {Event} e The dblclick event object. */ handleDblClick: function(e) { var treeItem = findTreeItem(e.target); if (treeItem) treeItem.expanded = !treeItem.expanded; }, /** * Handles keydown events on the tree and updates selection and exanding * of tree items. * @param {Event} e The click event object. */ handleKeyDown: function(e) { var itemToSelect; if (e.ctrlKey) return; var item = this.selectedItem; if (!item) return; var rtl = getComputedStyle(item).direction == 'rtl'; switch (e.keyIdentifier) { case 'Up': itemToSelect = item ? getPrevious(item) : this.items[this.items.length - 1]; break; case 'Down': itemToSelect = item ? getNext(item) : this.items[0]; break; case 'Left': case 'Right': // Don't let back/forward keyboard shortcuts be used. if (!cr.isMac && e.altKey || cr.isMac && e.metaKey) break; if (e.keyIdentifier == 'Left' && !rtl || e.keyIdentifier == 'Right' && rtl) { if (item.expanded) item.expanded = false; else itemToSelect = findTreeItem(item.parentNode); } else { if (!item.expanded) item.expanded = true; else itemToSelect = item.items[0]; } break; case 'Home': itemToSelect = this.items[0]; break; case 'End': itemToSelect = this.items[this.items.length - 1]; break; } if (itemToSelect) { itemToSelect.selected = true; e.preventDefault(); } }, /** * The selected tree item or null if none. 
* @type {cr.ui.TreeItem} */ get selectedItem() { return this.selectedItem_ || null; }, set selectedItem(item) { var oldSelectedItem = this.selectedItem_; if (oldSelectedItem != item) { // Set the selectedItem_ before deselecting the old item since we only // want one change when moving between items. this.selectedItem_ = item; if (oldSelectedItem) oldSelectedItem.selected = false; if (item) item.selected = true; cr.dispatchSimpleEvent(this, 'change'); } }, /** * @return {!ClientRect} The rect to use for the context menu. */ <API key>: function() { // TODO(arv): Add trait support so we can share more code between trees // and lists. if (this.selectedItem) return this.selectedItem.rowElement.<API key>(); return this.<API key>(); } }; /** * Determines the visibility of icons next to the treeItem labels. If set to * 'hidden', no space is reserved for icons and no icons are displayed next * to treeItem labels. If set to 'parent', folder icons will be displayed * next to expandable parent nodes. If set to 'all' folder icons will be * displayed next to all nodes. Icons can be set using the treeItem's icon * property. */ cr.defineProperty(Tree, 'iconVisibility', cr.PropertyKind.ATTR); /** * This is used as a blueprint for new tree item elements. * @type {!HTMLElement} */ var treeItemProto = (function() { var treeItem = cr.doc.createElement('div'); treeItem.className = 'tree-item'; treeItem.innerHTML = '<div class=tree-row>' + '<span class=expand-icon></span>' + '<span class=tree-label></span>' + '</div>' + '<div class=tree-children></div>'; treeItem.setAttribute('role', 'treeitem'); return treeItem; })(); /** * Creates a new tree item. * @param {Object=} opt_propertyBag Optional properties. * @constructor * @extends {HTMLElement} */ var TreeItem = cr.ui.define(function() { return treeItemProto.cloneNode(true); }); TreeItem.prototype = { __proto__: HTMLElement.prototype, /** * Initializes the element. */ decorate: function() { }, /** * The tree items children. 
*/ get items() { return this.lastElementChild.children; }, /** * The depth of the tree item. * @type {number} */ depth_: 0, get depth() { return this.depth_; }, /** * Sets the depth. * @param {number} depth The new depth. * @private */ setDepth_: function(depth) { if (depth != this.depth_) { this.rowElement.style.WebkitPaddingStart = Math.max(0, depth - 1) * INDENT + 'px'; this.depth_ = depth; var items = this.items; for (var i = 0, item; item = items[i]; i++) { item.setDepth_(depth + 1); } } }, /** * Adds a tree item as a child. * @param {!cr.ui.TreeItem} child The child to add. */ add: function(child) { this.addAt(child, 0xffffffff); }, /** * Adds a tree item as a child at a given index. * @param {!cr.ui.TreeItem} child The child to add. * @param {number} index The index where to add the child. */ addAt: function(child, index) { this.lastElementChild.insertBefore(child, this.items[index]); if (this.items.length == 1) this.hasChildren = true; child.setDepth_(this.depth + 1); }, /** * Removes a child. * @param {!cr.ui.TreeItem} child The tree item child to remove. */ remove: function(child) { // If we removed the selected item we should become selected. var tree = this.tree; var selectedItem = tree.selectedItem; if (selectedItem && child.contains(selectedItem)) this.selected = true; this.lastElementChild.removeChild(child); if (this.items.length == 0) this.hasChildren = false; }, /** * The parent tree item. * @type {!cr.ui.Tree|cr.ui.TreeItem} */ get parentItem() { var p = this.parentNode; while (p && !(p instanceof TreeItem) && !(p instanceof Tree)) { p = p.parentNode; } return p; }, /** * The tree that the tree item belongs to or null of no added to a tree. * @type {cr.ui.Tree} */ get tree() { var t = this.parentItem; while (t && !(t instanceof Tree)) { t = t.parentItem; } return t; }, /** * Whether the tree item is expanded or not. 
* @type {boolean} */ get expanded() { return this.hasAttribute('expanded'); }, set expanded(b) { if (this.expanded == b) return; var treeChildren = this.lastElementChild; if (b) { if (this.mayHaveChildren_) { this.setAttribute('expanded', ''); treeChildren.setAttribute('expanded', ''); cr.dispatchSimpleEvent(this, 'expand', true); this.<API key>(false); } } else { var tree = this.tree; if (tree && !this.selected) { var oldSelected = tree.selectedItem; if (oldSelected && this.contains(oldSelected)) this.selected = true; } this.removeAttribute('expanded'); treeChildren.removeAttribute('expanded'); cr.dispatchSimpleEvent(this, 'collapse', true); } }, /** * Expands all parent items. */ reveal: function() { var pi = this.parentItem; while (pi && !(pi instanceof Tree)) { pi.expanded = true; pi = pi.parentItem; } }, /** * The element representing the row that gets highlighted. * @type {!HTMLElement} */ get rowElement() { return this.firstElementChild; }, /** * The element containing the label text and the icon. * @type {!HTMLElement} */ get labelElement() { return this.firstElementChild.lastElementChild; }, /** * The label text. * @type {string} */ get label() { return this.labelElement.textContent; }, set label(s) { this.labelElement.textContent = s; }, /** * The URL for the icon. * @type {string} */ get icon() { return getComputedStyle(this.labelElement).backgroundImage.slice(4, -1); }, set icon(icon) { return this.labelElement.style.backgroundImage = url(icon); }, /** * Whether the tree item is selected or not. 
* @type {boolean} */ get selected() { return this.hasAttribute('selected'); }, set selected(b) { if (this.selected == b) return; var rowItem = this.firstElementChild; var tree = this.tree; if (b) { this.setAttribute('selected', ''); rowItem.setAttribute('selected', ''); this.reveal(); this.labelElement.<API key>(false); if (tree) tree.selectedItem = this; } else { this.removeAttribute('selected'); rowItem.removeAttribute('selected'); if (tree && tree.selectedItem == this) tree.selectedItem = null; } }, /** * Whether the tree item has children. * @type {boolean} */ get mayHaveChildren_() { return this.hasAttribute('may-have-children'); }, set mayHaveChildren_(b) { var rowItem = this.firstElementChild; if (b) { this.setAttribute('may-have-children', ''); rowItem.setAttribute('may-have-children', ''); } else { this.removeAttribute('may-have-children'); rowItem.removeAttribute('may-have-children'); } }, /** * Whether the tree item has children. * @type {boolean} */ get hasChildren() { return !!this.items[0]; }, /** * Whether the tree item has children. * @type {boolean} */ set hasChildren(b) { var rowItem = this.firstElementChild; this.setAttribute('has-children', b); rowItem.setAttribute('has-children', b); if (b) this.mayHaveChildren_ = true; }, /** * Called when the user clicks on a tree item. This is forwarded from the * cr.ui.Tree. * @param {Event} e The click event. */ handleClick: function(e) { if (e.target.className == 'expand-icon') this.expanded = !this.expanded; else this.selected = true; }, /** * Makes the tree item user editable. If the user renamed the item a * bubbling {@code rename} event is fired. * @type {boolean} */ set editing(editing) { var oldEditing = this.editing; if (editing == oldEditing) return; var self = this; var labelEl = this.labelElement; var text = this.label; var input; // Handles enter and escape which trigger reset and commit respectively. function handleKeydown(e) { // Make sure that the tree does not handle the key. 
e.stopPropagation(); // Calling tree.focus blurs the input which will make the tree item // non editable. switch (e.keyIdentifier) { case 'U+001B': // Esc input.value = text; // fall through case 'Enter': self.tree.focus(); } } function stopPropagation(e) { e.stopPropagation(); } if (editing) { this.selected = true; this.setAttribute('editing', ''); this.draggable = false; // We create an input[type=text] and copy over the label value. When // the input loses focus we set editing to false again. input = this.ownerDocument.createElement('input'); input.value = text; if (labelEl.firstChild) labelEl.replaceChild(input, labelEl.firstChild); else labelEl.appendChild(input); input.addEventListener('keydown', handleKeydown); input.addEventListener('blur', (function() { this.editing = false; }).bind(this)); // Make sure that double clicks do not expand and collapse the tree // item. var eventsToStop = ['mousedown', 'mouseup', 'contextmenu', 'dblclick']; eventsToStop.forEach(function(type) { input.addEventListener(type, stopPropagation); }); // Wait for the input element to recieve focus before sizing it. var rowElement = this.rowElement; function onFocus() { input.removeEventListener('focus', onFocus); // 20 = the padding and border of the tree-row cr.ui.limitInputWidth(input, rowElement, 100); } input.addEventListener('focus', onFocus); input.focus(); input.select(); this.oldLabel_ = text; } else { this.removeAttribute('editing'); this.draggable = true; input = labelEl.firstChild; var value = input.value; if (/^\s*$/.test(value)) { labelEl.textContent = this.oldLabel_; } else { labelEl.textContent = value; if (value != this.oldLabel_) { cr.dispatchSimpleEvent(this, 'rename', true); } } delete this.oldLabel_; } }, get editing() { return this.hasAttribute('editing'); } }; /** * Helper function that returns the next visible tree item. * @param {cr.ui.TreeItem} item The tree item. * @return {cr.ui.TreeItem} The found item or null. 
*/ function getNext(item) { if (item.expanded) { var firstChild = item.items[0]; if (firstChild) { return firstChild; } } return getNextHelper(item); } /** * Another helper function that returns the next visible tree item. * @param {cr.ui.TreeItem} item The tree item. * @return {cr.ui.TreeItem} The found item or null. */ function getNextHelper(item) { if (!item) return null; var nextSibling = item.nextElementSibling; if (nextSibling) { return nextSibling; } return getNextHelper(item.parentItem); } /** * Helper function that returns the previous visible tree item. * @param {cr.ui.TreeItem} item The tree item. * @return {cr.ui.TreeItem} The found item or null. */ function getPrevious(item) { var previousSibling = item.<API key>; return previousSibling ? getLastHelper(previousSibling) : item.parentItem; } /** * Helper function that returns the last visible tree item in the subtree. * @param {cr.ui.TreeItem} item The item to find the last visible item for. * @return {cr.ui.TreeItem} The found item or null. */ function getLastHelper(item) { if (!item) return null; if (item.expanded && item.hasChildren) { var lastChild = item.items[item.items.length - 1]; return getLastHelper(lastChild); } return item; } // Export return { Tree: Tree, TreeItem: TreeItem }; });
<!DOCTYPE html> <html> <head> <title>Inline text in the top element</title> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <script type="text/javascript" src="../test.js"></script> <style> span { color:blue; } p { background-color: green; } </style> </head> <body> Some inline text <span> followed by text in span </span> followed by more inline text. <p>Then a block level element.</p> Then more inline text. </body> </html>
(function (factory) { if (typeof module === 'object' && module.exports) { module.exports = factory; } else { factory(Highcharts); } }(function (H) { var seriesTypes = H.seriesTypes, map = H.map, merge = H.merge, extend = H.extend, extendClass = H.extendClass, defaultOptions = H.getOptions(), plotOptions = defaultOptions.plotOptions, noop = function () { }, each = H.each, grep = H.grep, pick = H.pick, Series = H.Series, stableSort = H.stableSort, Color = H.Color, eachObject = function (list, func, context) { var key; context = context || this; for (key in list) { if (list.hasOwnProperty(key)) { func.call(context, list[key], key, list); } } }, reduce = function (arr, func, previous, context) { context = context || this; arr = arr || []; // @note should each be able to handle empty values automatically? each(arr, function (current, i) { previous = func.call(context, previous, current, i, arr); }); return previous; }, // @todo find correct name for this function. recursive = function (item, func, context) { var next; context = context || this; next = func.call(context, item); if (next !== false) { recursive(next, func, context); } }; // Define default options plotOptions.treemap = merge(plotOptions.scatter, { showInLegend: false, marker: false, borderColor: '#E0E0E0', borderWidth: 1, dataLabels: { enabled: true, defer: false, verticalAlign: 'middle', formatter: function () { // #2945 return this.point.name || this.point.id; }, inside: true }, tooltip: { headerFormat: '', pointFormat: '<b>{point.name}</b>: {point.node.val}</b><br/>' }, layoutAlgorithm: 'sliceAndDice', <API key>: 'vertical', <API key>: false, levelIsConstant: true, states: { hover: { borderColor: '#A0A0A0', brightness: seriesTypes.heatmap ? 
0 : 0.1, shadow: false } }, drillUpButton: { position: { align: 'right', x: -10, y: 10 } } }); // Stolen from heatmap var colorSeriesMixin = { // mapping between SVG attributes and the corresponding options pointAttrToOptions: {}, pointArrayMap: ['value'], axisTypes: seriesTypes.heatmap ? ['xAxis', 'yAxis', 'colorAxis'] : ['xAxis', 'yAxis'], optionalAxis: 'colorAxis', getSymbol: noop, parallelArrays: ['x', 'y', 'value', 'colorValue'], colorKey: 'colorValue', // Point color option key translateColors: seriesTypes.heatmap && seriesTypes.heatmap.prototype.translateColors }; // The Treemap series type seriesTypes.treemap = extendClass(seriesTypes.scatter, merge(colorSeriesMixin, { type: 'treemap', trackerGroups: ['group', 'dataLabelsGroup'], pointClass: extendClass(H.Point, { setVisible: seriesTypes.pie.prototype.pointClass.prototype.setVisible }), /** * Creates an object map from parent id to childrens index. * @param {Array} data List of points set in options. * @param {string} data[].parent Parent id of point. * @param {Array} ids List of all point ids. * @return {Object} Map from parent id to children index in data. */ getListOfParents: function (data, ids) { var listOfParents = reduce(data, function (prev, curr, i) { var parent = pick(curr.parent, ''); if (prev[parent] === undefined) { prev[parent] = []; } prev[parent].push(i); return prev; }, {}); // If parent does not exist, hoist parent to root of tree. 
eachObject(listOfParents, function (children, parent, list) { if ((parent !== '') && (H.inArray(parent, ids) === -1)) { each(children, function (child) { list[''].push(child); }); delete list[parent]; } }); return listOfParents; }, /** * Creates a tree structured object from the series points */ getTree: function () { var tree, series = this, allIds = map(this.data, function (d) { return d.id; }), parentList = series.getListOfParents(this.data, allIds); series.nodeMap = []; tree = series.buildNode('', -1, 0, parentList, null); recursive(this.nodeMap[this.rootNode], function (node) { var next = false, p = node.parent; node.visible = true; if (p || p === '') { next = series.nodeMap[p]; } return next; }); recursive(this.nodeMap[this.rootNode].children, function (children) { var next = false; each(children, function (child) { child.visible = true; if (child.children.length) { next = (next || []).concat(child.children); } }); return next; }); this.setTreeValues(tree); return tree; }, init: function (chart, options) { var series = this; Series.prototype.init.call(series, chart, options); if (series.options.allowDrillToNode) { series.drillTo(); } }, buildNode: function (id, i, level, list, parent) { var series = this, children = [], point = series.points[i], node, child; // Actions each((list[id] || []), function (i) { child = series.buildNode(series.points[i].id, i, (level + 1), list, id); children.push(child); }); node = { id: id, i: i, children: children, level: level, parent: parent, visible: false // @todo move this to better location }; series.nodeMap[node.id] = node; if (point) { point.node = node; } return node; }, setTreeValues: function (tree) { var series = this, options = series.options, childrenTotal = 0, children = [], val, point = series.points[tree.i]; // First give the children some values each(tree.children, function (child) { child = series.setTreeValues(child); children.push(child); if (!child.ignore) { childrenTotal += child.val; } else { // @todo Add 
predicate to avoid looping already ignored children recursive(child.children, function (children) { var next = false; each(children, function (node) { extend(node, { ignore: true, isLeaf: false, visible: false }); if (node.children.length) { next = (next || []).concat(node.children); } }); return next; }); } }); // Sort the children stableSort(children, function (a, b) { return a.sortIndex - b.sortIndex; }); // Set the values val = pick(point && point.value, childrenTotal); extend(tree, { children: children, childrenTotal: childrenTotal, // Ignore this node if point is not visible ignore: !(pick(point && point.visible, true) && (val > 0)), isLeaf: tree.visible && !childrenTotal, levelDynamic: (options.levelIsConstant ? tree.level : (tree.level - series.nodeMap[series.rootNode].level)), name: pick(point && point.name, ''), sortIndex: pick(point && point.sortIndex, -val), val: val }); return tree; }, /** * Recursive function which calculates the area for all children of a node. * @param {Object} node The node which is parent to the children. * @param {Object} area The rectangular area of the parent. */ <API key>: function (parent, area) { var series = this, options = series.options, level = this.levelMap[parent.levelDynamic + 1], algorithm = pick((series[level && level.layoutAlgorithm] && level.layoutAlgorithm), options.layoutAlgorithm), alternate = options.<API key>, childrenValues = [], children; // Collect all children which should be included children = grep(parent.children, function (n) { return !n.ignore; }); if (level && level.<API key>) { area.direction = level.<API key> === 'vertical' ? 0 : 1; } childrenValues = series[algorithm](area, children); each(children, function (child, index) { var values = childrenValues[index]; child.values = merge(values, { val: child.childrenTotal, direction: (alternate ? 
1 - area.direction : area.direction) }); child.pointValues = merge(values, { x: (values.x / series.axisRatio), width: (values.width / series.axisRatio) }); // If node has children, then call method recursively if (child.children.length) { series.<API key>(child, child.values); } }); }, setPointValues: function () { var series = this, xAxis = series.xAxis, yAxis = series.yAxis; each(series.points, function (point) { var node = point.node, values = node.pointValues, x1, x2, y1, y2; // Points which is ignored, have no values. if (values) { x1 = Math.round(xAxis.translate(values.x, 0, 0, 0, 1)); x2 = Math.round(xAxis.translate(values.x + values.width, 0, 0, 0, 1)); y1 = Math.round(yAxis.translate(values.y, 0, 0, 0, 1)); y2 = Math.round(yAxis.translate(values.y + values.height, 0, 0, 0, 1)); // Set point values point.shapeType = 'rect'; point.shapeArgs = { x: Math.min(x1, x2), y: Math.min(y1, y2), width: Math.abs(x2 - x1), height: Math.abs(y2 - y1) }; point.plotX = point.shapeArgs.x + (point.shapeArgs.width / 2); point.plotY = point.shapeArgs.y + (point.shapeArgs.height / 2); } else { // Reset visibility delete point.plotX; delete point.plotY; } }); }, setColorRecursive: function (node, color) { var series = this, point, level; if (node) { point = series.points[node.i]; level = series.levelMap[node.levelDynamic]; // Select either point color, level color or inherited color. 
color = pick(point && point.options.color, level && level.color, color); if (point) { point.color = color; } // Do it all again with the children if (node.children.length) { each(node.children, function (child) { series.setColorRecursive(child, color); }); } } }, algorithmGroup: function (h, w, d, p) { this.height = h; this.width = w; this.plot = p; this.direction = d; this.startDirection = d; this.total = 0; this.nW = 0; this.lW = 0; this.nH = 0; this.lH = 0; this.elArr = []; this.lP = { total: 0, lH: 0, nH: 0, lW: 0, nW: 0, nR: 0, lR: 0, aspectRatio: function (w, h) { return Math.max((w / h), (h / w)); } }; this.addElement = function (el) { this.lP.total = this.elArr[this.elArr.length - 1]; this.total = this.total + el; if (this.direction === 0) { // Calculate last point old aspect ratio this.lW = this.nW; this.lP.lH = this.lP.total / this.lW; this.lP.lR = this.lP.aspectRatio(this.lW, this.lP.lH); // Calculate last point new aspect ratio this.nW = this.total / this.height; this.lP.nH = this.lP.total / this.nW; this.lP.nR = this.lP.aspectRatio(this.nW, this.lP.nH); } else { // Calculate last point old aspect ratio this.lH = this.nH; this.lP.lW = this.lP.total / this.lH; this.lP.lR = this.lP.aspectRatio(this.lP.lW, this.lH); // Calculate last point new aspect ratio this.nH = this.total / this.width; this.lP.nW = this.lP.total / this.nH; this.lP.nR = this.lP.aspectRatio(this.lP.nW, this.nH); } this.elArr.push(el); }; this.reset = function () { this.nW = 0; this.lW = 0; this.elArr = []; this.total = 0; }; }, algorithmCalcPoints: function (directionChange, last, group, childrenArea) { var pX, pY, pW, pH, gW = group.lW, gH = group.lH, plot = group.plot, keep, i = 0, end = group.elArr.length - 1; if (last) { gW = group.nW; gH = group.nH; } else { keep = group.elArr[group.elArr.length - 1]; } each(group.elArr, function (p) { if (last || (i < end)) { if (group.direction === 0) { pX = plot.x; pY = plot.y; pW = gW; pH = p / pW; } else { pX = plot.x; pY = plot.y; pH = gH; pW 
= p / pH; } childrenArea.push({ x: pX, y: pY, width: pW, height: pH }); if (group.direction === 0) { plot.y = plot.y + pH; } else { plot.x = plot.x + pW; } } i = i + 1; }); // Reset variables group.reset(); if (group.direction === 0) { group.width = group.width - gW; } else { group.height = group.height - gH; } plot.y = plot.parent.y + (plot.parent.height - group.height); plot.x = plot.parent.x + (plot.parent.width - group.width); if (directionChange) { group.direction = 1 - group.direction; } // If not last, then add uncalculated element if (!last) { group.addElement(keep); } }, <API key>: function (directionChange, parent, children) { var childrenArea = [], series = this, pTot, plot = { x: parent.x, y: parent.y, parent: parent }, direction = parent.direction, i = 0, end = children.length - 1, group = new this.algorithmGroup(parent.height, parent.width, direction, plot); // Loop through and calculate all areas each(children, function (child) { pTot = (parent.width * parent.height) * (child.val / parent.val); group.addElement(pTot); if (group.lP.nR > group.lP.lR) { series.algorithmCalcPoints(directionChange, false, group, childrenArea, plot); } // If last child, then calculate all remaining areas if (i === end) { series.algorithmCalcPoints(directionChange, true, group, childrenArea, plot); } i = i + 1; }); return childrenArea; }, algorithmFill: function (directionChange, parent, children) { var childrenArea = [], pTot, direction = parent.direction, x = parent.x, y = parent.y, width = parent.width, height = parent.height, pX, pY, pW, pH; each(children, function (child) { pTot = (parent.width * parent.height) * (child.val / parent.val); pX = x; pY = y; if (direction === 0) { pH = height; pW = pTot / pH; width = width - pW; x = x + pW; } else { pW = width; pH = pTot / pW; height = height - pH; y = y + pH; } childrenArea.push({ x: pX, y: pY, width: pW, height: pH }); if (directionChange) { direction = 1 - direction; } }); return childrenArea; }, strip: function 
(parent, children) { return this.<API key>(false, parent, children); }, squarified: function (parent, children) { return this.<API key>(true, parent, children); }, sliceAndDice: function (parent, children) { return this.algorithmFill(true, parent, children); }, stripes: function (parent, children) { return this.algorithmFill(false, parent, children); }, translate: function () { var pointValues, seriesArea, tree, val; // Call prototype function Series.prototype.translate.call(this); // Assign variables this.rootNode = pick(this.options.rootId, ''); // Create a object map from level to options this.levelMap = reduce(this.options.levels, function (arr, item) { arr[item.level] = item; return arr; }, {}); tree = this.tree = this.getTree(); // @todo Only if series.isDirtyData is true // Calculate plotting values. this.axisRatio = (this.xAxis.len / this.yAxis.len); this.nodeMap[''].pointValues = pointValues = { x: 0, y: 0, width: 100, height: 100 }; this.nodeMap[''].values = seriesArea = merge(pointValues, { width: (pointValues.width * this.axisRatio), direction: (this.options.<API key> === 'vertical' ? 0 : 1), val: tree.val }); this.<API key>(tree, seriesArea); // Logic for point colors if (this.colorAxis) { this.translateColors(); } else if (!this.options.colorByPoint) { this.setColorRecursive(this.tree, undefined); } // Update axis extremes according to the root node. val = this.nodeMap[this.rootNode].pointValues; this.xAxis.setExtremes(val.x, val.x + val.width, false); this.yAxis.setExtremes(val.y, val.y + val.height, false); this.xAxis.setScale(); this.yAxis.setScale(); // Assign values to points. this.setPointValues(); }, /** * Extend drawDataLabels with logic to handle custom options related to the treemap series: * - Points which is not a leaf node, has dataLabels disabled by default. * - Options set on series.levels is merged in. * - Width of the dataLabel is set to match the width of the point shape. 
*/ drawDataLabels: function () { var series = this, points = grep(series.points, function (n) { return n.node.visible; }), options, level; each(points, function (point) { level = series.levelMap[point.node.levelDynamic]; // Set options to new object to avoid problems with scope options = { style: {} }; // If not a leaf, then label should be disabled as default if (!point.node.isLeaf) { options.enabled = false; } // If options for level exists, include them as well if (level && level.dataLabels) { options = merge(options, level.dataLabels); series._hasPointLabels = true; } // Set dataLabel width to the width of the point shape. if (point.shapeArgs) { options.style.width = point.shapeArgs.width; } // Merge custom options with point options point.dlOptions = merge(options, point.options.dataLabels); }); Series.prototype.drawDataLabels.call(this); }, alignDataLabel: seriesTypes.column.prototype.alignDataLabel, /** * Get presentational attributes */ pointAttribs: function (point, state) { var level = this.levelMap[point.node.levelDynamic] || {}, options = this.options, attr, stateOptions = (state && options.states[state]) || {}; // Set attributes by precedence. Point trumps level trumps series. Stroke width uses pick // because it can be 0. attr = { 'stroke': point.borderColor || level.borderColor || stateOptions.borderColor || options.borderColor, 'stroke-width': pick(point.borderWidth, level.borderWidth, stateOptions.borderWidth, options.borderWidth), 'dashstyle': point.borderDashStyle || level.borderDashStyle || stateOptions.borderDashStyle || options.borderDashStyle, 'fill': point.color || this.color, 'zIndex': state === 'hover' ? 1 : 0 }; if (point.node.level <= this.nodeMap[this.rootNode].level) { // Hide levels above the current view attr.fill = 'none'; attr['stroke-width'] = 0; } else if (!point.node.isLeaf) { // If not a leaf, then remove fill // @todo let users set the opacity attr.fill = pick(options.interactByLeaf, !options.allowDrillToNode) ? 
'none' : Color(attr.fill).setOpacity(state === 'hover' ? 0.75 : 0.15).get(); } else if (state) { // Brighten and hoist the hover nodes attr.fill = Color(attr.fill).brighten(stateOptions.brightness).get(); } return attr; }, /** * Extending ColumnSeries drawPoints */ drawPoints: function () { var series = this, points = grep(series.points, function (n) { return n.node.visible; }); each(points, function (point) { var groupKey = 'levelGroup-' + point.node.levelDynamic; if (!series[groupKey]) { series[groupKey] = series.chart.renderer.g(groupKey) .attr({ zIndex: 1000 - point.node.levelDynamic // @todo Set the zIndex based upon the number of levels, instead of using 1000 }) .add(series.group); } point.group = series[groupKey]; // Preliminary code in prepraration for HC5 that uses pointAttribs for all series point.pointAttr = { '': series.pointAttribs(point), 'hover': series.pointAttribs(point, 'hover'), 'select': {} }; }); // Call standard drawPoints seriesTypes.column.prototype.drawPoints.call(this); // If drillToNode is allowed, set a point cursor on clickables & add drillId to point if (series.options.allowDrillToNode) { each(points, function (point) { var cursor, drillId; if (point.graphic) { drillId = point.drillId = series.options.interactByLeaf ? series.drillToByLeaf(point) : series.drillToByGroup(point); cursor = drillId ? 'pointer' : 'default'; point.graphic.css({ cursor: cursor }); } }); } }, /** * Add drilling on the suitable points */ drillTo: function () { var series = this; H.addEvent(series, 'click', function (event) { var point = event.point, drillId = point.drillId, drillName; // If a drill id is returned, add click event and cursor. if (drillId) { drillName = series.nodeMap[series.rootNode].name || series.rootNode; point.setState(''); // Remove hover series.drillToNode(drillId); series.showDrillUpButton(drillName); } }); }, /** * Finds the drill id for a parent node. 
* Returns false if point should not have a click event * @param {Object} point * @return {string || boolean} Drill to id or false when point should not have a click event */ drillToByGroup: function (point) { var series = this, drillId = false; if ((point.node.level - series.nodeMap[series.rootNode].level) === 1 && !point.node.isLeaf) { drillId = point.id; } return drillId; }, /** * Finds the drill id for a leaf node. * Returns false if point should not have a click event * @param {Object} point * @return {string || boolean} Drill to id or false when point should not have a click event */ drillToByLeaf: function (point) { var series = this, drillId = false, nodeParent; if ((point.node.parent !== series.rootNode) && (point.node.isLeaf)) { nodeParent = point.node; while (!drillId) { nodeParent = series.nodeMap[nodeParent.parent]; if (nodeParent.parent === series.rootNode) { drillId = nodeParent.id; } } } return drillId; }, drillUp: function () { var drillPoint = null, node, parent; if (this.rootNode) { node = this.nodeMap[this.rootNode]; if (node.parent !== null) { drillPoint = this.nodeMap[node.parent]; } else { drillPoint = this.nodeMap['']; } } if (drillPoint !== null) { this.drillToNode(drillPoint.id); if (drillPoint.id === '') { this.drillUpButton = this.drillUpButton.destroy(); } else { parent = this.nodeMap[drillPoint.parent]; this.showDrillUpButton((parent.name || parent.id)); } } }, drillToNode: function (id) { this.options.rootId = id; this.isDirty = true; // Force redraw this.chart.redraw(); }, showDrillUpButton: function (name) { var series = this, backText = (name || '< Back'), buttonOptions = series.options.drillUpButton, attr, states; if (buttonOptions.text) { backText = buttonOptions.text; } if (!this.drillUpButton) { attr = buttonOptions.theme; states = attr && attr.states; this.drillUpButton = this.chart.renderer.button( backText, null, null, function () { series.drillUp(); }, attr, states && states.hover, states && states.select ) .attr({ align: 
buttonOptions.position.align, zIndex: 9 }) .add() .align(buttonOptions.position, false, buttonOptions.relativeTo || 'plotBox'); } else { this.drillUpButton.attr({ text: backText }) .align(); } }, buildKDTree: noop, drawLegendSymbol: H.LegendSymbolMixin.drawRectangle, getExtremes: function () { // Get the extremes from the value data Series.prototype.getExtremes.call(this, this.colorValueData); this.valueMin = this.dataMin; this.valueMax = this.dataMax; // Get the extremes from the y data Series.prototype.getExtremes.call(this); }, getExtremesFromAll: true, bindAxes: function () { var treeAxis = { endOnTick: false, gridLineWidth: 0, lineWidth: 0, min: 0, dataMin: 0, minPadding: 0, max: 100, dataMax: 100, maxPadding: 0, startOnTick: false, title: null, tickPositions: [] }; Series.prototype.bindAxes.call(this); H.extend(this.yAxis.options, treeAxis); H.extend(this.xAxis.options, treeAxis); } })); }));
<!DOCTYPE html> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=0"> <meta name="<API key>" content="yes"> <title>OpenLayers OSM and Google Example</title> <link rel="stylesheet" href="../theme/default/style.css" type="text/css"> <link rel="stylesheet" href="../theme/default/google.css" type="text/css"> <link rel="stylesheet" href="style.css" type="text/css"> <script src="http://maps.google.com/maps/api/js?v=3&amp;sensor=false"></script> <script src="../lib/OpenLayers.js"></script> <script src="osm-google.js"></script> </head> <body onload="init()"> <h1 id="title">OSM and Google Together</h1> <p id="shortdesc"> Demonstrate use of an OSM layer and a Google layer as base layers. </p> <div id="tags"> openstreetmap google light </div> <div id="map" class="smallmap"></div> <div id="docs"> <p> The Google(v3) layer and the OSM are both in the same projection - spherical mercator - and can be used on a map together. See the <a href="osm-google.js" target="_blank"> osm-google.js source</a> to see how this is done. </p> </div> </body> </html>
#include <linux/hw_breakpoint.h> #include "util.h" #include "../perf.h" #include "evlist.h" #include "evsel.h" #include "parse-options.h" #include "parse-events.h" #include "exec_cmd.h" #include "string.h" #include "symbol.h" #include "cache.h" #include "header.h" #include "debugfs.h" #include "parse-events-bison.h" #define YY_EXTRA_TYPE int #include "parse-events-flex.h" #include "pmu.h" #define MAX_NAME_LEN 100 struct event_symbol { const char *symbol; const char *alias; }; #ifdef PARSER_DEBUG extern int parse_events_debug; #endif int parse_events_parse(void *data, void *scanner); static struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = { [<API key>] = { .symbol = "cpu-cycles", .alias = "cycles", }, [<API key>] = { .symbol = "instructions", .alias = "", }, [<API key>] = { .symbol = "cache-references", .alias = "", }, [<API key>] = { .symbol = "cache-misses", .alias = "", }, [<API key>] = { .symbol = "branch-instructions", .alias = "branches", }, [<API key>] = { .symbol = "branch-misses", .alias = "", }, [<API key>] = { .symbol = "bus-cycles", .alias = "", }, [<API key>] = { .symbol = "<API key>", .alias = "<API key>", }, [<API key>] = { .symbol = "<API key>", .alias = "idle-cycles-backend", }, [<API key>] = { .symbol = "ref-cycles", .alias = "", }, }; static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { [<API key>] = { .symbol = "cpu-clock", .alias = "", }, [<API key>] = { .symbol = "task-clock", .alias = "", }, [<API key>] = { .symbol = "page-faults", .alias = "faults", }, [<API key>] = { .symbol = "context-switches", .alias = "cs", }, [<API key>] = { .symbol = "cpu-migrations", .alias = "migrations", }, [<API key>] = { .symbol = "minor-faults", .alias = "", }, [<API key>] = { .symbol = "major-faults", .alias = "", }, [<API key>] = { .symbol = "alignment-faults", .alias = "", }, [<API key>] = { .symbol = "emulation-faults", .alias = "", }, }; #define __PERF_EVENT_FIELD(config, name) \ ((config & PERF_EVENT_##name##_MASK) >> 
PERF_EVENT_##name##_SHIFT) #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) #define for_each_subsystem(sys_dir, sys_dirent, sys_next) \ while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \ if (sys_dirent.d_type == DT_DIR && \ (strcmp(sys_dirent.d_name, ".")) && \ (strcmp(sys_dirent.d_name, ".."))) static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) { char evt_path[MAXPATHLEN]; int fd; snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, sys_dir->d_name, evt_dir->d_name); fd = open(evt_path, O_RDONLY); if (fd < 0) return -EINVAL; close(fd); return 0; } #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \ while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ if (evt_dirent.d_type == DT_DIR && \ (strcmp(evt_dirent.d_name, ".")) && \ (strcmp(evt_dirent.d_name, "..")) && \ (!tp_event_has_id(&sys_dirent, &evt_dirent))) #define MAX_EVENT_LENGTH 512 struct tracepoint_path *<API key>(u64 config) { struct tracepoint_path *path = NULL; DIR *sys_dir, *evt_dir; struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; char id_buf[24]; int fd; u64 id; char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; if (<API key>(tracing_events_path)) return NULL; sys_dir = opendir(tracing_events_path); if (!sys_dir) return NULL; for_each_subsystem(sys_dir, sys_dirent, sys_next) { snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_dirent.d_name); evt_dir = opendir(dir_path); if (!evt_dir) continue; for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, evt_dirent.d_name); fd = open(evt_path, O_RDONLY); if (fd < 0) continue; if (read(fd, id_buf, sizeof(id_buf)) < 0) { close(fd); continue; } close(fd); id = atoll(id_buf); if (id == 
config) { closedir(evt_dir); closedir(sys_dir); path = zalloc(sizeof(*path)); path->system = malloc(MAX_EVENT_LENGTH); if (!path->system) { free(path); return NULL; } path->name = malloc(MAX_EVENT_LENGTH); if (!path->name) { free(path->system); free(path); return NULL; } strncpy(path->system, sys_dirent.d_name, MAX_EVENT_LENGTH); strncpy(path->name, evt_dirent.d_name, MAX_EVENT_LENGTH); return path; } } closedir(evt_dir); } closedir(sys_dir); return NULL; } const char *event_type(int type) { switch (type) { case PERF_TYPE_HARDWARE: return "hardware"; case PERF_TYPE_SOFTWARE: return "software"; case <API key>: return "tracepoint"; case PERF_TYPE_HW_CACHE: return "hardware-cache"; default: break; } return "unknown"; } static int __add_event(struct list_head **_list, int *idx, struct perf_event_attr *attr, char *name, struct cpu_map *cpus) { struct perf_evsel *evsel; struct list_head *list = *_list; if (!list) { list = malloc(sizeof(*list)); if (!list) return -ENOMEM; INIT_LIST_HEAD(list); } event_attr_init(attr); evsel = perf_evsel__new(attr, (*idx)++); if (!evsel) { free(list); return -ENOMEM; } evsel->cpus = cpus; if (name) evsel->name = strdup(name); list_add_tail(&evsel->node, list); *_list = list; return 0; } static int add_event(struct list_head **_list, int *idx, struct perf_event_attr *attr, char *name) { return __add_event(_list, idx, attr, name, NULL); } static int parse_aliases(char *str, const char *names[][<API key>], int size) { int i, j; int n, longest = -1; for (i = 0; i < size; i++) { for (j = 0; j < <API key> && names[i][j]; j++) { n = strlen(names[i][j]); if (n > longest && !strncasecmp(str, names[i][j], n)) longest = n; } if (longest > 0) return i; } return -1; } int <API key>(struct list_head **list, int *idx, char *type, char *op_result1, char *op_result2) { struct perf_event_attr attr; char name[MAX_NAME_LEN]; int cache_type = -1, cache_op = -1, cache_result = -1; char *op_result[2] = { op_result1, op_result2 }; int i, n; /* * No fallback - if 
we cannot get a clear cache type * then bail out: */ cache_type = parse_aliases(type, <API key>, <API key>); if (cache_type == -1) return -EINVAL; n = snprintf(name, MAX_NAME_LEN, "%s", type); for (i = 0; (i < 2) && (op_result[i]); i++) { char *str = op_result[i]; n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str); if (cache_op == -1) { cache_op = parse_aliases(str, <API key>, <API key>); if (cache_op >= 0) { if (!<API key>(cache_type, cache_op)) return -EINVAL; continue; } } if (cache_result == -1) { cache_result = parse_aliases(str, <API key>, <API key>); if (cache_result >= 0) continue; } } /* * Fall back to reads: */ if (cache_op == -1) cache_op = <API key>; /* * Fall back to accesses: */ if (cache_result == -1) cache_result = <API key>; memset(&attr, 0, sizeof(attr)); attr.config = cache_type | (cache_op << 8) | (cache_result << 16); attr.type = PERF_TYPE_HW_CACHE; return add_event(list, idx, &attr, name); } static int add_tracepoint(struct list_head **listp, int *idx, char *sys_name, char *evt_name) { struct perf_evsel *evsel; struct list_head *list = *listp; if (!list) { list = malloc(sizeof(*list)); if (!list) return -ENOMEM; INIT_LIST_HEAD(list); } evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++); if (!evsel) { free(list); return -ENOMEM; } list_add_tail(&evsel->node, list); *listp = list; return 0; } static int <API key>(struct list_head **list, int *idx, char *sys_name, char *evt_name) { char evt_path[MAXPATHLEN]; struct dirent *evt_ent; DIR *evt_dir; int ret = 0; snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name); evt_dir = opendir(evt_path); if (!evt_dir) { perror("Can't open event dir"); return -1; } while (!ret && (evt_ent = readdir(evt_dir))) { if (!strcmp(evt_ent->d_name, ".") || !strcmp(evt_ent->d_name, "..") || !strcmp(evt_ent->d_name, "enable") || !strcmp(evt_ent->d_name, "filter")) continue; if (!strglobmatch(evt_ent->d_name, evt_name)) continue; ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name); } 
closedir(evt_dir); return ret; } static int <API key>(struct list_head **list, int *idx, char *sys_name, char *evt_name) { return strpbrk(evt_name, "*?") ? <API key>(list, idx, sys_name, evt_name) : add_tracepoint(list, idx, sys_name, evt_name); } static int <API key>(struct list_head **list, int *idx, char *sys_name, char *evt_name) { struct dirent *events_ent; DIR *events_dir; int ret = 0; events_dir = opendir(tracing_events_path); if (!events_dir) { perror("Can't open event dir"); return -1; } while (!ret && (events_ent = readdir(events_dir))) { if (!strcmp(events_ent->d_name, ".") || !strcmp(events_ent->d_name, "..") || !strcmp(events_ent->d_name, "enable") || !strcmp(events_ent->d_name, "header_event") || !strcmp(events_ent->d_name, "header_page")) continue; if (!strglobmatch(events_ent->d_name, sys_name)) continue; ret = <API key>(list, idx, events_ent->d_name, evt_name); } closedir(events_dir); return ret; } int <API key>(struct list_head **list, int *idx, char *sys, char *event) { int ret; ret = <API key>(tracing_events_path); if (ret) return ret; if (strpbrk(sys, "*?")) return <API key>(list, idx, sys, event); else return <API key>(list, idx, sys, event); } static int <API key>(const char *type, struct perf_event_attr *attr) { int i; for (i = 0; i < 3; i++) { if (!type || !type[i]) break; #define CHECK_SET_TYPE(bit) \ do { \ if (attr->bp_type & bit) \ return -EINVAL; \ else \ attr->bp_type |= bit; \ } while (0) switch (type[i]) { case 'r': CHECK_SET_TYPE(HW_BREAKPOINT_R); break; case 'w': CHECK_SET_TYPE(HW_BREAKPOINT_W); break; case 'x': CHECK_SET_TYPE(HW_BREAKPOINT_X); break; default: return -EINVAL; } } #undef CHECK_SET_TYPE if (!attr->bp_type) /* Default */ attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; return 0; } int <API key>(struct list_head **list, int *idx, void *ptr, char *type) { struct perf_event_attr attr; memset(&attr, 0, sizeof(attr)); attr.bp_addr = (unsigned long) ptr; if (<API key>(type, &attr)) return -EINVAL; /* * We should find a 
nice way to override the access length * Provide some defaults for now */ if (attr.bp_type == HW_BREAKPOINT_X) attr.bp_len = sizeof(long); else attr.bp_len = HW_BREAKPOINT_LEN_4; attr.type = <API key>; attr.sample_period = 1; return add_event(list, idx, &attr, NULL); } static int config_term(struct perf_event_attr *attr, struct parse_events_term *term) { #define CHECK_TYPE_VAL(type) \ do { \ if (<API key> ## type != term->type_val) \ return -EINVAL; \ } while (0) switch (term->type_term) { case <API key>: CHECK_TYPE_VAL(NUM); attr->config = term->val.num; break; case <API key>: CHECK_TYPE_VAL(NUM); attr->config1 = term->val.num; break; case <API key>: CHECK_TYPE_VAL(NUM); attr->config2 = term->val.num; break; case <API key>: CHECK_TYPE_VAL(NUM); attr->sample_period = term->val.num; break; case <API key>: /* * TODO uncomment when the field is available * attr->branch_sample_type = term->val.num; */ break; case <API key>: CHECK_TYPE_VAL(STR); break; default: return -EINVAL; } return 0; #undef CHECK_TYPE_VAL } static int config_attr(struct perf_event_attr *attr, struct list_head *head, int fail) { struct parse_events_term *term; list_for_each_entry(term, head, list) if (config_term(attr, term) && fail) return -EINVAL; return 0; } int <API key>(struct list_head **list, int *idx, u32 type, u64 config, struct list_head *head_config) { struct perf_event_attr attr; memset(&attr, 0, sizeof(attr)); attr.type = type; attr.config = config; if (head_config && config_attr(&attr, head_config, 1)) return -EINVAL; return add_event(list, idx, &attr, NULL); } static int <API key>(struct parse_events_term *term) { return term->type_term == <API key>; } static char *pmu_event_name(struct list_head *head_terms) { struct parse_events_term *term; list_for_each_entry(term, head_terms, list) if (<API key>(term)) return term->val.str; return NULL; } int <API key>(struct list_head **list, int *idx, char *name, struct list_head *head_config) { struct perf_event_attr attr; struct perf_pmu *pmu; 
pmu = perf_pmu__find(name); if (!pmu) return -EINVAL; memset(&attr, 0, sizeof(attr)); if (<API key>(pmu, head_config)) return -EINVAL; /* * Configure hardcoded terms first, no need to check * return value when called with fail == 0 ;) */ config_attr(&attr, head_config, 0); if (perf_pmu__config(pmu, &attr, head_config)) return -EINVAL; return __add_event(list, idx, &attr, pmu_event_name(head_config), pmu->cpus); } int <API key>(struct list_head *list, char *event_mod) { return <API key>(list, event_mod, true); } void <API key>(char *name, struct list_head *list) { struct perf_evsel *leader; <API key>(list); leader = list_entry(list->next, struct perf_evsel, node); leader->group_name = name ? strdup(name) : NULL; } void <API key>(struct list_head *list_event, struct list_head *list_all) { /* * Called for single event definition. Update the * 'all event' list, and reinit the 'single event' * list, for next event definition. */ list_splice_tail(list_event, list_all); free(list_event); } struct event_modifier { int eu; int ek; int eh; int eH; int eG; int precise; int exclude_GH; }; static int get_event_modifier(struct event_modifier *mod, char *str, struct perf_evsel *evsel) { int eu = evsel ? evsel->attr.exclude_user : 0; int ek = evsel ? evsel->attr.exclude_kernel : 0; int eh = evsel ? evsel->attr.exclude_hv : 0; int eH = evsel ? evsel->attr.exclude_host : 0; int eG = evsel ? evsel->attr.exclude_guest : 0; int precise = evsel ? evsel->attr.precise_ip : 0; int exclude = eu | ek | eh; int exclude_GH = evsel ? 
evsel->exclude_GH : 0; memset(mod, 0, sizeof(*mod)); while (*str) { if (*str == 'u') { if (!exclude) exclude = eu = ek = eh = 1; eu = 0; } else if (*str == 'k') { if (!exclude) exclude = eu = ek = eh = 1; ek = 0; } else if (*str == 'h') { if (!exclude) exclude = eu = ek = eh = 1; eh = 0; } else if (*str == 'G') { if (!exclude_GH) exclude_GH = eG = eH = 1; eG = 0; } else if (*str == 'H') { if (!exclude_GH) exclude_GH = eG = eH = 1; eH = 0; } else if (*str == 'p') { precise++; /* use of precise requires exclude_guest */ if (!exclude_GH) eG = 1; } else break; ++str; } /* * precise ip: * * 0 - SAMPLE_IP can have arbitrary skid * 1 - SAMPLE_IP must have constant skid * 2 - SAMPLE_IP requested to have 0 skid * 3 - SAMPLE_IP must have 0 skid * * See also <API key> */ if (precise > 3) return -EINVAL; mod->eu = eu; mod->ek = ek; mod->eh = eh; mod->eH = eH; mod->eG = eG; mod->precise = precise; mod->exclude_GH = exclude_GH; return 0; } /* * Basic modifier sanity check to validate it contains only one * instance of any modifier (apart from 'p') present. */ static int check_modifier(char *str) { char *p = str; /* The sizeof includes 0 byte as well. 
*/ if (strlen(str) > (sizeof("ukhGHppp") - 1)) return -1; while (*p) { if (*p != 'p' && strchr(p + 1, *p)) return -1; p++; } return 0; } int <API key>(struct list_head *list, char *str, bool add) { struct perf_evsel *evsel; struct event_modifier mod; if (str == NULL) return 0; if (check_modifier(str)) return -EINVAL; if (!add && get_event_modifier(&mod, str, NULL)) return -EINVAL; list_for_each_entry(evsel, list, node) { if (add && get_event_modifier(&mod, str, evsel)) return -EINVAL; evsel->attr.exclude_user = mod.eu; evsel->attr.exclude_kernel = mod.ek; evsel->attr.exclude_hv = mod.eh; evsel->attr.precise_ip = mod.precise; evsel->attr.exclude_host = mod.eH; evsel->attr.exclude_guest = mod.eG; evsel->exclude_GH = mod.exclude_GH; } return 0; } int parse_events_name(struct list_head *list, char *name) { struct perf_evsel *evsel; list_for_each_entry(evsel, list, node) { if (!evsel->name) evsel->name = strdup(name); } return 0; } static int <API key>(const char *str, void *data, int start_token) { YY_BUFFER_STATE buffer; void *scanner; int ret; ret = <API key>(start_token, &scanner); if (ret) return ret; buffer = <API key>(str, scanner); #ifdef PARSER_DEBUG parse_events_debug = 1; #endif ret = parse_events_parse(data, scanner); <API key>(buffer, scanner); <API key>(buffer, scanner); <API key>(scanner); return ret; } /* * parse event config string, return a list of event terms. 
*/ int parse_events_terms(struct list_head *terms, const char *str) { struct parse_events_terms data = { .terms = NULL, }; int ret; ret = <API key>(str, &data, PE_START_TERMS); if (!ret) { list_splice(data.terms, terms); free(data.terms); return 0; } <API key>(data.terms); return ret; } int parse_events(struct perf_evlist *evlist, const char *str) { struct parse_events_evlist data = { .list = LIST_HEAD_INIT(data.list), .idx = evlist->nr_entries, }; int ret; ret = <API key>(str, &data, PE_START_EVENTS); if (!ret) { int entries = data.idx - evlist->nr_entries; <API key>(evlist, &data.list, entries); evlist->nr_groups += data.nr_groups; return 0; } /* * There are 2 users - builtin-record and builtin-test objects. * Both call perf_evlist__delete in case of error, so we dont * need to bother. */ return ret; } int parse_events_option(const struct option *opt, const char *str, int unset __maybe_unused) { struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; int ret = parse_events(evlist, str); if (ret) { fprintf(stderr, "invalid or unsupported event: '%s'\n", str); fprintf(stderr, "Run 'perf list' for a list of valid events\n"); } return ret; } int parse_filter(const struct option *opt, const char *str, int unset __maybe_unused) { struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; struct perf_evsel *last = NULL; if (evlist->nr_entries > 0) last = perf_evlist__last(evlist); if (last == NULL || last->attr.type != <API key>) { fprintf(stderr, "-F option should follow a -e tracepoint option\n"); return -1; } last->filter = strdup(str); if (last->filter == NULL) { fprintf(stderr, "not enough memory to hold filter string\n"); return -1; } return 0; } static const char * const <API key>[] = { "Hardware event", "Software event", "Tracepoint event", "Hardware cache event", "Raw hardware event descriptor", "Hardware breakpoint", }; /* * Print the events from <debugfs_mount_point>/tracing/events */ void <API key>(const char *subsys_glob, const char 
*event_glob, bool name_only) { DIR *sys_dir, *evt_dir; struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; if (<API key>(tracing_events_path)) return; sys_dir = opendir(tracing_events_path); if (!sys_dir) return; for_each_subsystem(sys_dir, sys_dirent, sys_next) { if (subsys_glob != NULL && !strglobmatch(sys_dirent.d_name, subsys_glob)) continue; snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_dirent.d_name); evt_dir = opendir(dir_path); if (!evt_dir) continue; for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { if (event_glob != NULL && !strglobmatch(evt_dirent.d_name, event_glob)) continue; if (name_only) { printf("%s:%s ", sys_dirent.d_name, evt_dirent.d_name); continue; } snprintf(evt_path, MAXPATHLEN, "%s:%s", sys_dirent.d_name, evt_dirent.d_name); printf(" %-50s [%s]\n", evt_path, <API key>[<API key>]); } closedir(evt_dir); } closedir(sys_dir); } /* * Check whether event is in <debugfs_mount_point>/tracing/events */ int is_valid_tracepoint(const char *event_string) { DIR *sys_dir, *evt_dir; struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; if (<API key>(tracing_events_path)) return 0; sys_dir = opendir(tracing_events_path); if (!sys_dir) return 0; for_each_subsystem(sys_dir, sys_dirent, sys_next) { snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_dirent.d_name); evt_dir = opendir(dir_path); if (!evt_dir) continue; for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { snprintf(evt_path, MAXPATHLEN, "%s:%s", sys_dirent.d_name, evt_dirent.d_name); if (!strcmp(evt_path, event_string)) { closedir(evt_dir); closedir(sys_dir); return 1; } } closedir(evt_dir); } closedir(sys_dir); return 0; } static void __print_events_type(u8 type, struct event_symbol *syms, unsigned max) { char name[64]; unsigned i; for (i = 0; i < max ; i++, syms++) { if (strlen(syms->alias)) snprintf(name, sizeof(name), "%s OR 
%s", syms->symbol, syms->alias); else snprintf(name, sizeof(name), "%s", syms->symbol); printf(" %-50s [%s]\n", name, <API key>[type]); } } void print_events_type(u8 type) { if (type == PERF_TYPE_SOFTWARE) __print_events_type(type, event_symbols_sw, PERF_COUNT_SW_MAX); else __print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX); } int <API key>(const char *event_glob, bool name_only) { unsigned int type, op, i, printed = 0; char name[64]; for (type = 0; type < <API key>; type++) { for (op = 0; op < <API key>; op++) { /* skip invalid cache type */ if (!<API key>(type, op)) continue; for (i = 0; i < <API key>; i++) { <API key>(type, op, i, name, sizeof(name)); if (event_glob != NULL && !strglobmatch(name, event_glob)) continue; if (name_only) printf("%s ", name); else printf(" %-50s [%s]\n", name, <API key>[PERF_TYPE_HW_CACHE]); ++printed; } } } return printed; } static void print_symbol_events(const char *event_glob, unsigned type, struct event_symbol *syms, unsigned max, bool name_only) { unsigned i, printed = 0; char name[MAX_NAME_LEN]; for (i = 0; i < max; i++, syms++) { if (event_glob != NULL && !(strglobmatch(syms->symbol, event_glob) || (syms->alias && strglobmatch(syms->alias, event_glob)))) continue; if (name_only) { printf("%s ", syms->symbol); continue; } if (strlen(syms->alias)) snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); else strncpy(name, syms->symbol, MAX_NAME_LEN); printf(" %-50s [%s]\n", name, <API key>[type]); printed++; } if (printed) printf("\n"); } /* * Print the help text for the event symbols: */ void print_events(const char *event_glob, bool name_only) { if (!name_only) { printf("\n"); printf("List of pre-defined events (to be used in -e):\n"); } print_symbol_events(event_glob, PERF_TYPE_HARDWARE, event_symbols_hw, PERF_COUNT_HW_MAX, name_only); print_symbol_events(event_glob, PERF_TYPE_SOFTWARE, event_symbols_sw, PERF_COUNT_SW_MAX, name_only); <API key>(event_glob, name_only); if (event_glob != NULL) return; 
if (!name_only) { printf("\n"); printf(" %-50s [%s]\n", "rNNN", <API key>[PERF_TYPE_RAW]); printf(" %-50s [%s]\n", "cpu/t1=v1[,t2=v2,t3 ...]/modifier", <API key>[PERF_TYPE_RAW]); printf(" (see 'man perf-list' on how to encode it)\n"); printf("\n"); printf(" %-50s [%s]\n", "mem:<addr>[:access]", <API key>[<API key>]); printf("\n"); } <API key>(NULL, NULL, name_only); } int <API key>(struct parse_events_term *term) { return term->type_term != <API key>; } static int new_term(struct parse_events_term **_term, int type_val, int type_term, char *config, char *str, u64 num) { struct parse_events_term *term; term = zalloc(sizeof(*term)); if (!term) return -ENOMEM; INIT_LIST_HEAD(&term->list); term->type_val = type_val; term->type_term = type_term; term->config = config; switch (type_val) { case <API key>: term->val.num = num; break; case <API key>: term->val.str = str; break; default: return -EINVAL; } *_term = term; return 0; } int <API key>(struct parse_events_term **term, int type_term, char *config, u64 num) { return new_term(term, <API key>, type_term, config, NULL, num); } int <API key>(struct parse_events_term **term, int type_term, char *config, char *str) { return new_term(term, <API key>, type_term, config, str, 0); } int <API key>(struct parse_events_term **term, char *config, unsigned idx) { struct event_symbol *sym; BUG_ON(idx >= PERF_COUNT_HW_MAX); sym = &event_symbols_hw[idx]; if (config) return new_term(term, <API key>, <API key>, config, (char *) sym->symbol, 0); else return new_term(term, <API key>, <API key>, (char *) "event", (char *) sym->symbol, 0); } int <API key>(struct parse_events_term **new, struct parse_events_term *term) { return new_term(new, term->type_val, term->type_term, term->config, term->val.str, term->val.num); } void <API key>(struct list_head *terms) { struct parse_events_term *term, *h; <API key>(term, h, terms, list) free(term); free(terms); }
// <API key>: GPL-2.0-only #include <linux/device.h> #include <linux/interrupt.h> #include <crypto/internal/hash.h> #include "common.h" #include "core.h" #include "sha.h" /* crypto hw padding constant for first operation */ #define SHA_PADDING 64 #define SHA_PADDING_MASK (SHA_PADDING - 1) static LIST_HEAD(ahash_algs); static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0 }; static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 }; static void qce_ahash_done(void *data) { struct <API key> *async_req = data; struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct qce_sha_reqctx *rctx = ahash_request_ctx(req); struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); struct qce_device *qce = tmpl->qce; struct qce_result_dump *result = qce->dma.result_buf; unsigned int digestsize = <API key>(ahash); int error; u32 status; error = <API key>(&qce->dma); if (error) dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error); dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); memcpy(rctx->digest, result->auth_iv, digestsize); if (req->result) memcpy(req->result, result->auth_iv, digestsize); rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]); rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]); error = qce_check_status(qce, &status); if (error < 0) dev_dbg(qce->dev, "ahash operation error (%x)\n", status); req->src = rctx->src_orig; req->nbytes = rctx->nbytes_orig; rctx->last_blk = false; rctx->first_blk = false; qce->async_req_done(tmpl->qce, error); } static int <API key>(struct <API key> *async_req) { struct ahash_request *req = ahash_request_cast(async_req); struct qce_sha_reqctx *rctx = ahash_request_ctx(req); struct qce_sha_ctx *ctx = 
crypto_tfm_ctx(async_req->tfm); struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); struct qce_device *qce = tmpl->qce; unsigned long flags = rctx->flags; int ret; if (IS_SHA_HMAC(flags)) { rctx->authkey = ctx->authkey; rctx->authklen = <API key>; } else if (IS_CMAC(flags)) { rctx->authkey = ctx->authkey; rctx->authklen = AES_KEYSIZE_128; } rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); if (rctx->src_nents < 0) { dev_err(qce->dev, "Invalid numbers of src SG.\n"); return rctx->src_nents; } ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); if (ret < 0) return ret; sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); if (ret < 0) goto error_unmap_src; ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents, &rctx->result_sg, 1, qce_ahash_done, async_req); if (ret) goto error_unmap_dst; <API key>(&qce->dma); ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0); if (ret) goto error_terminate; return 0; error_terminate: <API key>(&qce->dma); error_unmap_dst: dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); error_unmap_src: dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); return ret; } static int qce_ahash_init(struct ahash_request *req) { struct qce_sha_reqctx *rctx = ahash_request_ctx(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); const u32 *std_iv = tmpl->std_iv; memset(rctx, 0, sizeof(*rctx)); rctx->first_blk = true; rctx->last_blk = false; rctx->flags = tmpl->alg_flags; memcpy(rctx->digest, std_iv, sizeof(rctx->digest)); return 0; } static int qce_ahash_export(struct ahash_request *req, void *out) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct qce_sha_reqctx *rctx = ahash_request_ctx(req); unsigned long flags = rctx->flags; unsigned int digestsize = <API key>(ahash); unsigned int blocksize = <API key>(crypto_ahash_tfm(ahash)); if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) 
{ struct sha1_state *out_state = out; out_state->count = rctx->count; <API key>((__be32 *)out_state->state, rctx->digest, digestsize); memcpy(out_state->buffer, rctx->buf, blocksize); } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { struct sha256_state *out_state = out; out_state->count = rctx->count; <API key>((__be32 *)out_state->state, rctx->digest, digestsize); memcpy(out_state->buf, rctx->buf, blocksize); } else { return -EINVAL; } return 0; } static int qce_import_common(struct ahash_request *req, u64 in_count, const u32 *state, const u8 *buffer, bool hmac) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct qce_sha_reqctx *rctx = ahash_request_ctx(req); unsigned int digestsize = <API key>(ahash); unsigned int blocksize; u64 count = in_count; blocksize = <API key>(crypto_ahash_tfm(ahash)); rctx->count = in_count; memcpy(rctx->buf, buffer, blocksize); if (in_count <= blocksize) { rctx->first_blk = 1; } else { rctx->first_blk = 0; /* * For HMAC, there is a hardware padding done when first block * is set. Therefore the byte_count must be incremened by 64 * after the first block operation. 
*/
/*
 * NOTE(review): Qualcomm crypto engine (QCE) SHA/ahash driver region.
 * Several identifiers in this capture were redacted to "<API key>" and are
 * preserved verbatim; no code token was changed, only comments were added.
 * The statements immediately below are the tail of an import helper whose
 * definition begins before this chunk.
 */
if (hmac) count += SHA_PADDING; } rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK); rctx->byte_count[1] = (__force __be32)(count >> 32); <API key>((__be32 *)rctx->digest, (const u8 *)state, digestsize); rctx->buflen = (unsigned int)(in_count & (blocksize - 1)); return 0; }
/* Restore a previously exported hash state (sha1/sha256, plain or HMAC). */
static int qce_ahash_import(struct ahash_request *req, const void *in) { struct qce_sha_reqctx *rctx = ahash_request_ctx(req); unsigned long flags = rctx->flags; bool hmac = IS_SHA_HMAC(flags); int ret = -EINVAL; if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { const struct sha1_state *state = in; ret = qce_import_common(req, state->count, state->state, state->buffer, hmac); } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { const struct sha256_state *state = in; ret = qce_import_common(req, state->count, state->state, state->buf, hmac); } return ret; }
/*
 * Accumulate update data.  Sub-block-sized input is only buffered; once at
 * least one whole block is available the request is queued to the engine and
 * the trailing partial block is held back for the next update/final call.
 */
static int qce_ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct qce_sha_reqctx *rctx = ahash_request_ctx(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; struct scatterlist *sg_last, *sg; unsigned int total, len; unsigned int hash_later; unsigned int nbytes; unsigned int blocksize; blocksize = <API key>(crypto_ahash_tfm(tfm)); rctx->count += req->nbytes; /* check for buffer from previous updates and append it */ total = req->nbytes + rctx->buflen; if (total <= blocksize) { <API key>(rctx->buf + rctx->buflen, req->src, 0, req->nbytes, 0); rctx->buflen += req->nbytes; return 0; } /* save the original req structure fields */ rctx->src_orig = req->src; rctx->nbytes_orig = req->nbytes; /* * if we have data from previous update copy them on buffer. The old * data will be combined with current request bytes. */ if (rctx->buflen) memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); /* calculate how many bytes will be hashed later */ hash_later = total % blocksize; if (hash_later) { unsigned int src_offset = req->nbytes - hash_later; <API key>(rctx->buf, req->src, src_offset, hash_later, 0); } /* here nbytes is multiple of blocksize */ nbytes = total - hash_later; len = rctx->buflen; sg = sg_last = req->src; while (len < nbytes && sg) { if (len + sg_dma_len(sg) > nbytes) break; len += sg_dma_len(sg); sg_last = sg; sg = sg_next(sg); } if (!sg_last) return -EINVAL; sg_mark_end(sg_last); if (rctx->buflen) { sg_init_table(rctx->sg, 2); sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen); sg_chain(rctx->sg, 2, req->src); req->src = rctx->sg; } req->nbytes = nbytes; rctx->buflen = hash_later; return qce->async_req_enqueue(tmpl->qce, &req->base); }
/* Hash whatever remains buffered and flag it as the last block. */
static int qce_ahash_final(struct ahash_request *req) { struct qce_sha_reqctx *rctx = ahash_request_ctx(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; if (!rctx->buflen) return 0; rctx->last_blk = true; rctx->src_orig = req->src; rctx->nbytes_orig = req->nbytes; memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen); req->src = rctx->sg; req->nbytes = rctx->buflen; return qce->async_req_enqueue(tmpl->qce, &req->base); }
/* One-shot digest: init, then queue a single first+last block request. */
static int qce_ahash_digest(struct ahash_request *req) { struct qce_sha_reqctx *rctx = ahash_request_ctx(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; int ret; ret = qce_ahash_init(req); if (ret) return ret; rctx->src_orig = req->src; rctx->nbytes_orig = req->nbytes; rctx->first_blk = true; rctx->last_blk = true; return qce->async_req_enqueue(tmpl->qce, &req->base); }
/*
 * HMAC setkey: keys up to one block are copied as-is into ctx->authkey;
 * longer keys are first digested with the matching plain sha1/sha256 QCE
 * transform (standard HMAC key reduction).
 */
static int <API key>(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { unsigned int digestsize = <API key>(tfm); struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); struct crypto_wait wait; struct ahash_request *req; struct scatterlist sg; unsigned int blocksize; struct crypto_ahash *ahash_tfm; u8 *buf; int ret; const char *alg_name; blocksize = <API key>(crypto_ahash_tfm(tfm)); memset(ctx->authkey, 0, sizeof(ctx->authkey)); if (keylen <= blocksize) { memcpy(ctx->authkey, key, keylen); return 0; } if (digestsize == SHA1_DIGEST_SIZE) alg_name = "sha1-qce"; else if (digestsize == SHA256_DIGEST_SIZE) alg_name = "sha256-qce"; else return -EINVAL; ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0); if (IS_ERR(ahash_tfm)) return PTR_ERR(ahash_tfm); req = ahash_request_alloc(ahash_tfm, GFP_KERNEL); if (!req) { ret = -ENOMEM; goto err_free_ahash; } crypto_init_wait(&wait); <API key>(req, <API key>, crypto_req_done, &wait); <API key>(ahash_tfm, ~0); buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_free_req; } memcpy(buf, key, keylen); sg_init_one(&sg, buf, keylen); <API key>(req, &sg, ctx->authkey, keylen); ret = crypto_wait_req(crypto_ahash_digest(req), &wait); if (ret) <API key>(tfm, <API key>); kfree(buf); err_free_req: ahash_request_free(req); err_free_ahash: crypto_free_ahash(ahash_tfm); return ret; }
/* Per-tfm init: reserve request-context space and zero the key material. */
static int qce_ahash_cra_init(struct crypto_tfm *tfm) { struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm); <API key>(ahash, sizeof(struct qce_sha_reqctx)); memset(ctx, 0, sizeof(*ctx)); return 0; }
/* Static description of one supported hash algorithm variant. */
struct qce_ahash_def { unsigned long flags; const char *name; const char *drv_name; unsigned int digestsize; unsigned int blocksize; unsigned int statesize; const u32 *std_iv; };
/* sha1/sha256 plus their HMAC variants, all backed by the QCE hardware. */
static const struct qce_ahash_def ahash_def[] = { { .flags = QCE_HASH_SHA1, .name = "sha1", .drv_name = "sha1-qce", .digestsize = SHA1_DIGEST_SIZE, .blocksize = SHA1_BLOCK_SIZE, .statesize = sizeof(struct sha1_state), .std_iv = std_iv_sha1, }, { .flags = QCE_HASH_SHA256, .name = "sha256", .drv_name = "sha256-qce", .digestsize = SHA256_DIGEST_SIZE, .blocksize = SHA256_BLOCK_SIZE, .statesize = sizeof(struct sha256_state), .std_iv = std_iv_sha256, }, { .flags = QCE_HASH_SHA1_HMAC, .name = "hmac(sha1)", .drv_name = "hmac-sha1-qce", .digestsize = SHA1_DIGEST_SIZE, .blocksize = SHA1_BLOCK_SIZE, .statesize = sizeof(struct sha1_state), .std_iv = std_iv_sha1, }, { .flags = <API key>, .name = "hmac(sha256)", .drv_name = "hmac-sha256-qce", .digestsize = SHA256_DIGEST_SIZE, .blocksize = SHA256_BLOCK_SIZE, .statesize = sizeof(struct sha256_state), .std_iv = std_iv_sha256, }, };
/* Allocate and register one ahash algorithm template with the crypto API. */
static int <API key>(const struct qce_ahash_def *def, struct qce_device *qce) { struct qce_alg_template *tmpl; struct ahash_alg *alg; struct crypto_alg *base; int ret; tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); if (!tmpl) return -ENOMEM; tmpl->std_iv = def->std_iv; alg = &tmpl->alg.ahash; alg->init = qce_ahash_init; alg->update = qce_ahash_update; alg->final = qce_ahash_final; alg->digest = qce_ahash_digest; alg->export = qce_ahash_export; alg->import = qce_ahash_import; if (IS_SHA_HMAC(def->flags)) alg->setkey = <API key>; alg->halg.digestsize = def->digestsize; alg->halg.statesize = def->statesize; base = &alg->halg.base; base->cra_blocksize = def->blocksize; base->cra_priority = 300; base->cra_flags = CRYPTO_ALG_ASYNC; base->cra_ctxsize = sizeof(struct qce_sha_ctx); base->cra_alignmask = 0; base->cra_module = THIS_MODULE; base->cra_init = qce_ahash_cra_init; snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->drv_name); INIT_LIST_HEAD(&tmpl->entry); tmpl->crypto_alg_type = <API key>; tmpl->alg_flags = def->flags; tmpl->qce = qce; ret = <API key>(alg); if (ret) { kfree(tmpl); dev_err(qce->dev, "%s registration failed\n", base->cra_name); return ret; } list_add_tail(&tmpl->entry, &ahash_algs); dev_dbg(qce->dev, "%s is registered\n", base->cra_name); return 0; }
/* Unregister and free every template previously added to ahash_algs. */
static void <API key>(struct qce_device *qce) { struct qce_alg_template *tmpl, *n; <API key>(tmpl, n, &ahash_algs, entry) { <API key>(&tmpl->alg.ahash); list_del(&tmpl->entry); kfree(tmpl); } }
/* Register all entries of ahash_def[]; roll back on the first failure. */
static int qce_ahash_register(struct qce_device *qce) { int ret, i; for (i = 0; i < ARRAY_SIZE(ahash_def); i++) { ret = <API key>(&ahash_def[i], qce); if (ret) goto err; } return 0; err: <API key>(qce); return ret; }
/* Ops vector exposed to the QCE core for the ahash algorithm family. */
const struct qce_algo_ops ahash_ops = { .type = <API key>, .register_algs = qce_ahash_register, .unregister_algs = <API key>, .async_req_handle = <API key>, };
(function (global, factory) { typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() : typeof define === 'function' && define.amd ? define(factory) : (global.InfernoRouter = factory()); }(this, function () { 'use strict'; var NO_RENDER = 'NO_RENDER'; // Runs only once in applications lifetime var isBrowser = typeof window !== 'undefined' && window.document; function isArray(obj) { return obj instanceof Array; } function isNullOrUndefined(obj) { return isUndefined(obj) || isNull(obj); } function isNull(obj) { return obj === null; } function isUndefined(obj) { return obj === undefined; } function VNode(blueprint) { this.bp = blueprint; this.dom = null; this.instance = null; this.tag = null; this.children = null; this.style = null; this.className = null; this.attrs = null; this.events = null; this.hooks = null; this.key = null; this.clipData = null; } VNode.prototype = { setAttrs: function setAttrs(attrs) { this.attrs = attrs; return this; }, setTag: function setTag(tag) { this.tag = tag; return this; }, setStyle: function setStyle(style) { this.style = style; return this; }, setClassName: function setClassName(className) { this.className = className; return this; }, setChildren: function setChildren(children) { this.children = children; return this; }, setHooks: function setHooks(hooks) { this.hooks = hooks; return this; }, setEvents: function setEvents(events) { this.events = events; return this; }, setKey: function setKey(key) { this.key = key; return this; } }; function createVNode(bp) { return new VNode(bp); } function VPlaceholder() { this.placeholder = true; this.dom = null; } function createVPlaceholder() { return new VPlaceholder(); } function constructDefaults(string, object, value) { /* eslint no-return-assign: 0 */ string.split(',').forEach(function (i) { return object[i] = value; }); } var xlinkNS = 'http: var xmlNS = 'http: var strictProps = {}; var booleanProps = {}; var namespaces = {}; var isUnitlessNumber = {}; 
constructDefaults('xlink:href,xlink:arcrole,xlink:actuate,xlink:role,xlink:titlef,xlink:type', namespaces, xlinkNS); constructDefaults('xml:base,xml:lang,xml:space', namespaces, xmlNS); constructDefaults('volume,value', strictProps, true); constructDefaults('muted,scoped,loop,open,checked,default,capture,disabled,selected,readonly,multiple,required,autoplay,controls,seamless,reversed,allowfullscreen,novalidate', booleanProps, true); constructDefaults('<API key>,borderImageOutset,borderImageSlice,borderImageWidth,boxFlex,boxFlexGroup,boxOrdinalGroup,columnCount,flex,flexGrow,flexPositive,flexShrink,flexNegative,flexOrder,gridRow,gridColumn,fontWeight,lineClamp,lineHeight,opacity,order,orphans,tabSize,widows,zIndex,zoom,fillOpacity,floodOpacity,stopOpacity,strokeDasharray,strokeDashoffset,strokeMiterlimit,strokeOpacity,strokeWidth,', isUnitlessNumber, true); var screenWidth = isBrowser && window.screen.width; var screenHeight = isBrowser && window.screen.height; var scrollX = 0; var scrollY = 0; var lastScrollTime = 0; if (isBrowser) { window.onscroll = function () { scrollX = window.scrollX; scrollY = window.scrollY; lastScrollTime = performance.now(); }; window.resize = function () { scrollX = window.scrollX; scrollY = window.scrollY; screenWidth = window.screen.width; screenHeight = window.screen.height; lastScrollTime = performance.now(); }; } function Lifecycle() { this._listeners = []; this.scrollX = null; this.scrollY = null; this.screenHeight = screenHeight; this.screenWidth = screenWidth; } Lifecycle.prototype = { refresh: function refresh() { this.scrollX = isBrowser && window.scrollX; this.scrollY = isBrowser && window.scrollY; }, addListener: function addListener(callback) { this._listeners.push(callback); }, trigger: function trigger() { var this$1 = this; for (var i = 0; i < this._listeners.length; i++) { this$1._listeners[i](); } } }; var noOp = 'Inferno Error: Can only update a mounted or mounting component. 
This usually means you called setState() or forceUpdate() on an unmounted component. This is a no-op.'; // Copy of the util from dom/util, otherwise it makes massive bundles function getActiveNode() { return document.activeElement; } // Copy of the util from dom/util, otherwise it makes massive bundles function resetActiveNode(activeNode) { if (activeNode !== document.body && document.activeElement !== activeNode) { activeNode.focus(); // TODO: verify are we doing new focus event, if user has focus listener this might trigger it } } function queueStateChanges(component, newState, callback) { for (var stateKey in newState) { component._pendingState[stateKey] = newState[stateKey]; } if (!component._pendingSetState) { component._pendingSetState = true; applyState(component, false, callback); } else { var pendingState = component._pendingState; var oldState = component.state; component.state = Object.assign({}, oldState, pendingState); component._pendingState = {}; } } function applyState(component, force, callback) { if (!component._deferSetState || force) { component._pendingSetState = false; var pendingState = component._pendingState; var oldState = component.state; var nextState = Object.assign({}, oldState, pendingState); component._pendingState = {}; var nextNode = component._updateComponent(oldState, nextState, component.props, component.props, force); if (nextNode === NO_RENDER) { nextNode = component._lastNode; } else if (isNullOrUndefined(nextNode)) { nextNode = createVPlaceholder(); } var lastNode = component._lastNode; var parentDom = lastNode.dom.parentNode; var activeNode = getActiveNode(); var subLifecycle = new Lifecycle(); component._patch(lastNode, nextNode, parentDom, subLifecycle, component.context, component, null); component._lastNode = nextNode; component.<API key>.set(component, nextNode.dom); component._parentNode.dom = nextNode.dom; subLifecycle.trigger(); if (!isNullOrUndefined(callback)) { callback(); } resetActiveNode(activeNode); } } var 
Component = function Component(props) { /** @type {object} */ this.props = props || {}; /** @type {object} */ this.state = {}; /** @type {object} */ this.refs = {}; this._blockSetState = false; this._deferSetState = false; this._pendingSetState = false; this._pendingState = {}; this._parentNode = null; this._lastNode = null; this._unmounted = true; this.context = {}; this._patch = null; this._parentComponent = null; this.<API key> = null; }; Component.prototype.render = function render () { }; Component.prototype.forceUpdate = function forceUpdate (callback) { if (this._unmounted) { throw Error(noOp); } applyState(this, true, callback); }; Component.prototype.setState = function setState (newState, callback) { if (this._unmounted) { throw Error(noOp); } if (this._blockSetState === false) { queueStateChanges(this, newState, callback); } else { throw Error('Inferno Warning: Cannot update state via setState() in componentWillUpdate()'); } }; Component.prototype.componentDidMount = function componentDidMount () { }; Component.prototype.componentWillMount = function componentWillMount () { }; Component.prototype.<API key> = function <API key> () { }; Component.prototype.componentDidUpdate = function componentDidUpdate () { }; Component.prototype.<API key> = function <API key> () { return true; }; Component.prototype.<API key> = function <API key> () { }; Component.prototype.componentWillUpdate = function componentWillUpdate () { }; Component.prototype.getChildContext = function getChildContext () { }; Component.prototype._updateComponent = function _updateComponent (prevState, nextState, prevProps, nextProps, force) { if (this._unmounted === true) { this._unmounted = false; return false; } if (!isNullOrUndefined(nextProps) && isNullOrUndefined(nextProps.children)) { nextProps.children = prevProps.children; } if (prevProps !== nextProps || prevState !== nextState || force) { if (prevProps !== nextProps) { this._blockSetState = true; this.<API key>(nextProps); 
this._blockSetState = false; } var shouldUpdate = this.<API key>(nextProps, nextState); if (shouldUpdate !== false) { this._blockSetState = true; this.componentWillUpdate(nextProps, nextState); this._blockSetState = false; this.props = nextProps; this.state = nextState; var node = this.render(); this.componentDidUpdate(prevProps, prevState); return node; } } return NO_RENDER; }; var ASYNC_STATUS = { pending: 'pending', fulfilled: 'fulfilled', rejected: 'rejected' }; var Route = (function (Component) { function Route(props) { Component.call(this, props); this.state = { async: null }; } if ( Component ) Route.__proto__ = Component; Route.prototype = Object.create( Component && Component.prototype ); Route.prototype.constructor = Route; Route.prototype.async = function async () { var this$1 = this; var async = this.props.async; if (async) { this.setState({ async: { status: ASYNC_STATUS.pending } }); async(this.props.params).then(function (value) { this$1.setState({ async: { status: ASYNC_STATUS.fulfilled, value: value } }); }, this.reject).catch(this.reject); } }; Route.prototype.reject = function reject (value) { this.setState({ async: { status: ASYNC_STATUS.rejected, value: value } }); }; Route.prototype.<API key> = function <API key> () { this.async(); }; Route.prototype.componentWillMount = function componentWillMount () { this.async(); }; Route.prototype.render = function render () { var ref = this.props; var component = ref.component; var params = ref.params; return createVNode().setTag(component).setAttrs({ params: params, async: this.state.async }); }; return Route; }(Component)); var EMPTY$1 = {}; function segmentize(url) { return strip(url).split('/'); } function strip(url) { return url.replace(/(^\/+|\/+$)/g, ''); } function convertToHashbang(url) { if (url.indexOf(' url = '/'; } else { var splitHashUrl = url.split('#!'); splitHashUrl.shift(); url = splitHashUrl.join(''); } return url; } // Thanks goes to Preact for this function: 
https://github.com/developit/preact-router/blob/master/src/util.js function exec(url, route, opts) { if ( opts === void 0 ) opts = EMPTY$1; var reg = /(?:\?([^ c = url.match(reg), matches = {}, ret; if (c && c[1]) { var p = c[1].split('&'); for (var i = 0; i < p.length; i++) { var r = p[i].split('='); matches[decodeURIComponent(r[0])] = decodeURIComponent(r.slice(1).join('=')); } } url = segmentize(url.replace(reg, '')); route = segmentize(route || ''); var max = Math.max(url.length, route.length); var hasWildcard = false; for (var i$1 = 0; i$1 < max; i$1++) { if (route[i$1] && route[i$1].charAt(0) === ':') { var param = route[i$1].replace(/(^\:|[+*?]+$)/g, ''), flags = (route[i$1].match(/[+*?]+$/) || EMPTY$1)[0] || '', plus = ~flags.indexOf('+'), star = ~flags.indexOf('*'), val = url[i$1] || ''; if (!val && !star && (flags.indexOf('?') < 0 || plus)) { ret = false; break; } matches[param] = decodeURIComponent(val); if (plus || star) { matches[param] = url.slice(i$1).map(decodeURIComponent).join('/'); break; } } else if (route[i$1] !== url[i$1] && !hasWildcard) { if (route[i$1] === '*' && route.length === i$1 + 1) { hasWildcard = true; } else { ret = false; break; } } } if (opts.default !== true && ret === false) { return false; } return matches; } function pathRankSort(a, b) { var aAttr = a.attrs || EMPTY$1, bAttr = b.attrs || EMPTY$1; var diff = rank(bAttr.path) - rank(aAttr.path); return diff || (bAttr.path.length - aAttr.path.length); } function rank(url) { return (strip(url).match(/\/+/g) || '').length; } var Router = (function (Component) { function Router(props) { Component.call(this, props); if (!props.history) { throw new Error('Inferno Error: "inferno-router" Router components require a "history" prop passed.'); } this._didRoute = false; this.state = { url: props.url || props.history.getCurrentUrl() }; } if ( Component ) Router.__proto__ = Component; Router.prototype = Object.create( Component && Component.prototype ); Router.prototype.constructor = 
Router; Router.prototype.getChildContext = function getChildContext () { return { history: this.props.history, hashbang: this.props.hashbang }; }; Router.prototype.componentWillMount = function componentWillMount () { this.props.history.addRouter(this); }; Router.prototype.<API key> = function <API key> () { this.props.history.removeRouter(this); }; Router.prototype.routeTo = function routeTo (url) { this._didRoute = false; this.setState({ url: url }); return this._didRoute; }; Router.prototype.render = function render () { var children = toArray(this.props.children); var url = this.props.url || this.state.url; var wrapperComponent = this.props.component; var hashbang = this.props.hashbang; return handleRoutes(children, url, hashbang, wrapperComponent, ''); }; return Router; }(Component)); function toArray(children) { return isArray(children) ? children : (children ? [children] : children); } function handleRoutes(routes, url, hashbang, wrapperComponent, lastPath) { routes.sort(pathRankSort); for (var i = 0; i < routes.length; i++) { var route = routes[i]; var ref = route.attrs; var path = ref.path; var fullPath = lastPath + path; var params = exec(hashbang ? convertToHashbang(url) : url, fullPath); var children = toArray(route.children); if (children) { var subRoute = handleRoutes(children, url, hashbang, wrapperComponent, fullPath); if (!isNull(subRoute)) { return subRoute; } } if (params) { if (wrapperComponent) { return createVNode().setTag(wrapperComponent).setChildren(route).setAttrs({ params: params }); } return route.setAttrs(Object.assign({}, { params: params }, route.attrs)); } } return !lastPath && wrapperComponent ? createVNode().setTag(wrapperComponent) : null; } function Link(ref, ref$1) { var to = ref.to; var children = ref.children; var hashbang = ref$1.hashbang; var history = ref$1.history; return (createVNode().setAttrs({ href: hashbang ? history.getHashbangRoot() + convertToHashbang('#!' 
+ to) : to }).setTag('a').setChildren(children)); } var routers = []; function getCurrentUrl() { var url = typeof location !== 'undefined' ? location : EMPTY; return ("" + (url.pathname || '') + (url.search || '') + (url.hash || '')); } function getHashbangRoot() { var url = typeof location !== 'undefined' ? location : EMPTY; return ("" + (url.protocol + '//' || '') + (url.host || '') + (url.pathname || '') + (url.search || '') + "#!"); } function routeTo(url) { var didRoute = false; for (var i = 0; i < routers.length; i++) { if (routers[i].routeTo(url) === true) { didRoute = true; } } return didRoute; } if (isBrowser) { window.addEventListener('popstate', function () { return routeTo(getCurrentUrl()); }); } var browserHistory = { addRouter: function addRouter(router) { routers.push(router); }, removeRouter: function removeRouter(router) { routers.splice(routers.indexOf(router), 1); }, getCurrentUrl: getCurrentUrl, getHashbangRoot: getHashbangRoot }; var index = { Route: Route, Router: Router, Link: Link, browserHistory: browserHistory }; return index; }));
# Requiring test environment file require 'spec_helper' # Requiring test subject file require_relative File.join(APP_CONTROLLERS, "static") # Test cases describe 'Routing for root' do it "has ok response when get '/' is called." do get '/' expect(last_response).to be_ok end end
/*
 * NOTE(review): Linux rt2x00 generic USB driver region.  Several identifiers
 * in this capture were redacted to "<API key>" and are preserved verbatim
 * (including one token split across a line break); no code token was changed,
 * only comments were added.  The final function (rt2x00usb_probe) continues
 * past the end of this chunk.
 */
/* Module: rt2x00usb Abstract: rt2x00 generic usb device routines. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/bug.h> #include "rt2x00.h" #include "rt2x00usb.h" /* * Interfacing with the HW. */ int <API key>(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, const u16 value, void *buffer, const u16 buffer_length, const int timeout) { struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); int status; unsigned int pipe = (requesttype == <API key>) ? usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); unsigned long expire = jiffies + msecs_to_jiffies(timeout); if (!test_bit(<API key>, &rt2x00dev->flags)) return -ENODEV; do { status = usb_control_msg(usb_dev, pipe, request, requesttype, value, offset, buffer, buffer_length, timeout / 2); if (status >= 0) return 0; if (status == -ENODEV) { /* Device has disappeared. */ clear_bit(<API key>, &rt2x00dev->flags); break; } } while (time_before(jiffies, expire)); rt2x00_err(rt2x00dev, "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n", request, offset, status); return status; } EXPORT_SYMBOL_GPL(<API key>); int <API key>(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, void *buffer, const u16 buffer_length, const int timeout) { int status; BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex)); /* * Check for Cache availability. 
*/ if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) { rt2x00_err(rt2x00dev, "CSR cache not available\n"); return -ENOMEM; } if (requesttype == <API key>) memcpy(rt2x00dev->csr.cache, buffer, buffer_length); status = <API key>(rt2x00dev, request, requesttype, offset, 0, rt2x00dev->csr.cache, buffer_length, timeout); if (!status && requesttype == <API key>) memcpy(buffer, rt2x00dev->csr.cache, buffer_length); return status; } EXPORT_SYMBOL_GPL(<API key>);
/* Large-buffer vendor request: split into CSR_CACHE_SIZE chunks under the lock. */
int <API key>(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, void *buffer, const u16 buffer_length) { int status = 0; unsigned char *tb; u16 off, len, bsize; mutex_lock(&rt2x00dev->csr_mutex); tb = (char *)buffer; off = offset; len = buffer_length; while (len && !status) { bsize = min_t(u16, CSR_CACHE_SIZE, len); status = <API key>(rt2x00dev, request, requesttype, off, tb, bsize, REGISTER_TIMEOUT); tb += bsize; len -= bsize; off += bsize; } mutex_unlock(&rt2x00dev->csr_mutex); return status; } EXPORT_SYMBOL_GPL(<API key>); int <API key>(struct rt2x00_dev *rt2x00dev, const unsigned int offset, const struct rt2x00_field32 field, u32 *reg) { unsigned int i; if (!test_bit(<API key>, &rt2x00dev->flags)) return -ENODEV; for (i = 0; i < <API key>; i++) { *reg = <API key>(rt2x00dev, offset); if (!rt2x00_get_field32(*reg, field)) return 1; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n", offset, *reg); *reg = ~0; return 0; } EXPORT_SYMBOL_GPL(<API key>); struct <API key> { __le32 reg; struct usb_ctrlrequest cr; struct rt2x00_dev *rt2x00dev; bool (*callback)(struct rt2x00_dev *, int, u32); }; static void <API key>(struct urb *urb) { struct <API key> *rd = urb->context; if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) { usb_anchor_urb(urb, rd->rt2x00dev->anchor); if (usb_submit_urb(urb, GFP_ATOMIC) < 0) { usb_unanchor_urb(urb); kfree(rd); } } else kfree(rd); } void <API 
key>(struct rt2x00_dev *rt2x00dev, const unsigned int offset, bool (*callback)(struct rt2x00_dev*, int, u32)) { struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct urb *urb; struct <API key> *rd; rd = kmalloc(sizeof(*rd), GFP_ATOMIC); if (!rd) return; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { kfree(rd); return; } rd->rt2x00dev = rt2x00dev; rd->callback = callback; rd->cr.bRequestType = <API key>; rd->cr.bRequest = USB_MULTI_READ; rd->cr.wValue = 0; rd->cr.wIndex = cpu_to_le16(offset); rd->cr.wLength = cpu_to_le16(sizeof(u32)); <API key>(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0), (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg), <API key>, rd); usb_anchor_urb(urb, rt2x00dev->anchor); if (usb_submit_urb(urb, GFP_ATOMIC) < 0) { usb_unanchor_urb(urb); kfree(rd); } usb_free_urb(urb); } EXPORT_SYMBOL_GPL(<API key>); /* * TX data handlers. */ static void <API key>(struct queue_entry *entry) { /* * If the transfer to hardware succeeded, it does not mean the * frame was send out correctly. It only means the frame * was successfully pushed to the hardware, we have no * way to determine the transmission status right now. * (Only indirectly by looking at the failed TX counters * in the register). 
*/ if (test_bit(<API key>, &entry->flags)) <API key>(entry, TXDONE_FAILURE); else <API key>(entry, TXDONE_UNKNOWN); }
/* Deferred TX-done worker: reap completed entries from every TX queue. */
static void <API key>(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, txdone_work); struct data_queue *queue; struct queue_entry *entry; tx_queue_for_each(rt2x00dev, queue) { while (!rt2x00queue_empty(queue)) { entry = <API key>(queue, Q_INDEX_DONE); if (test_bit(<API key>, &entry->flags) || !test_bit(<API key>, &entry->flags)) break; <API key>(entry); } } } static void <API key>(struct urb *urb) { struct queue_entry *entry = (struct queue_entry *)urb->context; struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; if (!test_bit(<API key>, &entry->flags)) return; /* * Check if the frame was correctly uploaded */ if (urb->status) set_bit(<API key>, &entry->flags); /* * Report the frame as DMA done */ rt2x00lib_dmadone(entry); if (rt2x00dev->ops->lib->tx_dma_done) rt2x00dev->ops->lib->tx_dma_done(entry); /* * Schedule the delayed work for reading the TX status * from the device. */ if (!rt2x00_has_cap_flag(rt2x00dev, <API key>) || !kfifo_is_empty(&rt2x00dev->txstatus_fifo)) queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); } static bool <API key>(struct queue_entry *entry, void *data) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct <API key> *entry_priv = entry->priv_data; u32 length; int status; if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) || test_bit(<API key>, &entry->flags)) return false; /* * USB devices require certain padding at the end of each frame * and urb. Those paddings are not included in skbs. Pass entry * to the driver to determine what the overall length should be. */ length = rt2x00dev->ops->lib->get_tx_data_len(entry); status = skb_padto(entry->skb, length); if (unlikely(status)) { /* TODO: report something more appropriate than IO_FAILED. 
*/ rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n"); set_bit(<API key>, &entry->flags); rt2x00lib_dmadone(entry); return false; } usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), entry->skb->data, length, <API key>, entry); status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { if (status == -ENODEV) clear_bit(<API key>, &rt2x00dev->flags); set_bit(<API key>, &entry->flags); rt2x00lib_dmadone(entry); } return false; } /* * RX data handlers. */ static void <API key>(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, rxdone_work); struct queue_entry *entry; struct skb_frame_desc *skbdesc; u8 rxd[32]; while (!rt2x00queue_empty(rt2x00dev->rx)) { entry = <API key>(rt2x00dev->rx, Q_INDEX_DONE); if (test_bit(<API key>, &entry->flags) || !test_bit(<API key>, &entry->flags)) break; /* * Fill in desc fields of the skb descriptor */ skbdesc = get_skb_frame_desc(entry->skb); skbdesc->desc = rxd; skbdesc->desc_len = entry->queue->desc_size; /* * Send the frame to rt2x00lib for further processing. */ rt2x00lib_rxdone(entry, GFP_KERNEL); } } static void <API key>(struct urb *urb) { struct queue_entry *entry = (struct queue_entry *)urb->context; struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; if (!test_and_clear_bit(<API key>, &entry->flags)) return; /* * Report the frame as DMA done */ rt2x00lib_dmadone(entry); /* * Check if the received data is simply too small * to be actually valid, or if the urb is signaling * a problem. */ if (urb->actual_length < entry->queue->desc_size || urb->status) set_bit(<API key>, &entry->flags); /* * Schedule the delayed work for reading the RX status * from the device. 
*/ queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work); } static bool <API key>(struct queue_entry *entry, void *data) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct <API key> *entry_priv = entry->priv_data; int status; if (test_and_set_bit(<API key>, &entry->flags) || test_bit(<API key>, &entry->flags)) return false; rt2x00lib_dmastart(entry); usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint), entry->skb->data, entry->skb->len, <API key>, entry); status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { if (status == -ENODEV) clear_bit(<API key>, &rt2x00dev->flags); set_bit(<API key>, &entry->flags); rt2x00lib_dmadone(entry); } return false; }
/* Kick a queue: (re)submit pending TX urbs or refill RX urbs as appropriate. */
void <API key>(struct data_queue *queue) { switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: if (!rt2x00queue_empty(queue)) <API key>(queue, Q_INDEX_DONE, Q_INDEX, NULL, <API key>); break; case QID_RX: if (!rt2x00queue_full(queue)) <API key>(queue, Q_INDEX, Q_INDEX_DONE, NULL, <API key>); break; default: break; } } EXPORT_SYMBOL_GPL(<API key>); static bool <API key>(struct queue_entry *entry, void *data) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct <API key> *entry_priv = entry->priv_data; struct <API key> *bcn_priv = entry->priv_data; if (!test_bit(<API key>, &entry->flags)) return false; usb_kill_urb(entry_priv->urb); /* * Kill guardian urb (if required by driver). 
*/ if ((entry->queue->qid == QID_BEACON) && (rt2x00_has_cap_flag(rt2x00dev, <API key>))) usb_kill_urb(bcn_priv->guardian_urb); return false; } void <API key>(struct data_queue *queue, bool drop) { struct work_struct *completion; unsigned int i; if (drop) <API key>(queue, Q_INDEX_DONE, Q_INDEX, NULL, <API key>); /* * Obtain the queue completion handler */ switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: completion = &queue->rt2x00dev->txdone_work; break; case QID_RX: completion = &queue->rt2x00dev->rxdone_work; break; default: return; } for (i = 0; i < 10; i++) { /* * Check if the driver is already done, otherwise we * have to sleep a little while to give the driver/hw * the oppurtunity to complete interrupt process itself. */ if (rt2x00queue_empty(queue)) break; /* * Schedule the completion handler manually, when this * worker function runs, it should cleanup the queue. */ queue_work(queue->rt2x00dev->workqueue, completion); /* * Wait for a little while to give the driver * the oppurtunity to recover itself. */ msleep(50); } } EXPORT_SYMBOL_GPL(<API key>); static void <API key>(struct data_queue *queue) { rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced forced reset\n", queue->qid); <API key>(queue); <API key>(queue, true); <API key>(queue); } static int <API key>(struct data_queue *queue) { struct queue_entry *entry; entry = <API key>(queue, Q_INDEX_DMA_DONE); return <API key>(entry); } void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; tx_queue_for_each(rt2x00dev, queue) { if (!rt2x00queue_empty(queue)) { if (<API key>(queue)) <API key>(queue); } } } EXPORT_SYMBOL_GPL(rt2x00usb_watchdog); /* * Radio handlers */ void <API key>(struct rt2x00_dev *rt2x00dev) { <API key>(rt2x00dev, USB_RX_CONTROL, 0, 0, REGISTER_TIMEOUT); } EXPORT_SYMBOL_GPL(<API key>); /* * Device initialization handlers. 
*/ void <API key>(struct queue_entry *entry) { entry->flags = 0; if (entry->queue->qid == QID_RX) <API key>(entry, NULL); } EXPORT_SYMBOL_GPL(<API key>); static void <API key>(struct data_queue *queue, struct <API key> *ep_desc) { struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev); int pipe; queue->usb_endpoint = usb_endpoint_num(ep_desc); if (queue->qid == QID_RX) { pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint); queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0); } else { pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint); queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1); } if (!queue->usb_maxpacket) queue->usb_maxpacket = 1; } static int <API key>(struct rt2x00_dev *rt2x00dev) { struct usb_interface *intf = to_usb_interface(rt2x00dev->dev); struct usb_host_interface *intf_desc = intf->cur_altsetting; struct <API key> *ep_desc; struct data_queue *queue = rt2x00dev->tx; struct <API key> *tx_ep_desc = NULL; unsigned int i; /* * Walk through all available endpoints to search for "bulk in" * and "bulk out" endpoints. When we find such endpoints collect * the information we need from the descriptor and assign it * to the queue. */ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { ep_desc = &intf_desc->endpoint[i].desc; if (<API key>(ep_desc)) { <API key>(rt2x00dev->rx, ep_desc); } else if (<API key>(ep_desc) && (queue != queue_end(rt2x00dev))) { <API key>(queue, ep_desc); queue = queue_next(queue); tx_ep_desc = ep_desc; } } /* * At least 1 endpoint for RX and 1 endpoint for TX must be available. */ if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) { rt2x00_err(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n"); return -EPIPE; } /* * It might be possible not all queues have a dedicated endpoint. * Loop through all TX queues and copy the endpoint information * which we have gathered from already assigned endpoints. 
*/ <API key>(rt2x00dev, queue) { if (!queue->usb_endpoint) <API key>(queue, tx_ep_desc); } return 0; } static int <API key>(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; struct <API key> *entry_priv; struct <API key> *bcn_priv; unsigned int i; for (i = 0; i < queue->limit; i++) { entry_priv = queue->entries[i].priv_data; entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL); if (!entry_priv->urb) return -ENOMEM; } /* * If this is not the beacon queue or * no guardian byte was required for the beacon, * then we are done. */ if (queue->qid != QID_BEACON || !rt2x00_has_cap_flag(rt2x00dev, <API key>)) return 0; for (i = 0; i < queue->limit; i++) { bcn_priv = queue->entries[i].priv_data; bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL); if (!bcn_priv->guardian_urb) return -ENOMEM; } return 0; } static void <API key>(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; struct <API key> *entry_priv; struct <API key> *bcn_priv; unsigned int i; if (!queue->entries) return; for (i = 0; i < queue->limit; i++) { entry_priv = queue->entries[i].priv_data; usb_kill_urb(entry_priv->urb); usb_free_urb(entry_priv->urb); } /* * If this is not the beacon queue or * no guardian byte was required for the beacon, * then we are done. 
*/ if (queue->qid != QID_BEACON || !rt2x00_has_cap_flag(rt2x00dev, <API key>)) return; for (i = 0; i < queue->limit; i++) { bcn_priv = queue->entries[i].priv_data; usb_kill_urb(bcn_priv->guardian_urb); usb_free_urb(bcn_priv->guardian_urb); } } int <API key>(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; int status; /* * Find endpoints for each queue */ status = <API key>(rt2x00dev); if (status) goto exit; /* * Allocate DMA */ queue_for_each(rt2x00dev, queue) { status = <API key>(queue); if (status) goto exit; } return 0; exit: <API key>(rt2x00dev); return status; } EXPORT_SYMBOL_GPL(<API key>); void <API key>(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; <API key>(rt2x00dev->anchor); hrtimer_cancel(&rt2x00dev->txstatus_timer); cancel_work_sync(&rt2x00dev->rxdone_work); cancel_work_sync(&rt2x00dev->txdone_work); queue_for_each(rt2x00dev, queue) <API key>(queue); } EXPORT_SYMBOL_GPL(<API key>); /* * USB driver handlers. */ static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev) { kfree(rt2x00dev->rf); rt2x00dev->rf = NULL; kfree(rt2x00dev->eeprom); rt2x00dev->eeprom = NULL; kfree(rt2x00dev->csr.cache); rt2x00dev->csr.cache = NULL; } static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev) { rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL); if (!rt2x00dev->csr.cache) goto exit; rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); if (!rt2x00dev->eeprom) goto exit; rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); if (!rt2x00dev->rf) goto exit; return 0; exit: rt2x00_probe_err("Failed to allocate registers\n"); rt2x00usb_free_reg(rt2x00dev); return -ENOMEM; }
/* USB probe: allocate hw, registers, and initialise the rt2x00 device (continues past this chunk). */
int rt2x00usb_probe(struct usb_interface *usb_intf, const struct rt2x00_ops *ops) { struct usb_device *usb_dev = interface_to_usbdev(usb_intf); struct ieee80211_hw *hw; struct rt2x00_dev *rt2x00dev; int retval; usb_dev = usb_get_dev(usb_dev); usb_reset_device(usb_dev); hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); if (!hw) {
rt2x00_probe_err("Failed to allocate hardware\n"); retval = -ENOMEM; goto exit_put_device; } usb_set_intfdata(usb_intf, hw); rt2x00dev = hw->priv; rt2x00dev->dev = &usb_intf->dev; rt2x00dev->ops = ops; rt2x00dev->hw = hw; <API key>(rt2x00dev, <API key>); INIT_WORK(&rt2x00dev->rxdone_work, <API key>); INIT_WORK(&rt2x00dev->txdone_work, <API key>); hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); retval = rt2x00usb_alloc_reg(rt2x00dev); if (retval) goto exit_free_device; rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev, sizeof(struct usb_anchor), GFP_KERNEL); if (!rt2x00dev->anchor) { retval = -ENOMEM; goto exit_free_reg; } init_usb_anchor(rt2x00dev->anchor); retval = rt2x00lib_probe_dev(rt2x00dev); if (retval) goto exit_free_anchor; return 0; exit_free_anchor: <API key>(rt2x00dev->anchor); exit_free_reg: rt2x00usb_free_reg(rt2x00dev); exit_free_device: ieee80211_free_hw(hw); exit_put_device: usb_put_dev(usb_dev); usb_set_intfdata(usb_intf, NULL); return retval; } EXPORT_SYMBOL_GPL(rt2x00usb_probe); void <API key>(struct usb_interface *usb_intf) { struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); struct rt2x00_dev *rt2x00dev = hw->priv; /* * Free all allocated data. */ <API key>(rt2x00dev); rt2x00usb_free_reg(rt2x00dev); ieee80211_free_hw(hw); /* * Free the USB device data. */ usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); } EXPORT_SYMBOL_GPL(<API key>); #ifdef CONFIG_PM int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state) { struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); struct rt2x00_dev *rt2x00dev = hw->priv; return rt2x00lib_suspend(rt2x00dev, state); } EXPORT_SYMBOL_GPL(rt2x00usb_suspend); int rt2x00usb_resume(struct usb_interface *usb_intf) { struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); struct rt2x00_dev *rt2x00dev = hw->priv; return rt2x00lib_resume(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00usb_resume); #endif /* CONFIG_PM */ /* * rt2x00usb module information. 
*/ MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("rt2x00 usb library"); MODULE_LICENSE("GPL");
// <API key>: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

/* All module parameters below are writable at runtime via sysfs. */
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-memset (default: 0)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

/* NOTE(review): declared signed int but exposed as a uint param. */
static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass 0xFFFFFFFF (4294967295) for maximum timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");

static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");

static bool polled;
module_param(polled, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");

static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");

static int alignment = -1;
module_param(alignment, int, 0644);
MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))");

static unsigned int transfer_size;
module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");

/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295)
 * @noverify:		disable data verification
 * @norandom:		disable random offset setup
 * @alignment:		custom data address alignment taken as 2^alignment
 * @transfer_size:	custom transfer size in bytes
 * @polled:		use polling for completion instead of interrupts
 *
 * Snapshot of the module parameters taken when a test run is started,
 * so later sysfs writes do not affect threads already running.
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[32];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	unsigned int	timeout;
	bool		noverify;
	bool		norandom;
	int		alignment;
	unsigned int	transfer_size;
	bool		polled;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @lock:		access protection to the fields of this structure
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

/* "run" parameter: writing true starts pending tests, false stops them. */
static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");

/* "channel" parameter: each write registers another channel to test. */
static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
static int dmatest_chan_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops multi_chan_ops = {
	.set = dmatest_chan_set,
	.get = dmatest_chan_get,
};

static char test_channel[20];
static struct kparam_string newchan_kps = {
	.string = test_channel,
	.maxlen = 20,
};
module_param_cb(channel, &multi_chan_ops, &newchan_kps, 0644);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

/* Read-only "test_list" parameter: dumps the configured channels. */
static int <API key>(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops test_list_ops = {
	.get = <API key>,
};
module_param_cb(test_list, &test_list_ops, NULL, 0444);
MODULE_PARM_DESC(test_list, "Print current test list");

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
#define PATTERN_MEMSET_IDX	0x01

/* Fixed point arithmetic ops */
#define FIXPT_SHIFT		8
#define FIXPNT_MASK		0xFF
#define FIXPT_TO_INT(a)	((a) >> FIXPT_SHIFT)
#define INT_TO_FIXPT(a)	((a) << FIXPT_SHIFT)
#define FIXPT_GET_FRAC(a)	((((a) & FIXPNT_MASK) * 100) >> FIXPT_SHIFT)

/* poor man's completion - we want to use <API key>() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

/* One side (src or dst) of a transfer: raw allocations + aligned views. */
struct dmatest_data {
	u8		**raw;
	u8		**aligned;
	unsigned int	cnt;
	unsigned int	off;
};

/* Per-kthread test state; one thread drives one operation type on a chan. */
struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	struct dmatest_data	src;
	struct dmatest_data	dst;
	enum <API key>	type;
	wait_queue_head_t	done_wait;
	struct dmatest_done	test_done;
	bool			done;		/* thread finished */
	bool			pending;	/* created but not yet woken */
};

/* A channel under test together with the threads exercising it. */
struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

static <API key>(thread_wait);
static bool wait;

/* True while any thread on any channel has not finished yet. */
static bool <API key>(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done)
				return true;
		}
	}

	return false;
}

/* True while any created thread is still waiting to be started. */
static bool <API key>(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (thread->pending)
				return true;
		}
	}

	return false;
}

/* "wait" parameter getter: blocks until all threads are done (if bounded). */
static int dmatest_wait_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (params->iterations)
		wait_event(thread_wait, !<API key>(info));
	wait = true;
	return param_get_bool(val, kp);
}

static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
};
module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");

/* Empty filter string matches any channel; otherwise exact name match. */
static bool <API key>(struct dmatest_params *params, struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

/* Empty filter string matches any device; otherwise exact name match. */
static bool <API key>(struct dmatest_params *params, struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	prandom_bytes(&buf, sizeof(buf));
	return buf;
}

/*
 * Low 5 bits of every pattern byte: inverted byte index (or a fixed
 * index for memset tests, where every byte must look identical).
 */
static inline u8 gen_inv_idx(u8 index, bool is_memset)
{
	u8 val = is_memset ? PATTERN_MEMSET_IDX : index;

	return ~val & PATTERN_COUNT_MASK;
}

static inline u8 gen_src_value(u8 index, bool is_memset)
{
	return PATTERN_SRC | gen_inv_idx(index, is_memset);
}

static inline u8 gen_dst_value(u8 index, bool is_memset)
{
	return PATTERN_DST | gen_inv_idx(index, is_memset);
}

/* Fill each NULL-terminated src buffer; [start, start+len) gets COPY bit. */
static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_src_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
		for ( ; i < buf_size; i++)
			buf[i] = gen_src_value(i, is_memset);
		buf++;
	}
}

/* Fill each dst buffer; [start, start+len) gets the OVERWRITE bit. */
static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_dst_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_dst_value(i, is_memset) |
						PATTERN_OVERWRITE;
		for ( ; i < buf_size; i++)
			buf[i] = gen_dst_value(i, is_memset);
	}
}

/* Classify and log a single byte mismatch using the pattern bits. */
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf, bool is_memset)
{
	u8 diff = actual ^ pattern;
	u8 expected = pattern | gen_inv_idx(counter, is_memset);
	const char *thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

/*
 * Verify [start, end) of every buffer against the expected pattern.
 * Returns the total mismatch count; only the first MAX_ERROR_COUNT
 * mismatches are logged in detail.
 */
static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf, bool is_memset)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | gen_inv_idx(counter, is_memset);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf,
							 is_memset);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

/* DMA completion callback: wake the owning test thread exactly once. */
static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;
	struct dmatest_thread *thread =
		container_of(done, struct dmatest_thread, test_done);
	if (!thread->done) {
		done->done = true;
		wake_up_all(done->wait);
	} else {
		/*
		 * If thread->done, it means that this callback occurred
		 * after the parent thread has cleaned up. This can
		 * happen in the case that driver doesn't implement
		 * the terminate_all() functionality and a dma operation
		 * did not occur within the timeout period
		 */
		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
	}
}

/* Largest odd value <= min(x, y). */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

static void result(const char *err, unsigned int n, unsigned int src_off,
		   unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		       unsigned int dst_off, unsigned int len,
		       unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		 current->comm, n, err, src_off, dst_off, len, data);
}

#define verbose_result(err, n, src_off, dst_off, len, data) ({	\
	if (verbose)						\
		result(err, n, src_off, dst_off, len, data);	\
	else							\
		dbg_result(err, n, src_off, dst_off, len, data);\
})

/* val / runtime(us) * 1e6, as a FIXPT_SHIFT fixed-point "per second" rate. */
static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
	unsigned long long per_sec = 1000000;

	if (runtime <= 0)
		return 0;

	/* drop precision until runtime is 32-bits */
	while (runtime > UINT_MAX) {
		runtime >>= 1;
		per_sec <<= 1;
	}

	per_sec *= val;
	per_sec = INT_TO_FIXPT(per_sec);
	do_div(per_sec, runtime);

	return per_sec;
}

static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
	return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
}

/* Free the first cnt raw buffers plus both pointer arrays. */
static void <API key>(struct dmatest_data *d, unsigned int cnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++)
		kfree(d->raw[i]);

	kfree(d->aligned);
	kfree(d->raw);
}

static void <API key>(struct dmatest_data *d)
{
	<API key>(d, d->cnt);
}

/*
 * Allocate d->cnt buffers of buf_size (+ align slack) and matching
 * aligned views. Arrays are cnt+1 long so they stay NULL-terminated
 * for the pattern-walk helpers above.
 */
static int <API key>(struct dmatest_data *d, unsigned int buf_size, u8 align)
{
	unsigned int i = 0;

	d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!d->raw)
		return -ENOMEM;

	d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!d->aligned)
		goto err;

	for (i = 0; i < d->cnt; i++) {
		d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
		if (!d->raw[i])
			goto err;

		/* align to alignment restriction */
		if (align)
			d->aligned[i] = PTR_ALIGN(d->raw[i], align);
		else
			d->aligned[i] = d->raw[i];
	}

	return 0;
err:
	<API key>(d, i);
	return -ENOMEM;
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffer is initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	struct dmatest_thread	*thread = data;
	struct dmatest_done	*done = &thread->test_done;
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	unsigned int		buf_size;
	struct dmatest_data	*src;
	struct dmatest_data	*dst;
	int			i;
	ktime_t			ktime, start, diff;
	ktime_t			filltime = 0;
	ktime_t			comparetime = 0;
	s64			runtime = 0;
	unsigned long long	total_len = 0;
	unsigned long long	iops = 0;
	u8			align = 0;
	bool			is_memset = false;
	dma_addr_t		*srcs;
	dma_addr_t		*dma_pq;

	set_freezable();

	ret = -ENOMEM;

	/* Pair with the smp_wmb() done before kthread_create(). */
	smp_rmb();
	thread->pending = false;
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	src = &thread->src;
	dst = &thread->dst;
	/* Pick buffer counts and alignment per operation type. */
	if (thread->type == DMA_MEMCPY) {
		align = params->alignment < 0 ? dev->copy_align :
						params->alignment;
		src->cnt = dst->cnt = 1;
	} else if (thread->type == DMA_MEMSET) {
		align = params->alignment < 0 ? dev->fill_align :
						params->alignment;
		src->cnt = dst->cnt = 1;
		is_memset = true;
	} else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst->cnt = 1;
		align = params->alignment < 0 ? dev->xor_align :
						params->alignment;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst->cnt = 2;
		align = params->alignment < 0 ? dev->pq_align :
						params->alignment;

		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src->cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	/* Check if buffer count fits into map count variable (u8) */
	if ((src->cnt + dst->cnt) >= 255) {
		pr_err("too many buffers (%d of 255 supported)\n",
		       src->cnt + dst->cnt);
		goto err_free_coefs;
	}

	buf_size = params->buf_size;
	if (1 << align > buf_size) {
		pr_err("%u-byte buffer too small for %d-byte alignment\n",
		       buf_size, 1 << align);
		goto err_free_coefs;
	}

	if (<API key>(src, buf_size, align) < 0)
		goto err_free_coefs;

	if (<API key>(dst, buf_size, align) < 0)
		goto err_src;

	set_user_nice(current, 10);

	srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!srcs)
		goto err_dst;

	dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!dma_pq)
		goto err_srcs_array;

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	if (params->polled)
		flags = DMA_CTRL_ACK;
	else
		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	ktime = ktime_get();
	while (!kthread_should_stop() &&
	       !(params->iterations && total_tests >= params->iterations)) {
		struct <API key> *tx = NULL;
		struct <API key> *um;
		dma_addr_t *dsts;
		unsigned int len;

		total_tests++;

		/* Choose the transfer length: fixed, full-buffer, or random. */
		if (params->transfer_size) {
			if (params->transfer_size >= buf_size) {
				pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
				       params->transfer_size, buf_size);
				break;
			}
			len = params->transfer_size;
		} else if (params->norandom) {
			len = buf_size;
		} else {
			len = dmatest_random() % buf_size + 1;
		}

		/* Do not alter transfer size explicitly defined by user */
		if (!params->transfer_size) {
			len = (len >> align) << align;
			if (!len)
				len = 1 << align;
		}
		total_len += len;

		if (params->norandom) {
			src->off = 0;
			dst->off = 0;
		} else {
			src->off = dmatest_random() % (buf_size - len + 1);
			dst->off = dmatest_random() % (buf_size - len + 1);

			src->off = (src->off >> align) << align;
			dst->off = (dst->off >> align) << align;
		}

		if (!params->noverify) {
			start = ktime_get();
			dmatest_init_srcs(src->aligned, src->off, len,
					  buf_size, is_memset);
			dmatest_init_dsts(dst->aligned, dst->off, len,
					  buf_size, is_memset);

			diff = ktime_sub(ktime_get(), start);
			filltime = ktime_add(filltime, diff);
		}

		um = <API key>(dev->dev, src->cnt + dst->cnt,
					      GFP_KERNEL);
		if (!um) {
			failed_tests++;
			result("unmap data NULL", total_tests,
			       src->off, dst->off, len, ret);
			continue;
		}

		um->len = buf_size;
		for (i = 0; i < src->cnt; i++) {
			void *buf = src->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
						   um->len, DMA_TO_DEVICE);
			srcs[i] = um->addr[i] + src->off;
			ret = dma_mapping_error(dev->dev, um->addr[i]);
			if (ret) {
				result("src mapping error", total_tests,
				       src->off, dst->off, len, ret);
				goto <API key>;
			}
			um->to_cnt++;
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		dsts = &um->addr[src->cnt];
		for (i = 0; i < dst->cnt; i++) {
			void *buf = dst->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
					       DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dsts[i]);
			if (ret) {
				result("dst mapping error", total_tests,
				       src->off, dst->off, len, ret);
				goto <API key>;
			}
			um->bidi_cnt++;
		}

		/* Prepare the descriptor for the selected operation type. */
		if (thread->type == DMA_MEMCPY)
			tx = dev-><API key>(chan,
						      dsts[0] + dst->off,
						      srcs[0], len, flags);
		else if (thread->type == DMA_MEMSET)
			tx = dev-><API key>(chan,
						dsts[0] + dst->off,
						*(src->aligned[0] + src->off),
						len, flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dsts[0] + dst->off,
						      srcs, src->cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			for (i = 0; i < dst->cnt; i++)
				dma_pq[i] = dsts[i] + dst->off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
						     src->cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			result("prep error", total_tests, src->off,
			       dst->off, len, ret);
			msleep(100);
			goto <API key>;
		}

		done->done = false;
		if (!params->polled) {
			tx->callback = dmatest_callback;
			tx->callback_param = done;
		}
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			result("submit error", total_tests, src->off,
			       dst->off, len, ret);
			msleep(100);
			goto <API key>;
		}

		/* Wait for completion: busy-poll or interrupt + timeout. */
		if (params->polled) {
			status = dma_sync_wait(chan, cookie);
			<API key>(chan);
			if (status == DMA_COMPLETE)
				done->done = true;
		} else {
			<API key>(chan);

			<API key>(thread->done_wait,
					done->done,
					msecs_to_jiffies(params->timeout));

			status = <API key>(chan, cookie, NULL,
						   NULL);
		}

		if (!done->done) {
			result("test timed out", total_tests, src->off, dst->off,
			       len, 0);
			goto <API key>;
		} else if (status != DMA_COMPLETE) {
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src->off,
			       dst->off, len, ret);
			goto <API key>;
		}

		dmaengine_unmap_put(um);

		if (params->noverify) {
			verbose_result("test passed", total_tests, src->off,
				       dst->off, len, 0);
			continue;
		}

		/* Verify untouched regions kept their pattern and the
		 * copied region arrived intact. */
		start = ktime_get();
		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count = dmatest_verify(src->aligned, 0, src->off,
				0, PATTERN_SRC, true, is_memset);
		error_count += dmatest_verify(src->aligned, src->off,
				src->off + len, src->off,
				PATTERN_SRC | PATTERN_COPY, true, is_memset);
		error_count += dmatest_verify(src->aligned, src->off + len,
				buf_size, src->off + len,
				PATTERN_SRC, true, is_memset);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(dst->aligned, 0, dst->off,
				0, PATTERN_DST, false, is_memset);
		error_count += dmatest_verify(dst->aligned, dst->off,
				dst->off + len, src->off,
				PATTERN_SRC | PATTERN_COPY, false, is_memset);
		error_count += dmatest_verify(dst->aligned, dst->off + len,
				buf_size, dst->off + len,
				PATTERN_DST, false, is_memset);

		diff = ktime_sub(ktime_get(), start);
		comparetime = ktime_add(comparetime, diff);

		if (error_count) {
			result("data error", total_tests, src->off, dst->off,
			       len, error_count);
			failed_tests++;
		} else {
			verbose_result("test passed", total_tests, src->off,
				       dst->off, len, 0);
		}

		continue;

<API key>:
		dmaengine_unmap_put(um);
		failed_tests++;
	}
	/* Report pure transfer time: subtract fill and compare overhead. */
	ktime = ktime_sub(ktime_get(), ktime);
	ktime = ktime_sub(ktime, comparetime);
	ktime = ktime_sub(ktime, filltime);
	runtime = ktime_to_us(ktime);

	ret = 0;
	kfree(dma_pq);
err_srcs_array:
	kfree(srcs);
err_dst:
	<API key>(dst);
err_src:
	<API key>(src);
err_free_coefs:
	kfree(pq_coefs);
err_thread_type:
	iops = dmatest_persec(runtime, total_tests);
	pr_info("%s: summary %u tests, %u failures %llu.%02llu iops %llu KB/s (%d)\n",
		current->comm, total_tests, failed_tests,
		FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops),
		dmatest_KBs(runtime, total_len), ret);

	/* terminate all transfers on specified channels */
	if (ret || failed_tests)
		<API key>(chan);

	thread->done = true;
	wake_up(&thread_wait);

	return ret;
}

/* Stop and reap every thread of a channel, then free the channel record. */
static void <API key>(struct dmatest_chan *dtc)
{
	struct dmatest_thread *thread;
	struct dmatest_thread *_thread;
	int ret;

	<API key>(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		put_task_struct(thread->task);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	<API key>(dtc->chan);

	kfree(dtc);
}

/*
 * Create threads_per_chan worker threads of the given operation type on
 * a channel. Threads are created in "pending" state and woken later.
 * Returns the number of threads actually created.
 */
static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum <API key> type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_MEMSET)
		op = "set";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		thread->test_done.wait = &thread->done_wait;
		init_waitqueue_head(&thread->done_wait);
		/* Publish thread fields before the new task can read them. */
		smp_wmb();
		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to create thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */
		get_task_struct(thread->task);
		list_add_tail(&thread->node, &dtc->threads);
		thread->pending = true;
	}

	return i;
}

/*
 * Register a channel for testing and spawn threads for every operation
 * type both requested (dmatest param) and supported by the device.
 */
static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		if (dmatest == 0) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		if (dmatest == 1) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}

	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Added %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

/* dma_request_channel() filter: match on configured channel/device names. */
static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!<API key>(params, chan) ||
	    !<API key>(params, chan->device))
		return false;
	else
		return true;
}

/* Grab matching channels with the given capability, up to max_channels. */
static void request_channels(struct dmatest_info *info,
			     enum <API key> type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

/* Snapshot module parameters and request channels for every op type. */
static void add_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
	params->noverify = noverify;
	params->norandom = norandom;
	params->alignment = alignment;
	params->transfer_size = transfer_size;
	params->polled = polled;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_MEMSET);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_PQ);
}

/* Wake every pending worker thread on every registered channel. */
static void run_pending_tests(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;
	unsigned int thread_count = 0;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		thread_count = 0;
		list_for_each_entry(thread, &dtc->threads, node) {
			wake_up_process(thread->task);
			thread_count++;
		}
		pr_info("Started %u threads using %s\n",
			thread_count, dma_chan_name(dtc->chan));
	}
}

/* Tear down all channels: reap their threads and release each channel. */
static void stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	<API key>(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		<API key>(dtc);
		pr_debug("dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void <API key>(struct dmatest_info *info)
{
	/* we might be called early to set run=, defer running until all
	 * parameters have been evaluated
	 */
	if (!info->did_init)
		return;

	run_pending_tests(info);
}

/* "run" getter: report true while tests run; reap finished runs lazily. */
static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (<API key>(info)) {
		dmatest_run = true;
	} else {
		if (!<API key>(info))
			stop_threaded_test(info);
		dmatest_run = false;
	}
	mutex_unlock(&info->lock);

	return param_get_bool(val, kp);
}

/* "run" setter: true starts pending tests, false stops everything. */
static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_bool(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	} else if (dmatest_run) {
		if (<API key>(info))
			<API key>(info);
		else
			pr_info("Could not start test, no channels configured\n");
	} else {
		stop_threaded_test(info);
	}

	mutex_unlock(&info->lock);

	return ret;
}

/*
 * "channel" setter: register an additional channel for the next run.
 * NOTE: definition continues past this chunk; trailing fragment kept
 * verbatim.
 */
static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_chan *dtc;
	char chan_reset_val[20];
	int ret = 0;

	mutex_lock(&info->lock);
	ret = <API key>(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	}
	/* Clear any previously run threads */
	if (!<API key>(info) && !<API key>(info))
		stop_threaded_test(info);
	/* Reject channels that are already registered */
	if (<API key>(info)) {
		list_for_each_entry(dtc, &info->channels, node) {
			if (strcmp(dma_chan_name(dtc->chan),
				   strim(test_channel))
== 0) { dtc = list_last_entry(&info->channels, struct dmatest_chan, node); strlcpy(chan_reset_val, dma_chan_name(dtc->chan), sizeof(chan_reset_val)); ret = -EBUSY; goto add_chan_err; } } } add_threaded_test(info); /* Check if channel was added successfully */ dtc = list_last_entry(&info->channels, struct dmatest_chan, node); if (dtc->chan) { /* * if new channel was not successfully added, revert the * "test_channel" string to the name of the last successfully * added channel. exception for when users issues empty string * to channel parameter. */ if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0) && (strcmp("", strim(test_channel)) != 0)) { ret = -EINVAL; strlcpy(chan_reset_val, dma_chan_name(dtc->chan), sizeof(chan_reset_val)); goto add_chan_err; } } else { /* Clear test_channel if no channels were added successfully */ strlcpy(chan_reset_val, "", sizeof(chan_reset_val)); ret = -EBUSY; goto add_chan_err; } mutex_unlock(&info->lock); return ret; add_chan_err: <API key>(chan_reset_val, kp); mutex_unlock(&info->lock); return ret; } static int dmatest_chan_get(char *val, const struct kernel_param *kp) { struct dmatest_info *info = &test_info; mutex_lock(&info->lock); if (!<API key>(info) && !<API key>(info)) { stop_threaded_test(info); strlcpy(test_channel, "", sizeof(test_channel)); } mutex_unlock(&info->lock); return param_get_string(val, kp); } static int <API key>(char *val, const struct kernel_param *kp) { struct dmatest_info *info = &test_info; struct dmatest_chan *dtc; unsigned int thread_count = 0; list_for_each_entry(dtc, &info->channels, node) { struct dmatest_thread *thread; thread_count = 0; list_for_each_entry(thread, &dtc->threads, node) { thread_count++; } pr_info("%u threads using %s\n", thread_count, dma_chan_name(dtc->chan)); } return 0; } static int __init dmatest_init(void) { struct dmatest_info *info = &test_info; struct dmatest_params *params = &info->params; if (dmatest_run) { mutex_lock(&info->lock); add_threaded_test(info); 
run_pending_tests(info); mutex_unlock(&info->lock); } if (params->iterations && wait) wait_event(thread_wait, !<API key>(info)); /* module parameters are stable, inittime tests are started, * let userspace take over 'run' control */ info->did_init = true; return 0; } /* when compiled-in wait for drivers to load first */ late_initcall(dmatest_init); static void __exit dmatest_exit(void) { struct dmatest_info *info = &test_info; mutex_lock(&info->lock); stop_threaded_test(info); mutex_unlock(&info->lock); } module_exit(dmatest_exit); MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); MODULE_LICENSE("GPL v2");