text
stringlengths 3
1.05M
|
|---|
# -*- coding: utf-8 -*-
import argparse
import inspect
import os
from pprint import pprint
import sys
# add parent directory to sys path to import relative modules
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from lib.composition_utils import *
from lib.io_utils import *
from lib.math_utils import *
from lib.video_utils import *
# input
parser = argparse.ArgumentParser()
addVideoArgs(parser)
a = parser.parse_args()
parseVideoArgs(a)
aa = vars(a)

# Print the initial frame offset of each composition output, separated by
# dashes. Replaces the previous five copy-pasted set/compute/print blocks
# with a single loop; output is byte-identical (no trailing separator).
OUTPUT_FILES = [
    "output/ia_fedflixnara_01_proliferation.mp4",
    "output/ia_fedflixnara_02_waves.mp4",
    "output/ia_fedflixnara_03_falling.mp4",
    "output/ia_fedflixnara_04_orbits.mp4",
    "output/ia_fedflixnara_05_shuffle.mp4",
]
for i, output_file in enumerate(OUTPUT_FILES):
    if i > 0:
        print("-----")
    aa["OUTPUT_FILE"] = output_file
    offset = getInitialOffset(a)
    print(offset)
|
import Check from "../Core/Check.js";
import defaultValue from "../Core/defaultValue.js";
import defined from "../Core/defined.js";
import DeveloperError from "../Core/DeveloperError.js";
import getAbsoluteUri from "../Core/getAbsoluteUri.js";
import GltfLoaderUtil from "./GltfLoaderUtil.js";
import hasExtension from "./hasExtension.js";
/**
* Compute cache keys for resources in {@link ResourceCache}.
*
* @namespace ResourceCacheKey
*
* @private
*/
// Namespace object; the public key builders are attached as properties below.
const ResourceCacheKey = {};
// Key an external resource by its absolute URI so the same file reached via
// different relative paths maps to a single cache entry.
function getExternalResourceCacheKey(resource) {
  const { url } = resource;
  return getAbsoluteUri(url);
}
// Encode a buffer view's byte range as "start-end". For meshopt-compressed
// views the range of the compressed data from the extension is used instead.
function getBufferViewCacheKey(bufferView) {
  let { byteOffset, byteLength } = bufferView;
  if (hasExtension(bufferView, "EXT_meshopt_compression")) {
    const meshopt = bufferView.extensions.EXT_meshopt_compression;
    byteOffset = defaultValue(meshopt.byteOffset, 0);
    byteLength = meshopt.byteLength;
  }
  return `${byteOffset}-${byteOffset + byteLength}`;
}
// Key an accessor by its absolute byte offset (bufferView offset + accessor
// offset) plus component type, element type, and element count.
function getAccessorCacheKey(accessor, bufferView) {
  const start = bufferView.byteOffset + accessor.byteOffset;
  return [start, accessor.componentType, accessor.type, accessor.count].join(
    "-"
  );
}
// An external buffer is keyed exactly like any other external resource:
// by its absolute URI.
function getExternalBufferCacheKey(resource) {
  return getExternalResourceCacheKey(resource);
}
// Key an embedded buffer by its parent resource's URI plus the buffer index,
// since embedded buffers have no URI of their own.
function getEmbeddedBufferCacheKey(parentResource, bufferId) {
  return `${getExternalResourceCacheKey(parentResource)}-buffer-id-${bufferId}`;
}
// Key a glTF buffer: external buffers (those with a uri) by their derived
// resource, embedded buffers by parent glTF resource + buffer index.
function getBufferCacheKey(buffer, bufferId, gltfResource, baseResource) {
  if (defined(buffer.uri)) {
    return getExternalBufferCacheKey(
      baseResource.getDerivedResource({ url: buffer.uri })
    );
  }
  return getEmbeddedBufferCacheKey(gltfResource, bufferId);
}
// Key Draco-compressed data by the backing buffer plus the byte range of
// the buffer view referenced by the Draco extension.
function getDracoCacheKey(gltf, draco, gltfResource, baseResource) {
  const bufferView = gltf.bufferViews[draco.bufferView];
  const bufferId = bufferView.buffer;
  const bufferCacheKey = getBufferCacheKey(
    gltf.buffers[bufferId],
    bufferId,
    gltfResource,
    baseResource
  );
  return `${bufferCacheKey}-range-${getBufferViewCacheKey(bufferView)}`;
}
// Key a glTF image: external images (with a uri) by their absolute URI,
// embedded images by the backing buffer plus the buffer-view byte range.
function getImageCacheKey(gltf, imageId, gltfResource, baseResource) {
  const image = gltf.images[imageId];
  if (defined(image.uri)) {
    const resource = baseResource.getDerivedResource({ url: image.uri });
    return getExternalResourceCacheKey(resource);
  }
  const bufferView = gltf.bufferViews[image.bufferView];
  const bufferId = bufferView.buffer;
  const bufferCacheKey = getBufferCacheKey(
    gltf.buffers[bufferId],
    bufferId,
    gltfResource,
    baseResource
  );
  return `${bufferCacheKey}-range-${getBufferViewCacheKey(bufferView)}`;
}
// Key a sampler by its four WebGL-relevant settings joined with dashes.
function getSamplerCacheKey(gltf, textureInfo) {
  const { wrapS, wrapT, minificationFilter, magnificationFilter } =
    GltfLoaderUtil.createSampler({
      gltf: gltf,
      textureInfo: textureInfo,
    });
  return [wrapS, wrapT, minificationFilter, magnificationFilter].join("-");
}
/**
* Gets the schema cache key.
*
* @param {Object} options Object with the following properties:
* @param {Object} [options.schema] An object that explicitly defines a schema JSON. Mutually exclusive with options.resource.
* @param {Resource} [options.resource] The {@link Resource} pointing to the schema JSON. Mutually exclusive with options.schema.
*
* @returns {String} The schema cache key.
*
* @exception {DeveloperError} One of options.schema and options.resource must be defined.
* @private
*/
ResourceCacheKey.getSchemaCacheKey = function (options) {
  const { schema, resource } = options;
  //>>includeStart('debug', pragmas.debug);
  if (defined(schema) === defined(resource)) {
    throw new DeveloperError(
      "One of options.schema and options.resource must be defined."
    );
  }
  //>>includeEnd('debug');
  // Inline schemas are keyed by their serialized content; external ones by URI.
  return defined(schema)
    ? `embedded-schema:${JSON.stringify(schema)}`
    : `external-schema:${getExternalResourceCacheKey(resource)}`;
};
/**
* Gets the external buffer cache key.
*
* @param {Object} options Object with the following properties:
* @param {Resource} options.resource The {@link Resource} pointing to the external buffer.
*
* @returns {String} The external buffer cache key.
* @private
*/
ResourceCacheKey.getExternalBufferCacheKey = function (options) {
  const { resource } = defaultValue(options, defaultValue.EMPTY_OBJECT);
  //>>includeStart('debug', pragmas.debug);
  Check.typeOf.object("options.resource", resource);
  //>>includeEnd('debug');
  return `external-buffer:${getExternalBufferCacheKey(resource)}`;
};
/**
* Gets the embedded buffer cache key.
*
* @param {Object} options Object with the following properties:
* @param {Resource} options.parentResource The {@link Resource} containing the embedded buffer.
* @param {Number} options.bufferId A unique identifier of the embedded buffer within the parent resource.
*
* @returns {String} The embedded buffer cache key.
* @private
*/
ResourceCacheKey.getEmbeddedBufferCacheKey = function (options) {
  const { parentResource, bufferId } = defaultValue(
    options,
    defaultValue.EMPTY_OBJECT
  );
  //>>includeStart('debug', pragmas.debug);
  Check.typeOf.object("options.parentResource", parentResource);
  Check.typeOf.number("options.bufferId", bufferId);
  //>>includeEnd('debug');
  return `embedded-buffer:${getEmbeddedBufferCacheKey(parentResource, bufferId)}`;
};
/**
* Gets the glTF cache key.
*
* @param {Object} options Object with the following properties:
* @param {Resource} options.gltfResource The {@link Resource} containing the glTF.
*
* @returns {String} The glTF cache key.
* @private
*/
ResourceCacheKey.getGltfCacheKey = function (options) {
  const { gltfResource } = defaultValue(options, defaultValue.EMPTY_OBJECT);
  //>>includeStart('debug', pragmas.debug);
  Check.typeOf.object("options.gltfResource", gltfResource);
  //>>includeEnd('debug');
  return `gltf:${getExternalResourceCacheKey(gltfResource)}`;
};
/**
* Gets the buffer view cache key.
*
* @param {Object} options Object with the following properties:
* @param {Object} options.gltf The glTF JSON.
* @param {Number} options.bufferViewId The bufferView ID.
* @param {Resource} options.gltfResource The {@link Resource} containing the glTF.
* @param {Resource} options.baseResource The {@link Resource} that paths in the glTF JSON are relative to.
*
* @returns {String} The buffer view cache key.
* @private
*/
ResourceCacheKey.getBufferViewCacheKey = function (options) {
  options = defaultValue(options, defaultValue.EMPTY_OBJECT);
  const gltf = options.gltf;
  const bufferViewId = options.bufferViewId;
  const gltfResource = options.gltfResource;
  const baseResource = options.baseResource;
  //>>includeStart('debug', pragmas.debug);
  Check.typeOf.object("options.gltf", gltf);
  Check.typeOf.number("options.bufferViewId", bufferViewId);
  Check.typeOf.object("options.gltfResource", gltfResource);
  Check.typeOf.object("options.baseResource", baseResource);
  //>>includeEnd('debug');
  const bufferView = gltf.bufferViews[bufferViewId];
  let bufferId = bufferView.buffer;
  if (hasExtension(bufferView, "EXT_meshopt_compression")) {
    // The compressed data lives in the buffer referenced by the extension,
    // not the buffer view's own buffer.
    const meshopt = bufferView.extensions.EXT_meshopt_compression;
    bufferId = meshopt.buffer;
  }
  // Bug fix: look up the buffer only AFTER any meshopt override. Previously
  // `buffer` was fetched from the pre-override bufferId, so the key combined
  // the meshopt buffer id with the wrong buffer object.
  const buffer = gltf.buffers[bufferId];
  const bufferCacheKey = getBufferCacheKey(
    buffer,
    bufferId,
    gltfResource,
    baseResource
  );
  const bufferViewCacheKey = getBufferViewCacheKey(bufferView);
  return "buffer-view:" + bufferCacheKey + "-range-" + bufferViewCacheKey;
};
/**
* Gets the Draco cache key.
*
* @param {Object} options Object with the following properties:
* @param {Object} options.gltf The glTF JSON.
* @param {Object} options.draco The Draco extension object.
* @param {Resource} options.gltfResource The {@link Resource} containing the glTF.
* @param {Resource} options.baseResource The {@link Resource} that paths in the glTF JSON are relative to.
*
* @returns {String} The Draco cache key.
* @private
*/
ResourceCacheKey.getDracoCacheKey = function (options) {
  const { gltf, draco, gltfResource, baseResource } = defaultValue(
    options,
    defaultValue.EMPTY_OBJECT
  );
  //>>includeStart('debug', pragmas.debug);
  Check.typeOf.object("options.gltf", gltf);
  Check.typeOf.object("options.draco", draco);
  Check.typeOf.object("options.gltfResource", gltfResource);
  Check.typeOf.object("options.baseResource", baseResource);
  //>>includeEnd('debug');
  return `draco:${getDracoCacheKey(gltf, draco, gltfResource, baseResource)}`;
};
/**
* Gets the vertex buffer cache key.
*
* @param {Object} options Object with the following properties:
* @param {Object} options.gltf The glTF JSON.
* @param {Resource} options.gltfResource The {@link Resource} containing the glTF.
* @param {Resource} options.baseResource The {@link Resource} that paths in the glTF JSON are relative to.
* @param {Number} [options.bufferViewId] The bufferView ID corresponding to the vertex buffer.
* @param {Object} [options.draco] The Draco extension object.
* @param {String} [options.attributeSemantic] The attribute semantic, e.g. POSITION or NORMAL.
* @param {Boolean} [options.dequantize=false] Determines whether or not the vertex buffer will be dequantized on the CPU.
* @param {Boolean} [options.loadAsTypedArray=false] Load vertex buffer as a typed array instead of a GPU vertex buffer.
*
* @exception {DeveloperError} One of options.bufferViewId and options.draco must be defined.
* @exception {DeveloperError} When options.draco is defined options.attributeSemantic must also be defined.
*
* @returns {String} The vertex buffer cache key.
* @private
*/
ResourceCacheKey.getVertexBufferCacheKey = function (options) {
  options = defaultValue(options, defaultValue.EMPTY_OBJECT);
  const gltf = options.gltf;
  const gltfResource = options.gltfResource;
  const baseResource = options.baseResource;
  const bufferViewId = options.bufferViewId;
  const draco = options.draco;
  const attributeSemantic = options.attributeSemantic;
  // Load options become part of the key so the same source data loaded with
  // different options is cached as distinct entries.
  const dequantize = defaultValue(options.dequantize, false);
  const loadAsTypedArray = defaultValue(options.loadAsTypedArray, false);
  //>>includeStart('debug', pragmas.debug);
  Check.typeOf.object("options.gltf", gltf);
  Check.typeOf.object("options.gltfResource", gltfResource);
  Check.typeOf.object("options.baseResource", baseResource);
  // Exactly one of bufferViewId / draco may be given; Draco-compressed
  // attributes additionally require the attribute semantic.
  const hasBufferViewId = defined(bufferViewId);
  const hasDraco = defined(draco);
  const hasAttributeSemantic = defined(attributeSemantic);
  if (hasBufferViewId === hasDraco) {
    throw new DeveloperError(
      "One of options.bufferViewId and options.draco must be defined."
    );
  }
  if (hasDraco && !hasAttributeSemantic) {
    throw new DeveloperError(
      "When options.draco is defined options.attributeSemantic must also be defined."
    );
  }
  if (hasDraco) {
    Check.typeOf.object("options.draco", draco);
    Check.typeOf.string("options.attributeSemantic", attributeSemantic);
  }
  //>>includeEnd('debug');
  // Suffix encoding the load options (see note above).
  let cacheKeySuffix = "";
  if (dequantize) {
    cacheKeySuffix += "-dequantize";
  }
  if (loadAsTypedArray) {
    cacheKeySuffix += "-typed-array";
  }
  if (defined(draco)) {
    // Draco path: key on the compressed range plus the attribute semantic.
    const dracoCacheKey = getDracoCacheKey(
      gltf,
      draco,
      gltfResource,
      baseResource
    );
    return (
      "vertex-buffer:" +
      dracoCacheKey +
      "-draco-" +
      attributeSemantic +
      cacheKeySuffix
    );
  }
  // Uncompressed path: key on the backing buffer plus the buffer-view range.
  const bufferView = gltf.bufferViews[bufferViewId];
  const bufferId = bufferView.buffer;
  const buffer = gltf.buffers[bufferId];
  const bufferCacheKey = getBufferCacheKey(
    buffer,
    bufferId,
    gltfResource,
    baseResource
  );
  const bufferViewCacheKey = getBufferViewCacheKey(bufferView);
  return (
    "vertex-buffer:" +
    bufferCacheKey +
    "-range-" +
    bufferViewCacheKey +
    cacheKeySuffix
  );
};
/**
* Gets the index buffer cache key.
*
* @param {Object} options Object with the following properties:
* @param {Object} options.gltf The glTF JSON.
* @param {Number} options.accessorId The accessor ID corresponding to the index buffer.
* @param {Resource} options.gltfResource The {@link Resource} containing the glTF.
* @param {Resource} options.baseResource The {@link Resource} that paths in the glTF JSON are relative to.
* @param {Object} [options.draco] The Draco extension object.
* @param {Boolean} [options.loadAsTypedArray=false] Load index buffer as a typed array instead of a GPU index buffer.
*
* @returns {String} The index buffer cache key.
* @private
*/
ResourceCacheKey.getIndexBufferCacheKey = function (options) {
  options = defaultValue(options, defaultValue.EMPTY_OBJECT);
  const gltf = options.gltf;
  const accessorId = options.accessorId;
  const gltfResource = options.gltfResource;
  const baseResource = options.baseResource;
  const draco = options.draco;
  // The load option becomes part of the key so typed-array and GPU-buffer
  // loads of the same data are cached separately.
  const loadAsTypedArray = defaultValue(options.loadAsTypedArray, false);
  //>>includeStart('debug', pragmas.debug);
  Check.typeOf.object("options.gltf", gltf);
  Check.typeOf.number("options.accessorId", accessorId);
  Check.typeOf.object("options.gltfResource", gltfResource);
  Check.typeOf.object("options.baseResource", baseResource);
  //>>includeEnd('debug');
  let cacheKeySuffix = "";
  if (loadAsTypedArray) {
    cacheKeySuffix += "-typed-array";
  }
  if (defined(draco)) {
    // Draco path: indices come from the compressed blob, so key on its range.
    const dracoCacheKey = getDracoCacheKey(
      gltf,
      draco,
      gltfResource,
      baseResource
    );
    return "index-buffer:" + dracoCacheKey + "-draco" + cacheKeySuffix;
  }
  // Uncompressed path: key on the backing buffer plus the accessor's
  // offset/type/count within its buffer view.
  const accessor = gltf.accessors[accessorId];
  const bufferViewId = accessor.bufferView;
  const bufferView = gltf.bufferViews[bufferViewId];
  const bufferId = bufferView.buffer;
  const buffer = gltf.buffers[bufferId];
  const bufferCacheKey = getBufferCacheKey(
    buffer,
    bufferId,
    gltfResource,
    baseResource
  );
  const accessorCacheKey = getAccessorCacheKey(accessor, bufferView);
  return (
    "index-buffer:" +
    bufferCacheKey +
    "-accessor-" +
    accessorCacheKey +
    cacheKeySuffix
  );
};
/**
* Gets the image cache key.
*
* @param {Object} options Object with the following properties:
* @param {Object} options.gltf The glTF JSON.
* @param {Number} options.imageId The image ID.
* @param {Resource} options.gltfResource The {@link Resource} containing the glTF.
* @param {Resource} options.baseResource The {@link Resource} that paths in the glTF JSON are relative to.
*
* @returns {String} The image cache key.
* @private
*/
ResourceCacheKey.getImageCacheKey = function (options) {
  const { gltf, imageId, gltfResource, baseResource } = defaultValue(
    options,
    defaultValue.EMPTY_OBJECT
  );
  //>>includeStart('debug', pragmas.debug);
  Check.typeOf.object("options.gltf", gltf);
  Check.typeOf.number("options.imageId", imageId);
  Check.typeOf.object("options.gltfResource", gltfResource);
  Check.typeOf.object("options.baseResource", baseResource);
  //>>includeEnd('debug');
  return `image:${getImageCacheKey(gltf, imageId, gltfResource, baseResource)}`;
};
/**
* Gets the texture cache key.
*
* @param {Object} options Object with the following properties:
* @param {Object} options.gltf The glTF JSON.
* @param {Object} options.textureInfo The texture info object.
* @param {Resource} options.gltfResource The {@link Resource} containing the glTF.
* @param {Resource} options.baseResource The {@link Resource} that paths in the glTF JSON are relative to.
* @param {SupportedImageFormats} options.supportedImageFormats The supported image formats.
*
* @returns {String} The texture cache key.
* @private
*/
ResourceCacheKey.getTextureCacheKey = function (options) {
  const { gltf, textureInfo, gltfResource, baseResource, supportedImageFormats } =
    defaultValue(options, defaultValue.EMPTY_OBJECT);
  //>>includeStart('debug', pragmas.debug);
  Check.typeOf.object("options.gltf", gltf);
  Check.typeOf.object("options.textureInfo", textureInfo);
  Check.typeOf.object("options.gltfResource", gltfResource);
  Check.typeOf.object("options.baseResource", baseResource);
  Check.typeOf.object("options.supportedImageFormats", supportedImageFormats);
  //>>includeEnd('debug');
  const imageId = GltfLoaderUtil.getImageIdFromTexture({
    gltf: gltf,
    textureId: textureInfo.index,
    supportedImageFormats: supportedImageFormats,
  });
  const imageCacheKey = getImageCacheKey(
    gltf,
    imageId,
    gltfResource,
    baseResource
  );
  // Textures and samplers are coupled in WebGL 1, so the sampler cache key is
  // folded into the texture cache key. When upgrading to WebGL 2 consider
  // dropping samplerCacheKey from here.
  const samplerCacheKey = getSamplerCacheKey(gltf, textureInfo);
  return `texture:${imageCacheKey}-sampler-${samplerCacheKey}`;
};
export default ResourceCacheKey;
|
// Works with filtering
filterSelection("recent-project"); // Execute the function and show all columns

/**
 * Show only the ".project" elements whose class list contains `c`.
 * Pass "all" to show every project element.
 * Fixes: `var` declarations, loose equality (`==`), missing semicolons.
 * @param {string} c - Filter class name, or "all".
 */
function filterSelection(c) {
  const items = document.getElementsByClassName("project");
  // An empty filter string matches every element (indexOf("") is always 0),
  // which is how the "all" case shows everything.
  const filter = c === "all" ? "" : c;
  for (const item of items) {
    w3RemoveClass(item, "show-project");
    if (item.className.indexOf(filter) > -1) {
      w3AddClass(item, "show-project");
    }
  }
}
// Show filtered elements
function w3AddClass(element, name) {
var i, arr1, arr2;
arr1 = element.className.split(" ");
arr2 = name.split(" ");
for (i = 0; i < arr2.length; i++) {
if (arr1.indexOf(arr2[i]) == -1) {
element.className += " " + arr2[i];
}
}
}
// Hide elements that are not selected: strip every occurrence of each class
// in `name` (space-separated) from the element's class attribute.
function w3RemoveClass(element, name) {
  const toRemove = name.split(" ");
  element.className = element.className
    .split(" ")
    .filter((cls) => !toRemove.includes(cls))
    .join(" ");
}
// Add active class to the current button (highlight it)
var btnContainer = document.getElementById("myBtnContainer");
var btns = btnContainer.getElementsByClassName("btn");
for (var i = 0; i < btns.length; i++) {
  // Plain function (not arrow) so `this` is the clicked button.
  btns[i].addEventListener("click", function () {
    var current = document.getElementsByClassName("btn-secondary--active");
    // Fix: guard against an empty collection — previously this threw a
    // TypeError on the first click when no button was active yet.
    if (current.length > 0) {
      current[0].className = current[0].className.replace(
        " btn-secondary--active",
        ""
      );
    }
    this.className += " btn-secondary--active";
  });
}
|
(window.webpackJsonp=window.webpackJsonp||[]).push([[1906],{"/lqo":function(e,t,r){"use strict";var n=r("VHp3");r("O9pe"),r("17x9");var o,c=(o=r("q1tI"))&&"object"==typeof o&&"default"in o?o.default:o,a=c.createElement("path",{d:"M9 13H19V15H9z"}),i=c.createElement("path",{d:"M22.45,21A10.87,10.87,0,0,0,25,14,11,11,0,1,0,14,25a10.87,10.87,0,0,0,7-2.55L28.59,30,30,28.59ZM14,23a9,9,0,1,1,9-9A9,9,0,0,1,14,23Z"}),l=c.forwardRef((function(e,t){var r=e.children,o=n._objectWithoutProperties(e,["children"]);return c.createElement(n.Icon,n._extends({width:32,height:32,viewBox:"0 0 32 32",xmlns:"http://www.w3.org/2000/svg",fill:"currentColor",ref:t},o),a,i,r)}));e.exports=l},VHp3:function(e,t,r){"use strict";var n,o=r("O9pe"),c=(n=r("q1tI"))&&"object"==typeof n&&"default"in n?n.default:n;function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function i(){return(i=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var r=arguments[t];for(var n in r)Object.prototype.hasOwnProperty.call(r,n)&&(e[n]=r[n])}return e}).apply(this,arguments)}function l(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function p(e){for(var t=1;t<arguments.length;t++){var r=null!=arguments[t]?arguments[t]:{};t%2?l(Object(r),!0).forEach((function(t){a(e,t,r[t])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(r)):l(Object(r)).forEach((function(t){Object.defineProperty(e,t,Object.getOwnPropertyDescriptor(r,t))}))}return e}function u(e,t){if(null==e)return{};var r,n,o=function(e,t){if(null==e)return{};var r,n,o={},c=Object.keys(e);for(n=0;n<c.length;n++)r=c[n],t.indexOf(r)>=0||(o[r]=e[r]);return o}(e,t);if(Object.getOwnPropertySymbols){var 
c=Object.getOwnPropertySymbols(e);for(n=0;n<c.length;n++)r=c[n],t.indexOf(r)>=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(o[r]=e[r])}return o}var s=c.forwardRef((function(e,t){var r=e.className,n=e.children,a=e.tabIndex,i=u(e,["className","children","tabIndex"]),l=o.getAttributes(p(p({},i),{},{tabindex:a})),s=l.tabindex,f=u(l,["tabindex"]);return r&&(f.className=r),null!=s&&(f.tabIndex=s),t&&(f.ref=t),c.createElement("svg",f,n)}));s.displayName="Icon",s.defaultProps={xmlns:"http://www.w3.org/2000/svg",preserveAspectRatio:"xMidYMid meet"},t.Icon=s,t._extends=i,t._objectWithoutProperties=u}}]);
|
module.exports= [
{ "key": "aerialway",
"values" : [ { "@id": "drag_lift"} ,
{ "@id": "chair_lift"} ,
{ "@id": "cable_car"} ,
{ "@id": "aerialway_v_station"} ,
{ "@id": "pylon"} ,
{ "@id": "gondola"} ,
{ "@id": "goods"} ] } ,
{ "key": "aeroway",
"values" : [ { "@id": "helipad"} ,
{ "@id": "aerodrome"} ,
{ "@id": "apron"} ,
{ "@id": "taxiway"} ,
{ "@id": "terminal"} ,
{ "@id": "runway"} ,
{ "@id": "gate"} ] } ,
{ "key": "amenity",
"values" : [ { "@id": "hospital"} ,
{ "@id": "grave_yard"} ,
{ "@id": "kindergarten"} ,
{ "@id": "car_sharing"} ,
{ "@id": "car_wash"} ,
{ "@id": "casino"} ,
{ "@id": "cafe"} ,
{ "@id": "car_rental"} ,
{ "@id": "college"} ,
{ "@id": "charging_station"} ,
{ "@id": "cinema"} ,
{ "@id": "dentist"} ,
{ "@id": "community_centre"} ,
{ "@id": "courthouse"} ,
{ "@id": "drinking_water"} ,
{ "@id": "emergency_phone"} ,
{ "@id": "fast_food"} ,
{ "@id": "doctors"} ,
{ "@id": "airport"} ,
{ "@id": "atm"} ,
{ "@id": "arts_centre"} ,
{ "@id": "bench"} ,
{ "@id": "bicycle_parking"} ,
{ "@id": "bicycle_rental"} ,
{ "@id": "biergarten"} ,
{ "@id": "bank"} ,
{ "@id": "bar"} ,
{ "@id": "bureau_de_change"} ,
{ "@id": "bus_station"} ,
{ "@id": "telephone"} ,
{ "@id": "theatre"} ,
{ "@id": "swimming_pool"} ,
{ "@id": "taxi"} ,
{ "@id": "toilets"} ,
{ "@id": "town_hall"} ,
{ "@id": "waste_basket"} ,
{ "@id": "university"} ,
{ "@id": "veterinary"} ,
{ "@id": "post_box"} ,
{ "@id": "post_office"} ,
{ "@id": "pub"} ,
{ "@id": "place_of_worship"} ,
{ "@id": "police"} ,
{ "@id": "restaurant"} ,
{ "@id": "recycling"} ,
{ "@id": "school"} ,
{ "@id": "sauna"} ,
{ "@id": "shelter"} ,
{ "@id": "marketplace"} ,
{ "@id": "library"} ,
{ "@id": "nightclub"} ,
{ "@id": "parking"} ,
{ "@id": "pharmacy"} ,
{ "@id": "food_court"} ,
{ "@id": "ferry_terminal"} ,
{ "@id": "fire_station"} ,
{ "@id": "fountain"} ,
{ "@id": "fuel"} ] } ,
{ "key": "bicycle_parking",
"values" : [ { "@id": "ground_slots"} ,
{ "@id": "anchors"} ,
{ "@id": "building"} ,
{ "@id": "stands"} ,
{ "@id": "wall_loops"} ,
{ "@id": "shed"} ,
{ "@id": "lockers"} ,
{ "@id": "informal"} ] } ,
{ "key": "building",
"values" : [ ] } ,
{ "key": "boundary",
"values" : [ { "@id": "administrative"} ,
{ "@id": "historic"} ,
{ "@id": "maritime"} ,
{ "@id": "national_park"} ,
{ "@id": "political"} ,
{ "@id": "postal_code"} ,
{ "@id": "religious_administration"} ,
{ "@id": "protected_area"} ,
{ "@id": "civil"} , ] } ,
{ "key": "castle_type",
"values" : [ { "@id": "fortress"} ,
{ "@id": "defensive"} ,
{ "@id": "burg"} ,
{ "@id": "stately"} ,
{ "@id": "schloss"} ] } ,
{ "key": "place",
"values" : [ { "@id": "country"} ,
{ "@id": "state"} ,
{ "@id": "region"} ,
{ "@id": "province"} ,
{ "@id": "district"} ,
{ "@id": "county"} ,
{ "@id": "municipality"},
{ "@id": "city"} ,
{ "@id": "borough"} ,
{ "@id": "suburb"} ,
{ "@id": "quarter"} ,
{ "@id": "neighbourhood"} ,
{ "@id": "city_block"},
{ "@id": "plot"},
{ "@id": "town"},
{ "@id": "village"},
{ "@id": "hamlet"},
{ "@id": "isolated_dwelling"},
{ "@id": "farm"},
{ "@id": "allotments"},
{ "@id": "continent"},
{ "@id": "archipelago"},
{ "@id": "island"},
{ "@id": "islet"},
{ "@id": "square"},
{ "@id": "locality"}] } ,
{ "key": "cuisine",
"values" : [ { "@id": "turkish"} ,
{ "@id": "vegetarian"} ,
{ "@id": "cuisine_v_seafood"} ,
{ "@id": "american"} ,
{ "@id": "arabic"} ,
{ "@id": "asian"} ,
{ "@id": "burger"} ,
{ "@id": "chinese"} ,
{ "@id": "coffee_shop"} ,
{ "@id": "fish"} ,
{ "@id": "french"} ,
{ "@id": "german"} ,
{ "@id": "greek"} ,
{ "@id": "indian"} ,
{ "@id": "international"} ,
{ "@id": "italian"} ,
{ "@id": "japanese"} ,
{ "@id": "kebap"} ,
{ "@id": "mexican"} ,
{ "@id": "pizza"} ,
{ "@id": "regional"} ,
{ "@id": "spanish"} ,
{ "@id": "sushi"} ,
{ "@id": "thai"} ] } ,
{ "key": "denomination",
"values" : [ { "@id": "baptist"} ,
{ "@id": "catholic"} ,
{ "@id": "lutheran"} ,
{ "@id": "methodist"} ,
{ "@id": "orthodox"} ,
{ "@id": "protestant"} ] } ,
{ "key": "emergency",
"values" : [ { "@id": "emergency_v_no"} ,
{ "@id": "emergency_v_yes"} ,
{ "@id": "ambulance_station"} ,
{ "@id": "fire_hydrant"} ,
{ "@id": "phone"} ] } ,
{ "key": "geological",
"values" : [ { "@id": "outcrop"},{ "@id": "moraine"},{ "@id": "palaeontological_site"} ] } ,
{ "key": "highway",
"values" : [
{ "@id": "residential"} ] } ,
{ "key": "historic",
"values" : [ { "@id": "aircraft"} ,
{ "@id": "aqueduct"} ,
{ "@id": "archaeological_site"} ,
{ "@id": "battlefield"} ,
{ "@id": "boundary_stone"} ,
{ "@id": "building"} ,
{ "@id": "cannon"},
{ "@id": "castle"},
{ "@id": "city_gate"},
{ "@id": "citywalls"},
{ "@id": "farm"},
{ "@id": "fort"},
{ "@id": "gallows"},
{ "@id": "highwater_mark"},
{ "@id": "locomotive"},
{ "@id": "manor"},
{ "@id": "memorial"},
{ "@id": "milestone"},
{ "@id": "monastery"},
{ "@id": "monument"},
{ "@id": "optical_telegraph"},
{ "@id": "pillory"},
{ "@id": "railway_car"},
{ "@id": "ruins"},
{ "@id": "rune_ston"},
{ "@id": "ship"},
{ "@id": "tomb"},
{ "@id": "wayside_cross"},
{ "@id": "wayside_shrine"},
{ "@id": "wreck"} ] } ,
{ "key": "landuse",
"values" : [ { "@id": "forest"},{ "@id": "residential"} ] } ,
{ "key": "leisure",
"values" : [ { "@id": "stadium"} ,
{ "@id": "sports_centre"} ,
{ "@id": "swimming_pool"} ,
{ "@id": "arena"} ,
{ "@id": "dance"} ,
{ "@id": "common"} ,
{ "@id": "dog_park"} ,
{ "@id": "ice_rink"} ,
{ "@id": "fishing"} ,
{ "@id": "fitness_station"} ,
{ "@id": "golf_course"} ,
{ "@id": "garden"} ,
{ "@id": "nature_reserve"} ,
{ "@id": "picnic_table"} ,
{ "@id": "park"} ,
{ "@id": "marina"} ,
{ "@id": "miniature_golf"} ,
{ "@id": "slipway"} ,
{ "@id": "playground"} ,
{ "@id": "pitch"} ,
{ "@id": "recreation_ground"} ,
{ "@id": "track"} ,
{ "@id": "water_park"} ] } ,
{ "key": "man_made",
"values" : [ { "@id": "storage_tank"} ,
{ "@id": "water_tower"} ,
{ "@id": "works"} ,
{ "@id": "environmental_hazard"} ,
{ "@id": "pier"} ,
{ "@id": "pipeline"} ,
{ "@id": "monitoring_station"} ,
{ "@id": "wastewater_plant"} ,
{ "@id": "tower"} ,
{ "@id": "survey_point"} ] } ,
{ "key": "military",
"values" : [ { "@id": "bunker"} ,
{ "@id": "barracks"} ,
{ "@id": "bombcrater"} ,
{ "@id": "airfield"} ,
{ "@id": "danger_area"} ,
{ "@id": "naval_base"} ,
{ "@id": "nuclear_explosion_site"} ,
{ "@id": "range"} ] } ,
{ "key": "natural",
"values" : [ { "@id": "wood"} ,
{"@id": "tree_row"} ,
{"@id": "tree"} ,
{"@id": "scrub"} ,
{"@id": "heath"} ,
{"@id": "moor"} ,
{"@id": "grassland"} ,
{"@id": "fell"} ,
{"@id": "bare_rock"} ,
{"@id": "scree"} ,
{"@id": "shingle"} ,
{"@id": "sand"} ,
{"@id": "mud"} ,
{"@id": "water"} ,
{"@id": "wetland"} ,
{"@id": "glacier"} ,
{"@id": "bay"} ,
{"@id": "cape"} ,
{"@id": "beach"} ,
{"@id": "coastline"} ,
{"@id": "spring"} ,
{"@id": "hot_spring"} ,
{"@id": "geyser"} ,
{"@id": "peak"} ,
{"@id": "volcano"} ,
{"@id": "valley"} ,
{"@id": "ridge"} ,
{"@id": "arete"} ,
{"@id": "cliff"} ,
{"@id": "saddle"} ,
{"@id": "rock"} ,
{"@id": "stone"} ,
{"@id": "sinkhole"} ,
{"@id": "cave_entrance"}
] } ,
{ "key": "power",
"values" : [ { "@id": "power_v_station"} ,
{ "@id": "power_v_tower"} ,
{ "@id": "line"} ,
{ "@id": "minor_line"} ,
{ "@id": "generator"} ,
{ "@id": "sub_station"} ,
{ "@id": "transformer"} ,
{ "@id": "pole"} ] } ,
{ "key": "power_source",
"values" : [ { "@id": "power_source_v_no"} ,
{ "@id": "coal"} ,
{ "@id": "wind"} ,
{ "@id": "photovoltaic"} ,
{ "@id": "nuclear"} ,
{ "@id": "fossil"} ,
{ "@id": "gas"} ,
{ "@id": "hydro"} ,
{ "@id": "renewable"} ] } ,
{ "key": "railway",
"values" : [ { "@id": "platform"} ,
{ "@id": "rail"} ,
{ "@id": "railway_v_tram"} ,
{ "@id": "crossing"} ,
{ "@id": "disused"} ,
{ "@id": "abandoned"} ,
{ "@id": "level_crossing"} ,
{ "@id": "halt"} ,
{ "@id": "tram_stop"} ,
{ "@id": "spur"} ,
{ "@id": "station"} ] } ,
{ "key": "religion",
"values" : [ { "@id": "zoroastrian"} ,
{ "@id": "christian"} ,
{ "@id": "buddhist"} ,
{ "@id": "bahai"} ,
{ "@id": "pagan"} ,
{ "@id": "muslim"} ,
{ "@id": "pastafarian"} ,
{ "@id": "multifaith"} ,
{ "@id": "hindu"} ,
{ "@id": "jewish"} ,
{ "@id": "jain"} ,
{ "@id": "unitarian"} ,
{ "@id": "spiritualist"} ,
{ "@id": "taoist"} ,
{ "@id": "scientologist"} ,
{ "@id": "shinto"} ,
{ "@id": "sikh"} ] } ,
{ "key": "route",
"values" : [ { "@id": "power"} ,
{ "@id": "route_v_bicycle"} ,
{ "@id": "route_v_subway"} ,
{ "@id": "detour"} ,
{ "@id": "bus"} ,
{ "@id": "piste"} ,
{ "@id": "mtb"} ,
{ "@id": "hiking"} ,
{ "@id": "ferry"} ,
{ "@id": "flightpath"} ,
{ "@id": "foot"} ,
{ "@id": "walking"} ,
{ "@id": "train"} ,
{ "@id": "tram"} ,
{ "@id": "truck"} ,
{ "@id": "trolleybus"} ,
{ "@id": "ski"} ,
{ "@id": "road"} ,
{ "@id": "railway"} ] } ,
{ "key": "shop",
"values" : [ { "@id": "seafood"} ,
{ "@id": "shoes"} ,
{ "@id": "sports"} ,
{ "@id": "stationery"} ,
{ "@id": "supermarket"} ,
{ "@id": "toys"} ,
{ "@id": "travel_agency"} ,
{ "@id": "video"} ,
{ "@id": "florist"} ,
{ "@id": "furniture"} ,
{ "@id": "garden_centre"} ,
{ "@id": "hairdresser"} ,
{ "@id": "hardware"} ,
{ "@id": "kiosk"} ,
{ "@id": "jewelry"} ,
{ "@id": "laundry"} ,
{ "@id": "massage"} ,
{ "@id": "mall"} ,
{ "@id": "mobile_phone"} ,
{ "@id": "outdoor"} ,
{ "@id": "optician"} ,
{ "@id": "organic"} ,
{ "@id": "pet"} ,
{ "@id": "bakery"} ,
{ "@id": "beauty"} ,
{ "@id": "beverages"} ,
{ "@id": "bicycle"} ,
{ "@id": "books"} ,
{ "@id": "butcher"} ,
{ "@id": "car"} ,
{ "@id": "car_repair"} ,
{ "@id": "chemist"} ,
{ "@id": "clothes"} ,
{ "@id": "computer"} ,
{ "@id": "convenience"} ,
{ "@id": "deli"} ,
{ "@id": "department_store"} ,
{ "@id": "doityourself"} ,
{ "@id": "dry_cleaning"} ,
{ "@id": "electronics"} ] } ,
{ "key": "sport",
"values" : [ { "@id": "skateboard"} ,
{ "@id": "skating"} ,
{ "@id": "skiing"} ,
{ "@id": "soccer"} ,
{ "@id": "tennis"} ,
{ "@id": "swimming"} ,
{ "@id": "table_tennis"} ,
{ "@id": "football"} ,
{ "@id": "golf"} ,
{ "@id": "hockey"} ,
{ "@id": "gymnastics"} ,
{ "@id": "multi"} ,
{ "@id": "athletics"} ,
{ "@id": "baseball"} ,
{ "@id": "basketball"} ,
{ "@id": "bowls"} ,
{ "@id": "climbing"} ,
{ "@id": "equestrian"} ] } ,
{ "key": "station",
"values" : [ { "@id": "subway"} ] } ,
{ "key": "theatre:genre",
"values" : [ { "@id": "puppet"} ,
{ "@id": "political"} ,
{ "@id": "shadow_play"} ,
{ "@id": "stand_up_comedy"} ,
{ "@id": "variet\u00E9"} ,
{ "@id": "figure"} ,
{ "@id": "marionette"} ,
{ "@id": "philharmonic"} ,
{ "@id": "musical"} ,
{ "@id": "opera"} ,
{ "@id": "ballet"} ,
{ "@id": "boulevard"} ,
{ "@id": "chamber_music"} ,
{ "@id": "comedy"} ,
{ "@id": "cabaret"} ,
{ "@id": "drama"} ] } ,
{ "key": "tourism",
"values" : [ { "@id": "theme_park"} ,
{ "@id": "viewpoint"} ,
{ "@id": "information"} ,
{ "@id": "hotel"} ,
{ "@id": "guest_house"} ,
{ "@id": "hostel"} ,
{ "@id": "motel"} ,
{ "@id": "museum"} ,
{ "@id": "picnic_site"} ,
{ "@id": "attraction"} ,
{ "@id": "artwork"} ,
{ "@id": "alpine_hut"} ,
{ "@id": "chalet"} ,
{ "@id": "zoo"} ,
{ "@id": "camp_site"} ,
{ "@id": "caravan_site"} ] } ,
{ "key": "type",
"values" : [ { "@id": "restriction"} ,
{ "@id": "site"} ,
{ "@id": "route"} ,
{ "@id": "multipolygon"} ,
{ "@id": "boundary"} ] } ,
{ "key": "waterway",
"values" : [ { "@id": "river"} ,
{ "@id": "riverbank"} ,
{ "@id": "stream"} ,
{ "@id": "waterfall"} ,
{ "@id": "canal"} ,
{ "@id": "drain"} ,
{ "@id": "ditch"} ,
{ "@id": "dam"} ] }
]
|
"""Module for deltakere service."""
import logging
from typing import Any, List
class DeltakereService:
    """Service layer for reading and writing deltakere (participants).

    All methods are coroutine functions.  ``db`` is expected to expose a
    Motor-style ``deltakere_collection`` with async ``find`` / ``find_one`` /
    ``delete_many`` / ``insert_many`` APIs — TODO confirm against callers.
    """

    async def _drain_cursor(self, cursor: Any, length: int) -> List:
        """Drain ``cursor`` into a list (at most ``length`` documents), logging each."""
        deltakere = []
        for document in await cursor.to_list(length=length):
            deltakere.append(document)
            logging.debug(document)
        return deltakere

    async def get_all_deltakere(self, db: Any) -> List:
        """Return every deltaker document (capped at 2000)."""
        return await self._drain_cursor(db.deltakere_collection.find(), 2000)

    async def get_deltaker_by_startnr(self, db: Any, startnummer: str) -> dict:
        """Return the deltaker with the given start number, or None if absent."""
        result = await db.deltakere_collection.find_one({"Startnr": startnummer})
        return result

    async def get_deltakere_by_klubb(self, db: Any, klubb: str) -> List:
        """Return deltakere whose club name starts with ``klubb`` (capped at 100)."""
        myquery = "^" + klubb
        cursor = db.deltakere_collection.find({"Klubb": {"$regex": myquery}})
        return await self._drain_cursor(cursor, 100)

    async def get_deltakere_by_lopsklasse(self, db: Any, klasse: str) -> List:
        """Return deltakere in the given race class (capped at 100)."""
        cursor = db.deltakere_collection.find({"Løpsklasse": klasse})
        return await self._drain_cursor(cursor, 100)

    async def get_deltakere_by_arsklasse(self, db: Any, klasse: str) -> List:
        """Return deltakere in the given age class (capped at 100)."""
        cursor = db.deltakere_collection.find({"ÅrsKlasse": klasse})
        return await self._drain_cursor(cursor, 100)

    async def create_deltakere(self, db: Any, body: Any) -> int:
        """Insert ``body`` as the new deltakere set, deleting any existing set first.

        Returns 201 when the collection did not previously exist, 202 when an
        existing collection was emptied before the insert.
        """
        returncode = 201
        collist = await db.list_collection_names()
        logging.debug(collist)
        if "deltakere_collection" in collist:
            returncode = 202
            result = await db.deltakere_collection.delete_many({})
            logging.debug(result)
        result = await db.deltakere_collection.insert_many(body)
        # Lazy %-style arguments: logging formats only when DEBUG is enabled.
        logging.debug("inserted %d docs", len(result.inserted_ids))
        return returncode
|
import subprocess
import os
import ccmlib.repository
from ccmlib.common import is_win, get_version_from_build
class DTestConfig:
    """Holds the dtest run-time configuration derived from pytest options."""

    def __init__(self):
        # Defaults, used until setup() is invoked with the real pytest request.
        self.use_vnodes = True
        self.use_off_heap_memtables = False
        self.num_tokens = -1
        self.data_dir_count = -1
        self.force_execution_of_resource_intensive_tests = False
        self.skip_resource_intensive_tests = False
        self.cassandra_dir = None
        self.cassandra_version = None
        self.cassandra_version_from_build = None
        self.delete_logs = False
        self.execute_upgrade_tests = False
        self.execute_upgrade_tests_only = False
        self.disable_active_log_watching = False
        self.keep_test_dir = False
        self.keep_failed_test_dir = False
        self.enable_jacoco_code_coverage = False
        self.jemalloc_path = find_libjemalloc()

    def setup(self, request):
        """Populate this config from the pytest command-line / ini options."""
        opt = request.config.getoption
        self.use_vnodes = opt("--use-vnodes")
        self.use_off_heap_memtables = opt("--use-off-heap-memtables")
        self.num_tokens = opt("--num-tokens")
        self.data_dir_count = opt("--data-dir-count-per-instance")
        self.force_execution_of_resource_intensive_tests = opt("--force-resource-intensive-tests")
        self.skip_resource_intensive_tests = opt("--skip-resource-intensive-tests")
        # Command-line flag wins over the ini-file setting.
        raw_dir = opt("--cassandra-dir") or request.config.getini("cassandra_dir")
        if raw_dir is not None:
            self.cassandra_dir = os.path.expanduser(raw_dir)
        self.cassandra_version = opt("--cassandra-version")
        self.cassandra_version_from_build = self.get_version_from_build()
        self.delete_logs = opt("--delete-logs")
        self.execute_upgrade_tests = opt("--execute-upgrade-tests")
        self.execute_upgrade_tests_only = opt("--execute-upgrade-tests-only")
        self.disable_active_log_watching = opt("--disable-active-log-watching")
        self.keep_test_dir = opt("--keep-test-dir")
        self.keep_failed_test_dir = opt("--keep-failed-test-dir")
        self.enable_jacoco_code_coverage = opt("--enable-jacoco-code-coverage")

    def get_version_from_build(self):
        """Resolve the C* version under test without starting any cluster.

        There are times when we want to know the C* version we're testing
        against before we do any cluster work.  In the general case we can't
        know that -- the test method could use any version it wants for
        self.cluster.  However, we can get the version from build.xml in the
        C* repository specified by CASSANDRA_VERSION or CASSANDRA_DIR.
        """
        if self.cassandra_version is not None:
            ccm_repo_cache_dir, _ = ccmlib.repository.setup(self.cassandra_version)
            return get_version_from_build(ccm_repo_cache_dir)
        if self.cassandra_dir is not None:
            return get_version_from_build(self.cassandra_dir)
        return None
# Locate the libjemalloc shared library so it can be handed to Cassandra via
# environment variables at startup.  Pre-resolving it reduces node startup
# time, making the dtests run faster.
def find_libjemalloc():
    """Return the libjemalloc path, "-" to disable the lookup, or "" on failure."""
    if is_win():
        # Let the normal .bat launcher script handle finding libjemalloc.
        return ""
    script = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                          "findlibjemalloc.sh")
    try:
        proc = subprocess.Popen([script], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if err or not out:
            return "-"  # sentinel: tells C* not to look for libjemalloc
        return out
    except Exception as exc:
        print("Failed to run script to prelocate libjemalloc ({}): {}".format(script, exc))
        return ""
|
/*
 * Auto-generated webpack chunk 56 — minified build artifact for the
 * "Pageoccupiedbin" Quasar/Vue view (a q-table listing occupied storage bins,
 * with search, refresh and previous/next pagination).  Do not edit by hand;
 * regenerate from the source .vue component instead.
 */
(window["webpackJsonp"]=window["webpackJsonp"]||[]).push([[56],{"8f95":function(e,t){},"920f":function(e,t,a){"use strict";var n=a("8f95"),i=a.n(n);t["default"]=i.a},"95d3":function(e,t,a){"use strict";a.r(t);var n=function(){var e=this,t=e.$createElement,a=e._self._c||t;return a("div",[a("transition",{attrs:{appear:"","enter-active-class":"animated fadeIn"}},[a("q-table",{staticClass:"my-sticky-header-table shadow-24",attrs:{data:e.table_list,"row-key":"id",separator:e.separator,loading:e.loading,filter:e.filter,columns:e.columns,"hide-bottom":"",pagination:e.pagination,"no-data-label":"No data","no-results-label":"No data you want","table-style":{height:e.height},flat:"",bordered:""},on:{"update:pagination":function(t){e.pagination=t}},scopedSlots:e._u([{key:"top",fn:function(){return[a("q-btn-group",{attrs:{push:""}},[a("q-btn",{attrs:{label:e.$t("refresh"),icon:"refresh"},on:{click:function(t){return e.reFresh()}}},[a("q-tooltip",{attrs:{"content-class":"bg-amber text-black shadow-4",offset:[10,10],"content-style":"font-size: 12px"}},[e._v("\n            "+e._s(e.$t("refreshtip"))+"\n          ")])],1)],1),a("q-space"),a("q-input",{attrs:{outlined:"",rounded:"",dense:"",debounce:"300",color:"primary",placeholder:e.$t("search")},on:{blur:function(t){return e.getSearchList()},keyup:function(t){return!t.type.indexOf("key")&&e._k(t.keyCode,"enter",13,t.key,"Enter")?null:e.getSearchList()}},scopedSlots:e._u([{key:"append",fn:function(){return[a("q-icon",{attrs:{name:"search"},on:{click:function(t){return e.getSearchList()}}})]},proxy:!0}]),model:{value:e.filter,callback:function(t){e.filter=t},expression:"filter"}})]},proxy:!0},{key:"body",fn:function(t){return[a("q-tr",{attrs:{props:t}},[a("q-td",{key:"bin_name",attrs:{props:t}},[e._v("\n            "+e._s(t.row.bin_name)+"\n          ")]),a("q-td",{key:"bin_size",attrs:{props:t}},[e._v("\n            "+e._s(t.row.bin_size)+"\n          ")]),a("q-td",{key:"bin_property",attrs:{props:t}},[e._v("\n            "+e._s(t.row.bin_property)+"\n 
")]),a("q-td",{key:"empty_label",attrs:{props:t}},[e._v("\n            "+e._s(t.row.empty_label)+"\n          ")]),a("q-td",{key:"creater",attrs:{props:t}},[e._v("\n            "+e._s(t.row.creater)+"\n          ")]),a("q-td",{key:"create_time",attrs:{props:t}},[e._v("\n            "+e._s(t.row.create_time)+"\n          ")]),a("q-td",{key:"update_time",attrs:{props:t}},[e._v("\n            "+e._s(t.row.update_time)+"\n          ")])],1)]}}])})],1),[a("div",{staticClass:"q-pa-lg flex flex-center"},[a("q-btn",{directives:[{name:"show",rawName:"v-show",value:e.pathname_previous,expression:"pathname_previous"}],attrs:{flat:"",push:"",color:"purple",label:e.$t("previous"),icon:"navigate_before"},on:{click:function(t){return e.getListPrevious()}}},[a("q-tooltip",{attrs:{"content-class":"bg-amber text-black shadow-4",offset:[10,10],"content-style":"font-size: 12px"}},[e._v("\n        "+e._s(e.$t("previous"))+"\n      ")])],1),a("q-btn",{directives:[{name:"show",rawName:"v-show",value:e.pathname_next,expression:"pathname_next"}],attrs:{flat:"",push:"",color:"purple",label:e.$t("next"),"icon-right":"navigate_next"},on:{click:function(t){return e.getListNext()}}},[a("q-tooltip",{attrs:{"content-class":"bg-amber text-black shadow-4",offset:[10,10],"content-style":"font-size: 12px"}},[e._v("\n        "+e._s(e.$t("next"))+"\n      ")])],1),a("q-btn",{directives:[{name:"show",rawName:"v-show",value:!e.pathname_previous&&
!pathname_next"}],attrs:{flat:"",push:"",color:"dark",label:e.$t("no_data")}})],1)]],2)},i=[],r=a("3004"),s={name:"Pageoccupiedbin",data(){return{openid:"",login_name:"",authin:"0",pathname:"binset/?empty_label=false",pathname_previous:"",pathname_next:"",separator:"cell",loading:!1,height:"",table_list:[],bin_size_list:[],bin_property_list:[],warehouse_list:[],columns:[{name:"bin_name",required:!0,label:this.$t("warehouse.view_binset.bin_name"),align:"left",field:"bin_name"},{name:"bin_size",label:this.$t("warehouse.view_binset.bin_size"),field:"bin_size",align:"center"},{name:"bin_property",label:this.$t("warehouse.view_binset.bin_property"),field:"bin_property",align:"center"},{name:"empty_label",label:this.$t("warehouse.view_binset.empty_label"),field:"empty_label",align:"center"},{name:"creater",label:this.$t("creater"),field:"creater",align:"center"},{name:"create_time",label:this.$t("createtime"),field:"create_time",align:"center"},{name:"update_time",label:this.$t("updatetime"),field:"update_time",align:"center"}],filter:"",pagination:{page:1,rowsPerPage:"30"}}},methods:{getList(){var e=this;e.$q.localStorage.has("auth")&&Object(r["e"])(e.pathname,{}).then((t=>{e.table_list=t.results,e.pathname_previous=t.previous,e.pathname_next=t.next})).catch((t=>{e.$q.notify({message:t.detail,icon:"close",color:"negative"})}))},getSearchList(){var e=this;e.$q.localStorage.has("auth")&&Object(r["e"])(e.pathname+"&bin_name__icontains="+e.filter,{}).then((t=>{e.table_list=t.results,e.pathname_previous=t.previous,e.pathname_next=t.next})).catch((t=>{e.$q.notify({message:t.detail,icon:"close",color:"negative"})}))},getListPrevious(){var e=this;e.$q.localStorage.has("auth")&&Object(r["e"])(e.pathname_previous,{}).then((t=>{e.table_list=t.results,e.pathname_previous=t.previous,e.pathname_next=t.next})).catch((t=>{e.$q.notify({message:t.detail,icon:"close",color:"negative"})}))},getListNext(){var 
e=this;e.$q.localStorage.has("auth")&&Object(r["e"])(e.pathname_next,{}).then((t=>{e.table_list=t.results,e.pathname_previous=t.previous,e.pathname_next=t.next})).catch((t=>{e.$q.notify({message:t.detail,icon:"close",color:"negative"})}))},reFresh(){var e=this;e.getList()}},created(){var e=this;e.$q.localStorage.has("openid")?e.openid=e.$q.localStorage.getItem("openid"):(e.openid="",e.$q.localStorage.set("openid","")),e.$q.localStorage.has("login_name")?e.login_name=e.$q.localStorage.getItem("login_name"):(e.login_name="",e.$q.localStorage.set("login_name","")),e.$q.localStorage.has("auth")?(e.authin="1",e.getList()):e.authin="0"},mounted(){var e=this;e.$q.platform.is.electron?e.height=String(e.$q.screen.height-290)+"px":e.height=e.$q.screen.height-290+"px"},updated(){},destroyed(){}},o=s,l=a("42e1"),p=a("920f"),c=a("eaac"),h=a("e7a9"),u=a("9c40"),_=a("05c0"),d=a("2c91"),m=a("27f9"),b=a("0016"),f=a("bd08"),g=a("db86"),v=a("eebe"),y=a.n(v),q=Object(l["a"])(o,n,i,!1,null,null,null);"function"===typeof p["default"]&&Object(p["default"])(q);t["default"]=q.exports;y()(q,"components",{QTable:c["a"],QBtnGroup:h["a"],QBtn:u["a"],QTooltip:_["a"],QSpace:d["a"],QInput:m["a"],QIcon:b["a"],QTr:f["a"],QTd:g["a"]})}}]);
|
"""Multi-consumer multi-producer dispatching mechanism
Originally based on pydispatch (BSD) http://pypi.python.org/pypi/PyDispatcher/2.0.1
See license.txt for original license.
Heavily modified for Django's purposes.
"""
from dispatch.dispatcher import Signal
|
/*
* contrib/intarray/_int_bool.c
*/
#include "postgres.h"
#include "miscadmin.h"
#include "utils/builtins.h"
#include "_int.h"
#include "miscadmin.h"
PG_FUNCTION_INFO_V1(bqarr_in);
PG_FUNCTION_INFO_V1(bqarr_out);
Datum bqarr_in(PG_FUNCTION_ARGS);
Datum bqarr_out(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(boolop);
Datum boolop(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(rboolop);
Datum rboolop(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(querytree);
Datum querytree(PG_FUNCTION_ARGS);
/* parser's states */
#define WAITOPERAND 1			/* expecting an integer, '!' or '(' */
#define WAITENDOPERAND 2		/* inside the digits of an integer operand */
#define WAITOPERATOR 3			/* expecting '&', '|', ')' or end of string */

/*
 * node of query tree, also used
 * for storing polish notation in parser
 */
typedef struct NODE
{
	int4		type;			/* VAL or OPR */
	int4		val;			/* integer value, or operator character */
	struct NODE *next;			/* next (earlier) node in the reversed list */
} NODE;

/* running state of the query parser */
typedef struct
{
	char	   *buf;			/* current read position in the input string */
	int4		state;			/* one of the WAIT* states above */
	int4		count;			/* open-parenthesis nesting depth */
	/* reverse polish notation in list (for temporary usage) */
	NODE	   *str;
	/* number in str */
	int4		num;
} WORKSTATE;
/*
 * get token from query string
 *
 * Returns one of VAL/OPR/OPEN/CLOSE/END/ERR; for VAL and OPR the token's
 * value is stored in *val.  Implemented as a small state machine over
 * state->buf, advancing the buffer pointer as characters are consumed.
 */
static int4
gettoken(WORKSTATE *state, int4 *val)
{
	char		nnn[16];		/* accumulates the digits of one operand */
	int			innn;

	*val = 0;					/* default result */

	innn = 0;
	while (1)
	{
		if (innn >= sizeof(nnn))
			return ERR;			/* buffer overrun => syntax error */
		switch (state->state)
		{
			case WAITOPERAND:
				innn = 0;
				if ((*(state->buf) >= '0' && *(state->buf) <= '9') ||
					*(state->buf) == '-')
				{
					/* start of a (possibly negative) integer operand */
					state->state = WAITENDOPERAND;
					nnn[innn++] = *(state->buf);
				}
				else if (*(state->buf) == '!')
				{
					(state->buf)++;
					*val = (int4) '!';
					return OPR;
				}
				else if (*(state->buf) == '(')
				{
					state->count++;
					(state->buf)++;
					return OPEN;
				}
				else if (*(state->buf) != ' ')
					return ERR;
				break;
			case WAITENDOPERAND:
				if (*(state->buf) >= '0' && *(state->buf) <= '9')
				{
					nnn[innn++] = *(state->buf);
				}
				else
				{
					long		lval;

					nnn[innn] = '\0';
					errno = 0;
					lval = strtol(nnn, NULL, 0);
					*val = (int4) lval;
					/* reject strtol failure and values that overflow int4 */
					if (errno != 0 || (long) *val != lval)
						return ERR;
					state->state = WAITOPERATOR;
					/* end-of-string inside unclosed parens is a syntax error */
					return (state->count && *(state->buf) == '\0')
						? ERR : VAL;
				}
				break;
			case WAITOPERATOR:
				if (*(state->buf) == '&' || *(state->buf) == '|')
				{
					state->state = WAITOPERAND;
					*val = (int4) *(state->buf);
					(state->buf)++;
					return OPR;
				}
				else if (*(state->buf) == ')')
				{
					(state->buf)++;
					state->count--;
					/* more ')' than '(' is a syntax error */
					return (state->count < 0) ? ERR : CLOSE;
				}
				else if (*(state->buf) == '\0')
					return (state->count) ? ERR : END;
				else if (*(state->buf) != ' ')
					return ERR;
				break;
			default:
				return ERR;
				break;
		}
		(state->buf)++;
	}
	return END;					/* not reached */
}
/*
 * Prepend one item to the reverse-polish list being accumulated in *state.
 */
static void
pushquery(WORKSTATE *state, int4 type, int4 val)
{
	NODE	   *node = (NODE *) palloc(sizeof(NODE));

	node->type = type;
	node->val = val;
	/* link at the head: the list holds the notation in reverse order */
	node->next = state->str;
	state->str = node;
	state->num++;
}
#define STACKDEPTH 16			/* max pending operators (parser nesting) */

/*
 * make polish notation of query
 *
 * Shunting-yard style conversion: values are emitted immediately, operators
 * are held on a small explicit stack, and parenthesized sub-expressions are
 * handled by recursion.  Output accumulates (reversed) in state->str.
 * Returns END on success, ERR on syntax error.
 */
static int4
makepol(WORKSTATE *state)
{
	int4		val,
				type;
	int4		stack[STACKDEPTH];
	int4		lenstack = 0;

	/* since this function recurses, it could be driven to stack overflow */
	check_stack_depth();

	while ((type = gettoken(state, &val)) != END)
	{
		switch (type)
		{
			case VAL:
				pushquery(state, type, val);
				/* '&' and '!' bind tightly: flush them once a value arrives */
				while (lenstack && (stack[lenstack - 1] == (int4) '&' ||
									stack[lenstack - 1] == (int4) '!'))
				{
					lenstack--;
					pushquery(state, OPR, stack[lenstack]);
				}
				break;
			case OPR:
				/* a second '|' can be emitted directly; others are stacked */
				if (lenstack && val == (int4) '|')
					pushquery(state, OPR, val);
				else
				{
					if (lenstack == STACKDEPTH)
						ereport(ERROR,
								(errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
								 errmsg("statement too complex")));
					stack[lenstack] = val;
					lenstack++;
				}
				break;
			case OPEN:
				/* recurse for the parenthesized sub-expression */
				if (makepol(state) == ERR)
					return ERR;
				while (lenstack && (stack[lenstack - 1] == (int4) '&' ||
									stack[lenstack - 1] == (int4) '!'))
				{
					lenstack--;
					pushquery(state, OPR, stack[lenstack]);
				}
				break;
			case CLOSE:
				/* flush pending operators before returning to the caller */
				while (lenstack)
				{
					lenstack--;
					pushquery(state, OPR, stack[lenstack]);
				};
				return END;
				break;
			case ERR:
			default:
				ereport(ERROR,
						(errcode(ERRCODE_SYNTAX_ERROR),
						 errmsg("syntax error")));
				return ERR;
		}
	}

	/* flush any operators still pending at end of input */
	while (lenstack)
	{
		lenstack--;
		pushquery(state, OPR, stack[lenstack]);
	};
	return END;
}
/* Bounds of a sorted int4 array, passed to checkcondition_arr */
typedef struct
{
	int4	   *arrb;			/* first element */
	int4	   *arre;			/* one past the last element */
} CHKVAL;
/*
 * Leaf-test callback: binary-search the sorted array for item->val.
 */
static bool
checkcondition_arr(void *checkval, ITEM *item)
{
	int4	   *lo = ((CHKVAL *) checkval)->arrb;
	int4	   *hi = ((CHKVAL *) checkval)->arre;

	/* invariant: if present at all, item->val lies in [lo, hi) */
	while (lo < hi)
	{
		int4	   *mid = lo + (hi - lo) / 2;

		if (*mid == item->val)
			return true;
		if (*mid < item->val)
			lo = mid + 1;
		else
			hi = mid;
	}
	return false;
}
/* Leaf-test callback: test the bit selected by hashing item->val in checkval */
static bool
checkcondition_bit(void *checkval, ITEM *item)
{
	return GETBIT(checkval, HASHVAL(item->val));
}
/*
 * Recursively evaluate a boolean expression stored in postfix order,
 * using chkcond() to decide the truth of each leaf value.  When calcnot
 * is false, every NOT node is treated as true.
 */
static bool
execute(ITEM *curitem, void *checkval, bool calcnot,
		bool (*chkcond) (void *checkval, ITEM *item))
{
	/* since this function recurses, it could be driven to stack overflow */
	check_stack_depth();

	if (curitem->type == VAL)
		return (*chkcond) (checkval, curitem);

	if (curitem->val == (int4) '!')
	{
		if (!calcnot)
			return true;
		return !execute(curitem - 1, checkval, calcnot, chkcond);
	}

	if (curitem->val == (int4) '&')
	{
		/* short-circuit AND: left operand first, then right */
		if (!execute(curitem + curitem->left, checkval, calcnot, chkcond))
			return false;
		return execute(curitem - 1, checkval, calcnot, chkcond);
	}

	/* '|' operator: short-circuit OR */
	if (execute(curitem + curitem->left, checkval, calcnot, chkcond))
		return true;
	return execute(curitem - 1, checkval, calcnot, chkcond);
}
/*
 * signconsistent & execconsistent called by *_consistent
 */
bool
signconsistent(QUERYTYPE *query, BITVEC sign, bool calcnot)
{
	/* the root of the postfix expression is the last item */
	ITEM	   *root = GETQUERY(query) + query->size - 1;

	return execute(root, (void *) sign, calcnot, checkcondition_bit);
}
/* Array must be sorted! */
bool
execconsistent(QUERYTYPE *query, ArrayType *array, bool calcnot)
{
	CHKVAL		bounds;

	CHECKARRVALID(array);
	/* hand the array bounds to the binary-search leaf test */
	bounds.arrb = ARRPTR(array);
	bounds.arre = bounds.arrb + ARRNELEMS(array);
	return execute(GETQUERY(query) + query->size - 1,
				   (void *) &bounds, calcnot,
				   checkcondition_arr);
}
/* State handed to checkcondition_gin through the checkval pointer */
typedef struct
{
	ITEM	   *first;			/* first item of the query, to compute offsets */
	bool	   *mapped_check;	/* per-item truth values, indexed by position */
} GinChkVal;

static bool
checkcondition_gin(void *checkval, ITEM *item)
{
	GinChkVal  *gcv = (GinChkVal *) checkval;

	/* look up the pre-computed answer for this query item */
	return gcv->mapped_check[item - gcv->first];
}
/*
 * Evaluate the query for GIN, given per-entry match flags in check[].
 * check[] is expected to hold one element per VAL item, in query order —
 * see the note below about agreeing with ginint4_queryextract.
 */
bool
gin_bool_consistent(QUERYTYPE *query, bool *check)
{
	GinChkVal	gcv;
	ITEM	   *items = GETQUERY(query);
	int			i,
				j = 0;

	if (query->size <= 0)
		return FALSE;

	/*
	 * Set up data for checkcondition_gin. This must agree with the query
	 * extraction code in ginint4_queryextract.
	 */
	gcv.first = items;
	gcv.mapped_check = (bool *) palloc(sizeof(bool) * query->size);
	for (i = 0; i < query->size; i++)
	{
		/* only VAL items consume a slot of check[]; operators are skipped */
		if (items[i].type == VAL)
			gcv.mapped_check[i] = check[j++];
	}

	/* honor NOT operators (calcnot = true) */
	return execute(GETQUERY(query) + query->size - 1,
				   (void *) &gcv, true,
				   checkcondition_gin);
}
/*
 * Does this (sub)expression require at least one value to be present?
 */
static bool
contains_required_value(ITEM *curitem)
{
	/* since this function recurses, it could be driven to stack overflow */
	check_stack_depth();

	if (curitem->type == VAL)
		return true;

	if (curitem->val == (int4) '!')
	{
		/*
		 * Assume anything under a NOT is non-required. For some cases with
		 * nested NOTs, we could prove there's a required value, but it seems
		 * unlikely to be worth the trouble.
		 */
		return false;
	}

	if (curitem->val == (int4) '&')
	{
		/* AND: a required value on either side suffices */
		return contains_required_value(curitem + curitem->left) ||
			contains_required_value(curitem - 1);
	}

	/* '|' operator: both sides must have required values */
	return contains_required_value(curitem + curitem->left) &&
		contains_required_value(curitem - 1);
}
/*
 * Does the query demand at least one value from the indexed array?
 * Empty queries trivially do not.
 */
bool
query_has_required_values(QUERYTYPE *query)
{
	if (query->size <= 0)
		return false;
	return contains_required_value(GETQUERY(query) + query->size - 1);
}
/*
 * boolean operations
 */
Datum
rboolop(PG_FUNCTION_ARGS)
{
	/* just reverse the operands */
	return DirectFunctionCall2(boolop,
							   PG_GETARG_DATUM(1),
							   PG_GETARG_DATUM(0));
}
/*
 * Does the integer array (arg 0) satisfy the boolean query (arg 1)?
 */
Datum
boolop(PG_FUNCTION_ARGS)
{
	/* work on a copy: PREPAREARR modifies the array in place */
	ArrayType  *val = PG_GETARG_ARRAYTYPE_P_COPY(0);
	QUERYTYPE  *query = PG_GETARG_QUERYTYPE_P(1);
	CHKVAL		chkval;
	bool		result;

	CHECKARRVALID(val);
	/* readies the copy for checkcondition_arr, which needs a sorted array */
	PREPAREARR(val);
	chkval.arrb = ARRPTR(val);
	chkval.arre = chkval.arrb + ARRNELEMS(val);
	result = execute(GETQUERY(query) + query->size - 1,
					 &chkval, true,
					 checkcondition_arr);
	pfree(val);
	PG_FREE_IF_COPY(query, 1);
	PG_RETURN_BOOL(result);
}
/*
 * Walk the postfix item array backwards from position *pos, filling in each
 * operator's "left" field with the relative offset of its left operand.
 * On return, *pos points just below the processed subtree.
 */
static void
findoprnd(ITEM *ptr, int4 *pos)
{
#ifdef BS_DEBUG
	elog(DEBUG3, (ptr[*pos].type == OPR) ?
		 "%d %c" : "%d %d", *pos, ptr[*pos].val);
#endif
	if (ptr[*pos].type == VAL)
	{
		ptr[*pos].left = 0;		/* leaves have no operand */
		(*pos)--;
	}
	else if (ptr[*pos].val == (int4) '!')
	{
		/* unary NOT: its operand is the item immediately below it */
		ptr[*pos].left = -1;
		(*pos)--;
		findoprnd(ptr, pos);
	}
	else
	{
		/* binary operator: skip the right subtree to locate the left one */
		ITEM	   *curitem = &ptr[*pos];
		int4		tmp = *pos;

		(*pos)--;
		findoprnd(ptr, pos);
		curitem->left = *pos - tmp;
		findoprnd(ptr, pos);
	}
}
/*
 * input
 *
 * Parse the text form of a query_int into its internal QUERYTYPE
 * representation: a postfix array of ITEMs with operand offsets.
 */
Datum
bqarr_in(PG_FUNCTION_ARGS)
{
	char	   *buf = (char *) PG_GETARG_POINTER(0);
	WORKSTATE	state;
	int4		i;
	QUERYTYPE  *query;
	int4		commonlen;
	ITEM	   *ptr;
	NODE	   *tmp;
	int4		pos = 0;

#ifdef BS_DEBUG
	StringInfoData pbuf;
#endif

	state.buf = buf;
	state.state = WAITOPERAND;
	state.count = 0;
	state.num = 0;
	state.str = NULL;

	/* make polish notation (postfix, but in reverse order) */
	makepol(&state);
	if (!state.num)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("empty query")));

	commonlen = COMPUTESIZE(state.num);
	query = (QUERYTYPE *) palloc(commonlen);
	SET_VARSIZE(query, commonlen);
	query->size = state.num;
	ptr = GETQUERY(query);

	/* copy the reversed list into the array, freeing list nodes as we go */
	for (i = state.num - 1; i >= 0; i--)
	{
		ptr[i].type = state.str->type;
		ptr[i].val = state.str->val;
		tmp = state.str->next;
		pfree(state.str);
		state.str = tmp;
	}

	/* fill in operand offsets for fast evaluation */
	pos = query->size - 1;
	findoprnd(ptr, &pos);

#ifdef BS_DEBUG
	initStringInfo(&pbuf);
	for (i = 0; i < query->size; i++)
	{
		if (ptr[i].type == OPR)
			appendStringInfo(&pbuf, "%c(%d) ", ptr[i].val, ptr[i].left);
		else
			appendStringInfo(&pbuf, "%d ", ptr[i].val);
	}
	elog(DEBUG3, "POR: %s", pbuf.data);
	pfree(pbuf.data);
#endif

	PG_RETURN_POINTER(query);
}
/*
 * out function
 */

/* State for reconstructing the infix text form of a query */
typedef struct
{
	ITEM	   *curpol;			/* next postfix item to consume (walking backwards) */
	char	   *buf;			/* output buffer (grown via repalloc) */
	char	   *cur;			/* current write position within buf */
	int4		buflen;			/* allocated size of buf */
} INFIX;

/* Grow inf->buf (doubling) until it can take addsize more bytes plus a NUL */
#define RESIZEBUF(inf,addsize) while( ( (inf)->cur - (inf)->buf ) + (addsize) + 1 >= (inf)->buflen ) { \
	int4 len = inf->cur - inf->buf; \
	inf->buflen *= 2; \
	inf->buf = (char*) repalloc( (void*)inf->buf, inf->buflen ); \
	inf->cur = inf->buf + len; \
}
/*
 * Recursively print, in infix form, the postfix (sub)expression at
 * in->curpol.  "first" suppresses the parentheses that would otherwise be
 * emitted around an OR node (true at the top level; also passed true for a
 * parenthesized operand of NOT, which already got its own parentheses).
 */
static void
infix(INFIX *in, bool first)
{
	if (in->curpol->type == VAL)
	{
		RESIZEBUF(in, 11);		/* enough for any int4 in decimal */
		sprintf(in->cur, "%d", in->curpol->val);
		in->cur = strchr(in->cur, '\0');
		in->curpol--;
	}
	else if (in->curpol->val == (int4) '!')
	{
		bool		isopr = false;

		RESIZEBUF(in, 1);
		*(in->cur) = '!';
		in->cur++;
		*(in->cur) = '\0';
		in->curpol--;
		if (in->curpol->type == OPR)
		{
			/* parenthesize an operator argument of NOT */
			isopr = true;
			RESIZEBUF(in, 2);
			sprintf(in->cur, "( ");
			in->cur = strchr(in->cur, '\0');
		}
		infix(in, isopr);
		if (isopr)
		{
			RESIZEBUF(in, 2);
			sprintf(in->cur, " )");
			in->cur = strchr(in->cur, '\0');
		}
	}
	else
	{
		int4		op = in->curpol->val;
		INFIX		nrm;

		in->curpol--;
		if (op == (int4) '|' && !first)
		{
			/* nested OR gets parenthesized */
			RESIZEBUF(in, 2);
			sprintf(in->cur, "( ");
			in->cur = strchr(in->cur, '\0');
		}

		/* render the right operand into its own scratch buffer */
		nrm.curpol = in->curpol;
		nrm.buflen = 16;
		nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);

		/* get right operand */
		infix(&nrm, false);

		/* get & print left operand */
		in->curpol = nrm.curpol;
		infix(in, false);

		/* print operator & right operand */
		RESIZEBUF(in, 3 + (nrm.cur - nrm.buf));
		sprintf(in->cur, " %c %s", op, nrm.buf);
		in->cur = strchr(in->cur, '\0');
		pfree(nrm.buf);

		if (op == (int4) '|' && !first)
		{
			RESIZEBUF(in, 2);
			sprintf(in->cur, " )");
			in->cur = strchr(in->cur, '\0');
		}
	}
}
/*
 * Produce the text form of a query_int by walking its postfix
 * representation from the root (the last item).
 */
Datum
bqarr_out(PG_FUNCTION_ARGS)
{
	QUERYTYPE  *query = PG_GETARG_QUERYTYPE_P(0);
	INFIX		nrm;

	if (query->size == 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("empty query")));

	/* start at the root: the last item of the postfix array */
	nrm.curpol = GETQUERY(query) + query->size - 1;
	nrm.buflen = 32;
	nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
	*(nrm.cur) = '\0';
	infix(&nrm, true);

	PG_FREE_IF_COPY(query, 0);
	PG_RETURN_POINTER(nrm.buf);
}
/* Useless old "debugging" function for a fundamentally wrong algorithm */
Datum
querytree(PG_FUNCTION_ARGS)
{
	/* kept only so the SQL-level function still resolves; always errors */
	elog(ERROR, "querytree is no longer implemented");
	PG_RETURN_NULL();
}
|
from mesa import Agent, Model
from mesa.time import RandomActivation
from mesa.space import MultiGrid
from mesa.datacollection import DataCollector
def compute_gini(model):
    """Return the Gini coefficient of agent wealth in ``model``.

    Uses the rank-based formula G = 1 + 1/N - 2*B, where B is the
    normalized sum of rank-weighted wealths.  Returns 0.0 for the
    degenerate cases of no agents or zero total wealth, where the raw
    expression would divide by zero.
    """
    agent_wealths = [agent.wealth for agent in model.schedule.agents]
    x = sorted(agent_wealths)
    N = model.num_agents
    total = sum(x)
    if N == 0 or total == 0:
        # Perfect equality (or nothing to measure): Gini is 0 by convention.
        return 0.0
    B = sum(xi * (N - i) for i, xi in enumerate(x)) / (N * total)
    return 1 + (1 / N) - 2 * B
class BoltzmannWealthModel(Model):
    """A simple model of an economy where agents exchange currency at random.

    All the agents begin with one unit of currency, and each time step can give
    a unit of currency to another agent. Note how, over time, this produces a
    highly skewed distribution of wealth.
    """

    def __init__(self, N=100, width=10, height=10):
        self.num_agents = N
        self.grid = MultiGrid(height, width, True)
        self.schedule = RandomActivation(self)
        self.datacollector = DataCollector(
            model_reporters={"Gini": compute_gini},
            agent_reporters={"Wealth": "wealth"},
        )
        # Drop each agent onto a random grid cell.
        for agent_id in range(self.num_agents):
            agent = MoneyAgent(agent_id, self)
            self.schedule.add(agent)
            cell = (
                self.random.randrange(self.grid.width),
                self.random.randrange(self.grid.height),
            )
            self.grid.place_agent(agent, cell)
        self.running = True
        self.datacollector.collect(self)

    def step(self):
        """Advance the model by one tick and record data."""
        self.schedule.step()
        self.datacollector.collect(self)

    def run_model(self, n):
        """Run the model for ``n`` steps."""
        for _ in range(n):
            self.step()
class MoneyAgent(Agent):
    """An agent with fixed initial wealth."""

    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.wealth = 1

    def move(self):
        """Hop to a random Moore-adjacent cell (center excluded)."""
        neighborhood = self.model.grid.get_neighborhood(
            self.pos, moore=True, include_center=False
        )
        self.model.grid.move_agent(self, self.random.choice(neighborhood))

    def give_money(self):
        """Hand one unit of currency to a random cell-mate, if any exist."""
        cellmates = self.model.grid.get_cell_list_contents([self.pos])
        if len(cellmates) > 1:
            recipient = self.random.choice(cellmates)
            recipient.wealth += 1
            self.wealth -= 1

    def step(self):
        self.move()
        if self.wealth > 0:
            self.give_money()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class DiscoveredSecuritySolutionsOperations(object):
"""DiscoveredSecuritySolutionsOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: API version for the operation. Constant value: "2015-06-01-preview".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Auto-generated wiring: keep references to the shared pipeline
        # client, the (de)serializers and the service configuration.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # API version is constant for this operation group.
        self.api_version = "2015-06-01-preview"

        self.config = config
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets a list of discovered Security Solutions for the subscription.

        NOTE: auto-generated by AutoRest; manual edits are lost on regeneration.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DiscoveredSecuritySolution
        :rtype:
         ~azure.mgmt.security.models.DiscoveredSecuritySolutionPaged[~azure.mgmt.security.models.DiscoveredSecuritySolution]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def prepare_request(next_link=None):
            # First page: build the URL from operation metadata; subsequent
            # pages reuse the server-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                # Correlation id for request tracing on the service side.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Fetch one page; any status other than 200 becomes a CloudError.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.DiscoveredSecuritySolutionPaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/discoveredSecuritySolutions'}
    def list_by_home_region(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets a list of discovered Security Solutions for the subscription and
        location.

        NOTE: auto-generated by AutoRest; manual edits are lost on regeneration.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of DiscoveredSecuritySolution
        :rtype:
         ~azure.mgmt.security.models.DiscoveredSecuritySolutionPaged[~azure.mgmt.security.models.DiscoveredSecuritySolution]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def prepare_request(next_link=None):
            # First page: build the URL from operation metadata; subsequent
            # pages reuse the server-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = self.list_by_home_region.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
                    'ascLocation': self._serialize.url("self.config.asc_location", self.config.asc_location, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                # Correlation id for request tracing on the service side.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Fetch one page; any status other than 200 becomes a CloudError.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.DiscoveredSecuritySolutionPaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list_by_home_region.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/locations/{ascLocation}/discoveredSecuritySolutions'}
def get(
        self, resource_group_name, discovered_security_solution_name, custom_headers=None, raw=False, **operation_config):
    """Gets a specific discovered Security Solution.

    :param resource_group_name: The name of the resource group within the
     user's subscription. The name is case insensitive.
    :type resource_group_name: str
    :param discovered_security_solution_name: Name of a discovered
     security solution.
    :type discovered_security_solution_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: DiscoveredSecuritySolution or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.security.models.DiscoveredSecuritySolution or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = self.get.metadata['url']
    path_format_arguments = {
        # Subscription id must be a GUID; resource group name length/charset
        # constraints are ARM-wide rules enforced by the serializer.
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'ascLocation': self._serialize.url("self.config.asc_location", self.config.asc_location, 'str'),
        'discoveredSecuritySolutionName': self._serialize.url("discovered_security_solution_name", discovered_security_solution_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Accept'] = 'application/json'
    if self.config.generate_client_request_id:
        # Per-request correlation id for service-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.get(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    # Anything other than HTTP 200 raises CloudError.
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('DiscoveredSecuritySolution', response)

    if raw:
        # Caller asked for the raw transport response alongside the model.
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Security/locations/{ascLocation}/discoveredSecuritySolutions/{discoveredSecuritySolutionName}'}
|
import tensorflow as tf
import numpy as np
import sys
from tensorflow.examples.tutorials.mnist import input_data

sys.path.append("/data")

# Load MNIST with one-hot labels (downloads to MNIST_data/ if missing).
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print(mnist.train.images.shape, mnist.train.labels.shape)
print(mnist.test.images.shape, mnist.test.labels.shape)
print(mnist.validation.images.shape, mnist.validation.labels.shape)

# Softmax regression: one affine layer over the flattened 28x28 images.
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])

# cross_entropy = -tf.reduce_sum(y_*tf.log(y))
# NOTE(review): reduce_sum with no axis collapses over the whole batch, so the
# outer reduce_mean operates on a scalar (no-op) and the loss scales with batch
# size; tf.log(y) can also produce -inf when y hits 0. Presumably this is the
# behaviour under study in this case — confirm before reusing elsewhere.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y)))
train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Deprecated alias of tf.global_variables_initializer (kept as-is).
init = tf.initialize_all_variables()
sess = tf.Session()
# Dump the graph definition for offline inspection.
tf.train.write_graph(sess.graph_def, '/data/scripts/study_case/pbtxt_files', 'karch.pbtxt')
# tf.global_variables_initializer().run(session=sess)

'''inserted code'''
from scripts.utils.tf_utils import TensorFlowScheduler
scheduler = TensorFlowScheduler(name="karch")
'''inserted code'''

with sess.as_default():
    sess.run(init)
    # Train forever; the scheduler below decides when to stop the run.
    while True:
        batch_xs, batch_ys = mnist.train.next_batch(100)
        _, loss = sess.run([train_step, cross_entropy], feed_dict={x: batch_xs, y_: batch_ys})

        '''inserted code'''
        scheduler.loss_checker(loss)
        scheduler.check_time()
        '''inserted code'''
|
/*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/

/**
 * Croatian ("hr") localisation strings for the blockquote plugin.
 */
( function() {
	var translations = {
		toolbar: 'Blockquote'
	};

	CKEDITOR.plugins.setLang( 'blockquote', 'hr', translations );
} )();
|
// Auto-generated caniuse-lite support table for the "Battery Status API"
// feature; keys are compressed browser/version identifiers. Do not hand-edit.
module.exports={A:{A:{"2":"J C G E B A TB"},B:{"2":"D X g H L"},C:{"1":"m n o p q r w x v","2":"0 1 2 4 RB F I J C G E z t s PB OB","132":"L M N O P Q R S T U V W u Y Z a b c d e f K h i j k l","164":"B A D X g H"},D:{"1":"0 2 4 8 h i j k l m n o p q r w x v z t s DB AB SB BB","2":"F I J C G E B A D X g H L M N O P Q R S T U V W u Y Z a b c d e f","66":"K"},E:{"2":"7 F I J C G E B A CB EB FB GB HB IB JB"},F:{"1":"U V W u Y Z a b c d e f K h i j k l m n o p q r","2":"5 6 E A D H L M N O P Q R S T KB LB MB NB QB y"},G:{"2":"3 7 9 G A UB VB WB XB YB ZB aB bB"},H:{"2":"cB"},I:{"1":"s","2":"1 3 F dB eB fB gB hB iB"},J:{"2":"C B"},K:{"1":"K","2":"5 6 B A D y"},L:{"1":"8"},M:{"1":"t"},N:{"2":"B A"},O:{"132":"jB"},P:{"1":"F I"},Q:{"2":"kB"},R:{"1":"lB"}},B:4,C:"Battery Status API"};
|
import SwitchComponent from './switch-component.js';
export const SwitchControl = wp.customize.KadenceControl.extend( {
renderContent: function renderContent() {
let control = this;
ReactDOM.render(
<SwitchComponent control={control}/>,
control.container[0]
);
}
} );
|
#!/usr/bin/env node

// Spec-runner bootstrap: decides which runners to execute, whether the spec
// node_modules need reinstalling, and shells out to the built Electron binary.

const childProcess = require('child_process')
const crypto = require('crypto')
const fs = require('fs')
const { hashElement } = require('folder-hash')
const path = require('path')

// Flags minimist does not recognise are collected and forwarded verbatim to
// the underlying test-runner processes.
const unknownFlags = []
const args = require('minimist')(process.argv, {
  string: ['runners'],
  unknown: arg => unknownFlags.push(arg)
})

const unknownArgs = []
for (const flag of unknownFlags) {
  unknownArgs.push(flag)
  const onlyFlag = flag.replace(/^-+/, '')
  if (args[onlyFlag]) {
    // Re-attach the value minimist parsed for this unknown flag.
    unknownArgs.push(args[onlyFlag])
  }
}

const utils = require('./lib/utils')
const { YARN_VERSION } = require('./yarn')

const BASE = path.resolve(__dirname, '../..')
// Windows needs the .cmd shims when spawning npm/npx directly.
const NPM_CMD = process.platform === 'win32' ? 'npm.cmd' : 'npm'
const NPX_CMD = process.platform === 'win32' ? 'npx.cmd' : 'npx'

// File caching the last-seen hashes of spec/package.json + yarn.lock and of
// spec/node_modules; used to skip redundant `yarn install` runs.
const specHashPath = path.resolve(__dirname, '../spec/.hash')

// --runners=a,b restricts execution to the named runners; default is all.
let runnersToRun = null
if (args.runners) {
  runnersToRun = args.runners.split(',')
  console.log('Only running:', runnersToRun)
} else {
  console.log('Will trigger all spec runners')
}
/**
 * Orchestrates a spec run: refreshes spec dependencies when their hashes
 * changed, regenerates electron.d.ts if absent, then runs the test runners.
 */
async function main () {
  const [prevSpecHash, prevInstallHash] = loadLastSpecHash()
  const [specHash, installHash] = await getSpecHash()

  // Reinstall when either the manifests or the installed modules changed.
  const specsOutOfDate = specHash !== prevSpecHash || installHash !== prevInstallHash
  if (specsOutOfDate) {
    await installSpecModules()
    saveSpecHash(await getSpecHash())
  }

  const typeDefPath = path.resolve(__dirname, '../electron.d.ts')
  if (!fs.existsSync(typeDefPath)) {
    console.log('Generating electron.d.ts as it is missing')
    generateTypeDefinitions()
  }

  await runElectronTests()
}
/**
 * Regenerates electron.d.ts via `npm run create-typescript-definitions`.
 *
 * Fix: spawn NPM_CMD (the platform-aware constant defined above) instead of
 * the literal 'npm' — spawnSync('npm') fails on Windows, where the shim is
 * npm.cmd; the sibling installSpecModules already uses NPX_CMD this way.
 *
 * @throws {Error} when the npm script exits non-zero.
 */
function generateTypeDefinitions () {
  const { status } = childProcess.spawnSync(NPM_CMD, ['run', 'create-typescript-definitions'], {
    cwd: path.resolve(__dirname, '..'),
    stdio: 'inherit'
  })
  if (status !== 0) {
    throw new Error(`Electron typescript definition generation failed with exit code: ${status}.`)
  }
}
/**
 * Reads the cached [specHash, specInstallHash] pair from disk.
 * Returns [null, null] when no hash file has been written yet.
 */
function loadLastSpecHash () {
  if (!fs.existsSync(specHashPath)) {
    return [null, null]
  }
  const contents = fs.readFileSync(specHashPath, 'utf8')
  return contents.split('\n')
}
/**
 * Persists the [specHash, specInstallHash] pair, newline-separated, so the
 * next run can compare against it.
 */
function saveSpecHash (hashes) {
  const [newSpecHash, newSpecInstallHash] = hashes
  fs.writeFileSync(specHashPath, [newSpecHash, newSpecInstallHash].join('\n'))
}
/**
 * Runs every registered spec runner (honouring --runners), collecting
 * per-runner failures and throwing once at the end if any runner failed.
 */
async function runElectronTests () {
  const failures = []
  const runners = new Map([
    ['main', { description: 'Main process specs', run: runMainProcessElectronTests }],
    ['remote', { description: 'Remote based specs', run: runRemoteBasedElectronTests }]
  ])

  const testResultsDir = process.env.ELECTRON_TEST_RESULTS_DIR

  for (const [runnerId, runner] of runners) {
    if (runnersToRun && !runnersToRun.includes(runnerId)) {
      console.info('\nSkipping:', runner.description)
      continue
    }
    try {
      console.info('\nRunning:', runner.description)
      if (testResultsDir) {
        // Point mocha's JUnit reporter at a per-runner results file.
        process.env.MOCHA_FILE = path.join(testResultsDir, `test-results-${runnerId}.xml`)
      }
      await runner.run()
    } catch (err) {
      // Keep going so later runners still execute; report all failures below.
      failures.push([runnerId, err])
    }
  }

  if (failures.length !== 0) {
    for (const failure of failures) {
      console.error('\n\nRunner Failed:', failure[0])
      console.error(failure[1])
    }
    throw new Error('Electron test runners have failed')
  }
}
/**
 * Spawns the built Electron binary against electron/spec (remote-based specs),
 * forwarding any unrecognised CLI flags. On Linux the binary is wrapped in a
 * python dbus mock so D-Bus-dependent specs can run headless.
 *
 * @throws {Error} when the spawned process exits non-zero.
 */
async function runRemoteBasedElectronTests () {
  let command = path.resolve(BASE, utils.getElectronExec())
  const testArgs = ['electron/spec', ...unknownArgs.slice(2)]

  if (process.platform === 'linux') {
    testArgs.unshift(path.resolve(__dirname, 'dbus_mock.py'), command)
    command = 'python'
  }

  const { status } = childProcess.spawnSync(command, testArgs, {
    cwd: path.resolve(__dirname, '../..'),
    stdio: 'inherit'
  })
  if (status !== 0) {
    // Windows exit codes are conventionally reported in hex.
    const textStatus = process.platform === 'win32' ? `0x${status.toString(16)}` : status.toString()
    throw new Error(`Electron tests failed with code ${textStatus}.`)
  }
}
/**
 * Spawns the built Electron binary against electron/spec-main (main-process
 * specs), forwarding any unrecognised CLI flags.
 *
 * @throws {Error} when the spawned process exits non-zero.
 */
async function runMainProcessElectronTests () {
  const electronBinary = path.resolve(BASE, utils.getElectronExec())
  const testArgs = ['electron/spec-main', ...unknownArgs.slice(2)]

  const { status } = childProcess.spawnSync(electronBinary, testArgs, {
    cwd: path.resolve(__dirname, '../..'),
    stdio: 'inherit'
  })
  if (status !== 0) {
    // Windows exit codes are conventionally reported in hex.
    const textStatus = process.platform === 'win32' ? `0x${status.toString(16)}` : status.toString()
    throw new Error(`Electron tests failed with code ${textStatus}.`)
  }
}
/**
 * Runs a frozen-lockfile `yarn install` inside the spec folder, pointing
 * native builds at the generated node headers for this Electron build.
 *
 * @throws {Error} on install failure, unless IGNORE_YARN_INSTALL_ERROR is set.
 */
async function installSpecModules () {
  const nodeHeadersDir = path.resolve(BASE, `out/${utils.OUT_DIR}/gen/node_headers`)

  const env = {
    ...process.env,
    npm_config_nodedir: nodeHeadersDir,
    npm_config_msvs_version: '2017'
  }

  const yarnArgs = [`yarn@${YARN_VERSION}`, 'install', '--frozen-lockfile']
  const { status } = childProcess.spawnSync(NPX_CMD, yarnArgs, {
    env,
    cwd: path.resolve(__dirname, '../spec'),
    stdio: 'inherit'
  })
  if (status !== 0 && !process.env.IGNORE_YARN_INSTALL_ERROR) {
    throw new Error('Failed to yarn install in the spec folder')
  }
}
/**
 * Computes the two hashes used for install caching, in parallel:
 *  1. SHA256 over spec/package.json + spec/yarn.lock (declared deps)
 *  2. folder-hash over spec/node_modules (actually installed deps),
 *     or null when node_modules does not exist yet.
 *
 * @returns {Promise<[string, string|null]>}
 */
function getSpecHash () {
  const hashSpecManifests = async () => {
    const hasher = crypto.createHash('SHA256')
    hasher.update(fs.readFileSync(path.resolve(__dirname, '../spec/package.json')))
    hasher.update(fs.readFileSync(path.resolve(__dirname, '../spec/yarn.lock')))
    return hasher.digest('hex')
  }

  const hashInstalledModules = async () => {
    const specNodeModulesPath = path.resolve(__dirname, '../spec/node_modules')
    if (!fs.existsSync(specNodeModulesPath)) {
      return null
    }
    const { hash } = await hashElement(specNodeModulesPath, {
      folders: {
        exclude: ['.bin']
      }
    })
    return hash
  }

  return Promise.all([hashSpecManifests(), hashInstalledModules()])
}
// Kick off the runner; any unhandled failure exits non-zero so CI fails.
main().catch((error) => {
  console.error('An error occurred inside the spec runner:', error)
  process.exit(1)
})
|
#!/usr/bin/env python

import base_filters

# Google Doc that supplies this graphic's COPY text.
COPY_GOOGLE_DOC_KEY = '1DZv9_GXRjZXz4K_EQaQSirSCUuYY9xGv3rwM3c1W304'

# This graphic ships no static assets of its own.
USE_ASSETS = False

# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300

# Jinja filters shared across graphics (see base_filters).
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
|
#-------------------------------------------------------------------------------
# Copyright 2017 Cognizant Technology Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
'''
Created on Dec 28, 2017
@author: 610962
'''
from dateutil import parser
import datetime
from BaseAgent import BaseAgent
import time
import calendar
import random
import string
import os
import json
import logging.handlers
class DummyDataAgent(BaseAgent):
    # Publishes randomized dummy Jira/GIT/Jenkins/Sonar/Rundeck/Sprint records
    # through the BaseAgent pipeline so Insights dashboards can be exercised
    # without live tool integrations.  Python 2 only (uses `xrange` and the
    # literal `01`).
    #
    # NOTE(review): the indentation of this file was lost; block structure
    # below is reconstructed from the code's apparent intent.  Guard scopes
    # marked with NOTE(review) comments are best guesses — verify against the
    # original source before relying on them.

    def process(self):
        # Framework entry point: seeds a chain of sprints once, then loops
        # emitting one correlated batch of tool records per simulated day until
        # `dataCount` batches have been published.
        self.printLog("DummyDataAgent processing started ",True)
        # Sample payloads documenting each record type's expected shape; the
        # loops below rebuild them from scratch with random values.
        jiraSample = {
            "jiraStatus":"Completed",
            "jiraProjectName":"Knowledge Transfer",
            "jiraCreator":"393565",
            "inSightsTimeX":"2016-09-13T14:15:44Z",
            "jiraPriority":"Medium",
            "jiraUpdated":"2016-09-13T14:15:44.000+0530",
            "jiraIssueType":"Story",
            "toolName":"JIRA",
            "jiraKey":"KKT-3",
            "inSightsTime":1473777524,
            "sprint":"Sprint4",
            "fixVersions": "ACS17.0.4.3"
        }
        # GIT sample json
        gitSample = {
            "gitCommitId":1,
            "inSightsTimeX":"2016-03-16T10:47:22Z",
            "toolName":"GIT",
            "gitAuthorName":"Akshay",
            "gitReponame":"Insights",
            "inSightsTime":1458122182,
            "jiraKey":"IS-10",
            "gitCommiTime":"2016-03-16T15:47:22Z"
        }
        # Jenkins sample json
        jenkinsSample = {
            "environment": "PROD",
            "endTime": 1508351788,
            "gitCommitId":1,
            "jobName": "BillingApproved",
            "duration": 10178,
            "buildNumber": 1,
            "sprintID": "S52",
            "vector": "BUILD",
            "startTime": 1508341610,
            "projectName": "PaymentServices",
            "inSightsTimeX": "2017-10-18T15:46:50Z",
            "status": "Success",
            "toolName": "JENKINS",
            "projectID": "1002"
        }
        # Sonar Sample json
        sonarSample = {
            "id": 4,
            "k": "PaymentServices",
            "nm": "PaymentServices",
            "sc": "PRJ",
            "qu": "TRK"
        }
        sprintSample = {
            "sprintName":"Adoption",
            "sprintId":"ad1",
            "state":"closed"
        }
        # Jira variables — value pools the generators draw from at random.
        jira_status = ['Open', 'Backlog', 'To Do', 'In Progress', 'Canceled', 'Done', 'Closed', 'Reopen']
        jira_priority = ['Low', 'Medium', 'High']
        jira_issuetype = ['Story', 'Task', 'Sub-task', 'Bug', 'Epic', 'User Story']
        jira_creator = ['Akshay', 'Mayank', 'Vishwajit', 'Prajakta', 'Vishal']
        jira_sprint = ['S51', 'S52', 'S53', 'S54', 'S55']
        jira_project_name = ['PaymentServices', 'MobileServices', 'ClaimFinder', 'AgentLocator']
        Story_Id = ['ST-10', 'ST-11', 'ST-12', 'ST-13', 'ST-14']
        jira_version = ['ACS17.0.4.3', 'BDE17.0.4.3', 'ACS19.0.3.1']
        state = ['start', 'closed', 'finish', 'deliver']
        Priority = ['2', '3', '4', '5']
        Author_Name = ['HAri', 'Dhrubaj', 'Akshay', 'Tommy']
        resolution = ['Done', 'Completed', 'Reopen']
        storyPoints = ['1', '2', '3', '5', '8', '13']
        #alm_ID = ['a23', 'a33', 'a44', 'a55']
        progressTimeSec = ['1232', '32342', '2323']
        assigneeID = ['1231212', '2345253', '234234', '1342323']
        assigneeEmail = ['hari@cognizant.com', 'sashikala@cognizant.com', 'drubaj@cognizant.com', 'kalaivani@cognizant.com']
        # Sprint variables
        sprint_Name = ['Adoption', 'UIEnhance', 'Three', 'Testphase']
        state = ['start', 'closed', 'finish', 'deliver']
        issue_Type = ['Bug', 'Sprint_Bug', 'SIT_Bug', 'Performance_Bug', 'Regression_Bug']
        # GIT variables
        repo = ['Insights', 'InsightsDemo', 'InsightsTest', 'InsightsTest']
        author = ['Akshay', 'Mayank', 'Vishwajit', 'Prajakta']
        # message = ['Adding debug lines for IS-10 / S51', 'Remvoing bug for IS-11 / S52', 'New feature added for IS-1 / S53', 'Rolling back changes IS-13 / S54']
        Commit_Id = ['123', '456', '789', '111', '009', '008', '007', '990']
        # Jenkins variables
        sprint = ['S51', 'S52', 'S53', 'S54', 'S55']
        status = ['Success', 'Failure', 'Aborted']
        project_name = ['PaymentServices', 'MobileServices', 'ClaimFinder', 'AgentLocator']
        job_name = ['BillingApproved', 'BillingInvoice', 'ClaimValidated', 'ClaimProcessed', 'deploy']
        projectId = ['1001', '1002', '1003', '1004']
        jen_env = ['PROD', 'DEV', 'INT', 'RELEASE']
        buildUrl = ['productv4.1.devops.com', 'productv4.2.devops.com', 'productv4.3.devops.com', 'productv4.4.devops.com']
        result = ['SUCCESS', 'FAILURE', 'ABORTED']
        master = ['master1', 'master2']
        # Sonar variables
        project = ['PaymentServices', 'MobileServices', 'ClaimFinder', 'AgentLocator']
        sonar_key = ['payment1', 'Mobile1', 'Claim', 'agent']
        project_id = ['1', '2', '3', '4']
        resourceKey = ['09', '099', '89', '32']
        sonar_quality_gate_Status = ['SUCCESS', 'FAILED']
        sonar_coverage = ['35','50','70','85']
        sonar_complexity = ['35','50','70','85','100','125']
        sonar_duplicate = ['15','25','45','60']
        sonar_techdepth = ['3','5','17','25','21']
        rundeck_env=['PROD','DEV','INTG','SIT','UAT']
        # Agent configuration values (supplied via self.config).
        dataCount = self.config.get("dataCount")
        start_date_days = self.config.get("start_date_days")
        sleepTime= self.config.get("sleepTime")
        createSprintData= self.config.get("createSprintData", False)
        # Simulated clock starts `start_date_days` in the past and advances one
        # day per outer-loop iteration.
        currentDate= datetime.datetime.now() - datetime.timedelta(days=start_date_days)
        self.printLog(currentDate,True)
        flag = 1
        # To save the data count in tracking.json
        script_dir = os.path.dirname(__file__)
        #print(script_dir)
        file_path = os.path.join(script_dir, 'config.json')
        self.printLog(file_path, False)
        # Input your system path to tracking.json of DummyAgent
        with open(file_path, "r") as jsonFile: # Open the JSON file for reading
            data = json.load(jsonFile) # Read the JSON into the buffer
        #self.printLog('Starting Agent!')
        #currentDT = datetime.datetime.now()
        #print(currentDT)
        record_count = 0
        total_record_count = 0
        globle_sprintArr = []
        sprint_data = []
        self.printLog('Jira sprint Started .... 50', False)
        # sprint json configurations
        sprintEndDate=currentDate
        sprintDay=7
        numberOfSprint=150
        try:
            # Seed one back-to-back chain of `numberOfSprint` sprints, each
            # `sprintDay` days long; sprint ids feed the Jira generator below.
            for rangeNumber in range(0,numberOfSprint ) :
                sprint = 'ST-' + str(rangeNumber)
                #if sprint not in globle_sprintArr :
                sprintSample = {}
                sprintStartDate = sprintEndDate
                sprintEndDate=(sprintStartDate + datetime.timedelta(days=sprintDay))
                if createSprintData:
                    self.printLog(sprint +' '+str(sprintStartDate) +' '+str(sprintEndDate), False)
                sprintSample['sprintName'] = random.choice(sprint_Name)
                sprintSample['sprintId'] = sprint
                sprintSample['state'] = random.choice(state)
                sprintSample['issueType'] = random.choice(issue_Type)
                sprintSample['sprintStartDate'] =sprintStartDate.strftime("%Y-%m-%dT%H:%M:%SZ")
                sprintSample['sprintEndDate'] = sprintEndDate.strftime("%Y-%m-%dT%H:%M:%SZ")
                sprint_data.append(sprintSample)
                globle_sprintArr.append(sprint)
                #print(sprintSample)
            if createSprintData:
                metadata = {"labels" : ["Sprint"]}
                #self.printLog(len(sprint_data), False)
                total_record_count =total_record_count + len(sprint_data)
                self.publishToolsData(sprint_data, metadata)
        except Exception as ex:
            self.printLog(ex,True)
        # Main loop: one iteration == one simulated day of tool activity.
        while flag == 1 :
            jira_data = []
            sprint_data = []
            git_data = []
            jenkins_data = []
            sonar_data = []
            rundeck_data = []
            #print(jira_data)
            # Run-time calculated variables
            currentDT = datetime.datetime.now()
            self.printLog('currentDate '+str(currentDate),True)
            time_tuple = time.strptime(currentDate.strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
            #print(time_tuple)
            time_epoch = time.mktime(time_tuple)
            #print(time_epoch)
            randomStr = ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(32)])
            #randonjirakey = 'LS-' + str(''.join([random.choice(string.digits) for n in xrange(1)]))
            #randonGitCommitId = 'CM-' + str(''.join([random.choice(string.digits) for n in xrange(1)]))
            time_start = (random.randint(100, 500))
            time_end = (random.randint(501, 800))
            publish_message_count_loop=""
            self.printLog('Jira Started .... ', False)
            # jira_count =[]
            jira_count = 0
            jira_keyArr = []
            jira_sprintArr = []
            # 50 Jira issues per simulated day, each tied to a seeded sprint.
            while jira_count != 50 :
                try:
                    randonjirakey = 'LS-' + str(''.join([random.choice(string.digits) for n in xrange(10)]))
                    randonSprintStringId = 'ST-' + str(''.join([random.choice(string.digits) for n in xrange(3)]))
                    #print(randonSprintStringId)
                    #Jira json configurations
                    time_offset_jira = (random.randint(01, 24))
                    time_offset = (random.randint(101, 800))
                    jira_date =(currentDate + datetime.timedelta(hours=time_offset_jira,seconds=time_offset))
                    sprintNumber =random.choice(globle_sprintArr)
                    self.printLog('sprintNumber '+sprintNumber+' jira date '+str(jira_date), False)
                    jiraSample ={}
                    jiraSample['inSightsTimeX'] = jira_date.strftime("%Y-%m-%dT%H:%M:%SZ")
                    jiraSample['jiraUpdated'] = (jira_date + datetime.timedelta(days=time_offset_jira)).strftime("%Y-%m-%dT%H:%M:%SZ")
                    jiraSample['creationDate'] = jira_date.strftime("%Y-%m-%dT%H:%M:%SZ")
                    jiraSample['inSightsTime'] = time_epoch
                    jiraSample['jiraCreator'] = random.choice(jira_creator)
                    jiraSample['jiraPriority'] = random.choice(jira_priority)
                    jiraSample['jiraIssueType'] = random.choice(jira_issuetype)
                    jiraSample['sprintId'] = sprintNumber
                    jiraSample['jiraStatus'] = random.choice(jira_status)
                    jiraSample['fixVersions'] = random.choice(jira_version)
                    jiraSample['issueType'] = random.choice(issue_Type)
                    jiraSample['jiraKey'] = randonjirakey
                    jiraSample['storyId'] = random.choice(Story_Id)
                    jiraSample['Priority'] = random.choice(Priority)
                    jiraSample['projectName'] = random.choice(jira_project_name)
                    jiraSample['resolution'] = random.choice(resolution)
                    jiraSample['storyPoints'] = random.choice(storyPoints)
                    jiraSample['progressTimeSec'] = random.choice(progressTimeSec)
                    jiraSample['assigneeID'] = random.choice(assigneeID)
                    jiraSample['assigneeEmail'] = random.choice(assigneeEmail)
                    jiraSample['authorName'] = random.choice(Author_Name)
                    jiraSample['toolName'] = "JIRA"
                    jiraSample['categoryName'] = "ALM"
                    jira_count += 1
                    jira_data.append(jiraSample)
                    #print(jiraSample)
                    jira_keyArr.append(jiraSample)
                    #if randonSprintStringId not in jira_sprintArr:
                    #    jira_sprintArr.append(randonSprintStringId)
                except Exception as ex:
                    self.printLog(ex,True)
            jiraMetadata = {"labels" : ["JIRA"]}
            total_record_count =total_record_count + len(jira_data)
            self.publishToolsData(jira_data, jiraMetadata)
            publish_message_count_loop=publish_message_count_loop+' Jira Data='+str(len(jira_data))
            #print(jira_keyArr)
            #print(jira_sprintArr)
            self.printLog('GIT Started .... ', False)
            #print(jira_keyArr)
            #print(len(jira_keyArr))
            git = 0
            git_CommitArr = []
            # 50 commits per Jira issue, time-shifted just after the issue.
            for rangeNumber in range(0, len(jira_keyArr)) :
                git_count = 0
                #print(jirakey)
                jiraSampleData=jira_keyArr[rangeNumber]
                while git_count != 50:
                    randonGitCommitId = 'CM-' + str(''.join([random.choice(string.digits) for n in xrange(10)]))
                    time_offset = (random.randint(101, 800))
                    # GIT json configurations 10 2
                    #print("GIT 1")
                    git_date = (datetime.datetime.strptime(jiraSampleData['inSightsTimeX'],"%Y-%m-%dT%H:%M:%SZ") + datetime.timedelta(seconds=time_offset))
                    git_datetime_epoch = int(time.mktime(git_date.timetuple()))
                    #print(git_datetime_epoch)
                    self.printLog(' jirakey '+ jiraSampleData['jiraKey'] +' jira Date '+jiraSampleData['inSightsTimeX'] +' GIT Date '+str(git_date), False)
                    gitSample = {}
                    gitSample['inSightsTimeX'] = git_date.strftime("%Y-%m-%dT%H:%M:%SZ")
                    gitSample['gitCommiTime'] = git_date.strftime("%Y-%m-%dT%H:%M:%SZ")
                    gitSample['inSightsTime'] = git_datetime_epoch
                    gitSample['gitCommitId'] = randomStr
                    # NOTE(review): guard scope reconstructed — assumed to cover
                    # only the two jiraKey-linking lines (condition is always
                    # true for git_count < 50 anyway); confirm against original.
                    if git_count < 2001 :
                        gitSample['jiraKey'] = jiraSampleData['jiraKey']
                        gitSample['message'] = 'This commit is associated with jira-key : ' + str(jiraSampleData['jiraKey'])
                    gitSample['gitReponame'] = random.choice(repo)
                    gitSample['gitAuthorName'] = random.choice(author)
                    gitSample['repoName'] = random.choice(repo)
                    gitSample['commitId'] = randonGitCommitId
                    gitSample['toolName'] = "GIT"
                    gitSample['categoryName'] = "SCM"
                    #gitSample['git_date']=str(git_date)
                    git_count += 1
                    #print(gitSample)
                    git_CommitArr.append(gitSample)
                    git_data.append(gitSample)
            gitMetadata = {"labels" : ["GIT"]}
            #print(len(git_data))
            total_record_count =total_record_count + len(git_data)
            self.publishToolsData(git_data, gitMetadata)
            publish_message_count_loop=publish_message_count_loop+' GIT Data='+str(len(git_data))
            self.printLog('Jenkins Started ....', False)
            #print(git_CommitArr)
            #print(len(git_CommitArr))
            # One Jenkins build per commit; only "Success" builds flow onward
            # to the Sonar/Rundeck generators via jenkins_keyArr.
            jenkins_count = 0
            jenkins_keyArr = []
            for rangeNumber in range(0, len(git_CommitArr)) :
                try:
                    gitSampleData = git_CommitArr[rangeNumber]
                    #print(gitSampleData) + time_start
                    #print(gitSampleData['commitId'])
                    time_offset = (random.randint(101, 800))
                    randomJenkineBuildNumber = str(''.join([random.choice(string.digits) for n in xrange(10)]))
                    #print('a jenkine key '+randomJenkineBuildNumber +' '+gitSampleData['inSightsTimeX'])
                    jenkins_date = (datetime.datetime.strptime(gitSampleData['inSightsTimeX'],"%Y-%m-%dT%H:%M:%SZ") + datetime.timedelta(seconds=120))
                    self.printLog('GIT Commit Id '+gitSampleData['commitId']+' GIT Date '+ gitSampleData['inSightsTimeX'] +' Jenkine Date '+str(jenkins_date), False)
                    jenkins_startTime = (jenkins_date)
                    jenkins_endTime = (jenkins_date + datetime.timedelta(seconds=time_offset))
                    jenkine_epochtime=int(time.mktime(jenkins_date.timetuple()))
                    jenkine_status =random.choice(status)
                    jenkinsSample = {}
                    jenkinsSample['inSightsTimeX'] = (jenkins_date).strftime("%Y-%m-%dT%H:%M:%SZ")
                    jenkinsSample['inSightsTime'] = jenkine_epochtime
                    jenkinsSample['startTime'] = jenkins_startTime.strftime("%Y-%m-%dT%H:%M:%SZ")
                    jenkinsSample['endTime'] = jenkins_endTime.strftime("%Y-%m-%dT%H:%M:%SZ")
                    jenkinsSample['duration'] = (jenkins_endTime - jenkins_startTime).seconds
                    jenkinsSample['status'] = jenkine_status
                    #jenkinsSample['sprintID'] = random.choice(sprint)
                    jenkinsSample['buildNumber'] = randomJenkineBuildNumber
                    jenkinsSample['jobName'] = random.choice(job_name)
                    jenkinsSample['projectName'] = random.choice(project_name)
                    jenkinsSample['projectID'] = random.choice(projectId)
                    jenkinsSample['environment'] = random.choice(jen_env)
                    jenkinsSample['buildUrl'] = random.choice(buildUrl)
                    jenkinsSample['result'] = random.choice(result)
                    jenkinsSample['master'] = random.choice(master)
                    jenkinsSample['jenkins_date']=str(jenkins_date)
                    # NOTE(review): guard scope reconstructed — assumed to cover
                    # only the scmcommitId link line; confirm against original.
                    if rangeNumber < 2001 :
                        jenkinsSample['scmcommitId'] = gitSampleData['commitId']
                    jenkinsSample['toolName'] = "JENKINS"
                    jenkinsSample['categoryName'] = "CI"
                    #print(jenkinsSample)
                    if jenkine_status=="Success":
                        jenkins_keyArr.append(jenkinsSample)
                    jenkins_data.append(jenkinsSample)
                except Exception as ex:
                    self.printLog(ex,True)
            jenkinsMetadata = {"labels" : ["JENKINS"]}
            #self.printLog(len(jenkins_data), False)
            total_record_count =total_record_count + len(jenkins_data)
            self.publishToolsData(jenkins_data, jenkinsMetadata)
            publish_message_count_loop=publish_message_count_loop+' Jenkins Data='+str(len(jenkins_data))
            self.printLog('Sonar Started ....', False)
            #print(jenkins_keyArr)
            jenkine_success_build =len(jenkins_keyArr)
            self.printLog('Jenkine Array size for success build '+str(jenkine_success_build),True)
            # One Sonar scan per successful Jenkins build.
            sonar_count = 0
            for rangeNumber in range(0, len(jenkins_keyArr)) :
                jenkinsSampleData = jenkins_keyArr[rangeNumber]
                #print(jenkinsSampleData)
                #print(jenkinsSampleData['buildNumber'])
                # Sonar jenkins configuration
                ramdomSonarKey =str(''.join([random.choice(string.digits) for n in xrange(10)]))
                sonar_date = (datetime.datetime.strptime(jenkinsSampleData['inSightsTimeX'],"%Y-%m-%dT%H:%M:%SZ") + datetime.timedelta(seconds=120))
                time_offset = (random.randint(101, 800))
                self.printLog('Jenkine build number '+jenkinsSampleData['buildNumber']+' Jenkine Date '+jenkinsSampleData['inSightsTimeX']+' Sonar Date '+str(sonar_date), False)
                try:
                    sonar_startTime = sonar_date.strftime("%Y-%m-%dT%H:%M:%SZ")
                    sonar_endTime = (sonar_date + datetime.timedelta(seconds=time_offset)).strftime("%Y-%m-%dT%H:%M:%SZ")
                    sonarSample = {}
                    sonarSample['inSightsTimeX'] = sonar_date.strftime("%Y-%m-%dT%H:%M:%SZ")
                    sonarSample['inSightsTime'] = int(time.mktime(sonar_date.timetuple()))
                    sonarSample['startTime'] = sonar_startTime
                    sonarSample['endTime'] = sonar_endTime
                    sonarSample['projectname'] = random.choice(project)
                    sonarSample['ProjectID'] = random.choice(projectId)
                    sonarSample['ProjectKey'] = random.choice(sonar_key)
                    sonarSample['resourceKey'] = random.choice(resourceKey)
                    # NOTE(review): guard scope reconstructed — assumed to cover
                    # only the build-number link, leaving the last ~200
                    # successful builds without a Sonar link; confirm.
                    if rangeNumber < (jenkine_success_build - 200) :
                        sonarSample['jenkineBuildNumber'] = jenkinsSampleData['buildNumber']
                    sonarSample['sonarKey']=ramdomSonarKey
                    sonarSample['sonarQualityGateStatus']= random.choice(sonar_quality_gate_Status)
                    sonarSample['sonarCoverage']= random.choice(sonar_coverage)
                    sonarSample['sonarComplexity']= random.choice(sonar_complexity)
                    sonarSample['sonarDuplicateCode']= random.choice(sonar_duplicate)
                    sonarSample['sonarTechDepth']= random.choice(sonar_techdepth)
                    sonarSample['toolName'] = "SONAR"
                    sonarSample['categoryName'] = "CODEQUALITY"
                    sonar_data.append(sonarSample)
                except Exception as ex:
                    self.printLog(ex,True)
                #print(sonarSample)
            sonarMetadata = {"labels" : ["SONAR"]}
            #print(len(sonar_data))
            total_record_count =total_record_count + len(sonar_data)
            self.publishToolsData(sonar_data, sonarMetadata)
            publish_message_count_loop=publish_message_count_loop+' Sonar Data='+str(len(sonar_data))
            self.printLog('Rundeck Started ....', False)
            #print(jenkins_keyArr)
            #print(len(jenkins_keyArr))
            # One Rundeck deployment per successful Jenkins build.
            Rundeck_count = 0
            for rangeNumber in range(0, len(jenkins_keyArr)):
                try:
                    jenkinsSampleData = jenkins_keyArr[rangeNumber]
                    #print(jenkinsSampleData)
                    #print(jenkinsSampleData['buildNumber'])
                    rundeck_date = (datetime.datetime.strptime(jenkinsSampleData['inSightsTimeX'],"%Y-%m-%dT%H:%M:%SZ") + datetime.timedelta(seconds=120))
                    time_offset = (random.randint(101, 800))
                    self.printLog('Jenkine build number '+jenkinsSampleData['buildNumber']+' Jenkine Date '+jenkinsSampleData['inSightsTimeX']+' Runduck Date '+str(rundeck_date), False)
                    rundeck_startTime = rundeck_date
                    rundeck_endTime = (rundeck_date + datetime.timedelta(seconds=time_offset))
                    rundeckSample = {}
                    rundeckSample['inSightsTimeX'] = rundeck_date.strftime("%Y-%m-%dT%H:%M:%SZ")
                    rundeckSample['inSightsTime'] = int(time.mktime(rundeck_startTime.timetuple()))
                    rundeckSample['startTime'] = rundeck_startTime.strftime("%Y-%m-%dT%H:%M:%SZ")
                    rundeckSample['endTime'] = rundeck_endTime.strftime("%Y-%m-%dT%H:%M:%SZ")
                    rundeckSample['status'] = random.choice(status)
                    rundeckSample['environment'] = random.choice(rundeck_env)
                    # NOTE(review): guard scope reconstructed — assumed to cover
                    # only the build-number link line; confirm against original.
                    if rangeNumber < (jenkine_success_build - 200) :
                        rundeckSample['jenkineBuildNumber'] = jenkinsSampleData['buildNumber']
                    rundeckSample['toolName'] = "RUNDECK"
                    rundeckSample['categoryName'] = "DEPLOYMENT"
                    rundeck_data.append(rundeckSample)
                    #print(rundeckSample)
                except Exception as ex:
                    self.printLog(ex,True)
            RundeckMetadata = {"labels" : ["RUNDECK"]}
            #print(len(rundeck_data))
            total_record_count =total_record_count + len(rundeck_data)
            self.publishToolsData(rundeck_data, RundeckMetadata)
            publish_message_count_loop=publish_message_count_loop+' Rundeck Data='+str(len(rundeck_data))
            self.printLog('Published data: '+ str(record_count) + " Details "+publish_message_count_loop,True)
            record_count += 1
            # Advance the simulated clock by one day and throttle publishing.
            currentDate += datetime.timedelta(days=1)
            #print(currentDate)
            time.sleep(sleepTime)
            if(dataCount == record_count):
                flag = 0
        currentCompletedDT = datetime.datetime.now()
        self.printLog('Start Time '+ str(currentDT),True)
        self.printLog('Completed Time ==== '+ str(currentCompletedDT),True)
        self.printLog('Total Record count '+str(total_record_count),True)
        self.printLog("Dummy Agent Processing Completed .....",True)
if __name__ == "__main__":
    # Instantiating the agent hands control to the BaseAgent framework, which
    # in turn invokes process() above.
    DummyDataAgent()
|
/**
* Copyright (c) 2015-present, Facebook, Inc.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
* @providesModule EventSubscription
* @flow
*/
'use strict';
import type EventSubscriptionVendor from 'EventSubscriptionVendor';
/**
* EventSubscription represents a subscription to a particular event. It can
* remove its own subscription.
*/
class EventSubscription {
  // Name of the event this subscription listens to.
  // NOTE(review): assigned outside this file, presumably by the vendor
  // when the subscription is registered -- confirm against callers.
  eventType: string;
  // Slot index within the vendor's subscription table.
  // NOTE(review): also assigned externally -- confirm against the vendor.
  key: number;
  // The vendor that owns this subscription and can remove it.
  subscriber: EventSubscriptionVendor;
  /**
   * @param {EventSubscriptionVendor} subscriber the subscriber that controls
   *   this subscription.
   */
  constructor(subscriber: EventSubscriptionVendor) {
    this.subscriber = subscriber;
  }
  /**
   * Removes this subscription from the subscriber that controls it.
   */
  remove() {
    this.subscriber.removeSubscription(this);
  }
}
module.exports = EventSubscription;
|
# SPDX-License-Identifier: MIT OR Apache-2.0
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the MIT License. See the LICENSE file in the root of this
# repository for complete details.
import asyncio
import secrets
import pytest
import structlog
from structlog.contextvars import (
_CONTEXT_VARS,
bind_contextvars,
clear_contextvars,
get_contextvars,
get_merged_contextvars,
merge_contextvars,
reset_contextvars,
unbind_contextvars,
)
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio
@pytest.fixture(autouse=True)
def _clear_contextvars():
    """
    Make sure all tests start with a clean slate.
    Runs automatically before every test (autouse=True), so context
    variables bound in one test can never leak into another.
    """
    clear_contextvars()
class TestContextvars:
    """Tests for structlog's contextvars-based context binding helpers."""
    async def test_bind(self, event_loop):
        """
        Binding a variable causes it to be included in the result of
        merge_contextvars.
        """
        async def coro():
            bind_contextvars(a=1)
            return merge_contextvars(None, None, {"b": 2})
        assert {"a": 1, "b": 2} == await event_loop.create_task(coro())
    async def test_multiple_binds(self, event_loop):
        """
        Multiple calls to bind_contextvars accumulate values instead of
        replacing them. But they override redefined ones.
        """
        async def coro():
            bind_contextvars(a=1, c=3)
            bind_contextvars(c=333, d=4)
            return merge_contextvars(None, None, {"b": 2})
        assert {
            "a": 1,
            "b": 2,
            "c": 333,
            "d": 4,
        } == await event_loop.create_task(coro())
    async def test_reset(self, event_loop):
        """
        reset_contextvars allows resetting contextvars to
        previously-set values.
        """
        async def coro():
            bind_contextvars(a=1)
            assert {"a": 1} == get_contextvars()
            await event_loop.create_task(nested_coro())
        async def nested_coro():
            # bind_contextvars returns reset tokens keyed by variable name.
            tokens = bind_contextvars(a=2, b=3)
            assert {"a": 2, "b": 3} == get_contextvars()
            reset_contextvars(**tokens)
            assert {"a": 1} == get_contextvars()
        await event_loop.create_task(coro())
    async def test_nested_async_bind(self, event_loop):
        """
        Context is passed correctly between "nested" concurrent operations.
        """
        async def coro():
            bind_contextvars(a=1)
            return await event_loop.create_task(nested_coro())
        async def nested_coro():
            bind_contextvars(c=3)
            return merge_contextvars(None, None, {"b": 2})
        assert {"a": 1, "b": 2, "c": 3} == await event_loop.create_task(coro())
    async def test_merge_works_without_bind(self, event_loop):
        """
        merge_contextvars returns values as normal even when there has
        been no previous calls to bind_contextvars.
        """
        async def coro():
            return merge_contextvars(None, None, {"b": 2})
        assert {"b": 2} == await event_loop.create_task(coro())
    async def test_merge_overrides_bind(self, event_loop):
        """
        Variables included in merge_contextvars override previously
        bound variables.
        """
        async def coro():
            bind_contextvars(a=1)
            return merge_contextvars(None, None, {"a": 111, "b": 2})
        assert {"a": 111, "b": 2} == await event_loop.create_task(coro())
    async def test_clear(self, event_loop):
        """
        The context-local context can be cleared, causing any previously bound
        variables to not be included in merge_contextvars's result.
        """
        async def coro():
            bind_contextvars(a=1)
            clear_contextvars()
            return merge_contextvars(None, None, {"b": 2})
        assert {"b": 2} == await event_loop.create_task(coro())
    async def test_clear_without_bind(self, event_loop):
        """
        Clearing an already-empty context-local context is harmless and
        merge_contextvars still returns only the event dict.
        """
        async def coro():
            clear_contextvars()
            return merge_contextvars(None, None, {})
        assert {} == await event_loop.create_task(coro())
    async def test_unbind(self, event_loop):
        """
        Unbinding a previously bound variable causes it to be removed from the
        result of merge_contextvars.
        """
        async def coro():
            bind_contextvars(a=1)
            unbind_contextvars("a")
            return merge_contextvars(None, None, {"b": 2})
        assert {"b": 2} == await event_loop.create_task(coro())
    async def test_unbind_not_bound(self, event_loop):
        """
        Unbinding a variable that was never bound doesn't raise an exception.
        """
        async def coro():
            # Since unbinding means "setting to Ellipsis", we have to make
            # some effort to ensure that the ContextVar never existed.
            unbind_contextvars("a" + secrets.token_hex())
            return merge_contextvars(None, None, {"b": 2})
        assert {"b": 2} == await event_loop.create_task(coro())
    async def test_parallel_binds(self, event_loop):
        """
        Binding a variable causes it to be included in the result of
        merge_contextvars.
        """
        # The two events interleave the tasks so both bind their own "a"
        # before either merges, proving task-local bindings don't clobber
        # each other while the shared pre-task binding "c" is inherited.
        coro1_bind = asyncio.Event()
        coro2_bind = asyncio.Event()
        bind_contextvars(c=3)
        async def coro1():
            bind_contextvars(a=1)
            coro1_bind.set()
            await coro2_bind.wait()
            return merge_contextvars(None, None, {"b": 2})
        async def coro2():
            bind_contextvars(a=2)
            await coro1_bind.wait()
            coro2_bind.set()
            return merge_contextvars(None, None, {"b": 2})
        coro1_task = event_loop.create_task(coro1())
        coro2_task = event_loop.create_task(coro2())
        assert {"a": 1, "b": 2, "c": 3} == await coro1_task
        assert {"a": 2, "b": 2, "c": 3} == await coro2_task
    def test_get_only_gets_structlog_without_deleted(self):
        """
        get_contextvars returns only the structlog-specific key/values with the
        prefix removed. Deleted keys (= Ellipsis) are ignored.
        """
        bind_contextvars(a=1, b=2)
        unbind_contextvars("b")
        _CONTEXT_VARS["foo"] = "bar"
        assert {"a": 1} == get_contextvars()
    def test_get_merged_merges_context(self):
        """
        get_merged_contextvars merges a bound context into the copy.
        """
        bind_contextvars(x=1)
        log = structlog.get_logger().bind(y=2)
        assert {"x": 1, "y": 2} == get_merged_contextvars(log)
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import glob
import os.path
class Elfutils(AutotoolsPackage, SourcewarePackage):
    """elfutils is a collection of various binary tools such as
    eu-objdump, eu-readelf, and other utilities that allow you to
    inspect and manipulate ELF files. Refer to Table 5.Tools Included
    in elfutils for Red Hat Developer for a complete list of binary
    tools that are distributed with the Red Hat Developer Toolset
    version of elfutils."""
    homepage = "https://fedorahosted.org/elfutils/"
    sourceware_mirror_path = "elfutils/0.179/elfutils-0.179.tar.bz2"
    list_url = "https://sourceware.org/elfutils/ftp"
    list_depth = 1
    version('0.182', sha256='ecc406914edf335f0b7fc084ebe6c460c4d6d5175bfdd6688c1c78d9146b8858')
    version('0.181', sha256='29a6ad7421ec2acfee489bb4a699908281ead2cb63a20a027ce8804a165f0eb3')
    version('0.180', sha256='b827b6e35c59d188ba97d7cf148fa8dc6f5c68eb6c5981888dfdbb758c0b569d')
    version('0.179', sha256='25a545566cbacaa37ae6222e58f1c48ea4570f53ba991886e2f5ce96e22a23a2')
    version('0.178', sha256='31e7a00e96d4e9c4bda452e1f2cdac4daf8abd24f5e154dee232131899f3a0f2')
    version('0.177', sha256='fa489deccbcae7d8c920f60d85906124c1989c591196d90e0fd668e3dc05042e')
    version('0.176', sha256='eb5747c371b0af0f71e86215a5ebb88728533c3a104a43d4231963f308cd1023')
    version('0.175', sha256='f7ef925541ee32c6d15ae5cb27da5f119e01a5ccdbe9fe57bf836730d7b7a65b')
    version('0.174', sha256='cdf27e70076e10a29539d89e367101d516bc4aa11b0d7777fe52139e3fcad08a')
    version('0.173', sha256='b76d8c133f68dad46250f5c223482c8299d454a69430d9aa5c19123345a000ff')
    version('0.170', sha256='1f844775576b79bdc9f9c717a50058d08620323c1e935458223a12f249c9e066')
    version('0.168', sha256='b88d07893ba1373c7dd69a7855974706d05377766568a7d9002706d5de72c276')
    version('0.163', sha256='7c774f1eef329309f3b05e730bdac50013155d437518a2ec0e24871d312f2e23')
    # Libraries for reading compressed DWARF sections.
    variant('bzip2', default=False,
            description='Support bzip2 compressed sections.')
    variant('xz', default=False,
            description='Support xz (lzma) compressed sections.')
    # Native language support from libintl.
    variant('nls', default=True,
            description='Enable Native Language Support.')
    # libdebuginfod support
    # NB: For 0.181 and newer, this enables _both_ the client and server
    variant('debuginfod', default=False,
            description='Enable libdebuginfod support.')
    depends_on('bzip2', type='link', when='+bzip2')
    depends_on('xz', type='link', when='+xz')
    depends_on('zlib', type='link')
    depends_on('gettext', when='+nls')
    depends_on('m4', type='build')
    # debuginfod has extra dependencies
    # NB: Waiting on an elfutils patch before we can use libmicrohttpd@0.9.71
    depends_on('libmicrohttpd@0.9.33:0.9.70', type='link', when='+debuginfod')
    depends_on('libarchive@3.1.2:', type='link', when='+debuginfod')
    depends_on('sqlite@3.7.17:', type='link', when='+debuginfod')
    depends_on('curl@7.29.0:', type='link', when='+debuginfod')
    conflicts('%gcc@7.2.0:', when='@0.163')
    # debuginfod support only exists in the configure script from 0.179 on;
    # the +debuginfod variant is therefore restricted to newer releases.
    conflicts('+debuginfod', when='@:0.178')
    provides('elf@1')
    # Elfutils uses nested functions in C code, which is implemented
    # in gcc, but not in clang. C code compiled with gcc is
    # binary-compatible with clang, so it should be possible to build
    # elfutils with gcc, and then link it to clang-built libraries.
    conflicts('%apple-clang')
    conflicts('%clang')
    # Elfutils uses -Wall and we don't want to fail the build over a
    # stray warning.
    def patch(self):
        """Strip -Werror from every sub-directory Makefile.in so stray
        compiler warnings cannot fail the build."""
        files = glob.glob(os.path.join('*', 'Makefile.in'))
        filter_file('-Werror', '', *files)
    # Route user compiler flags through Spack's build-system mechanism
    # rather than injecting them into the compiler wrapper directly.
    flag_handler = AutotoolsPackage.build_system_flags
    def configure_args(self):
        """Compose the ./configure arguments from the enabled variants."""
        spec = self.spec
        args = []
        if '+bzip2' in spec:
            args.append('--with-bzlib=%s' % spec['bzip2'].prefix)
        else:
            args.append('--without-bzlib')
        if '+xz' in spec:
            args.append('--with-lzma=%s' % spec['xz'].prefix)
        else:
            args.append('--without-lzma')
        # zlib is required
        args.append('--with-zlib=%s' % spec['zlib'].prefix)
        if '+nls' in spec:
            # configure doesn't use LIBS correctly
            args.append('LDFLAGS=-Wl,--no-as-needed -L%s -lintl' %
                        spec['gettext'].prefix.lib)
        else:
            args.append('--disable-nls')
        if '+debuginfod' in spec:
            args.append('--enable-debuginfod')
            # 0.181+ split the client library behind its own switch.
            if spec.satisfies('@0.181:'):
                args.append('--enable-libdebuginfod')
        else:
            args.append('--disable-debuginfod')
            if spec.satisfies('@0.181:'):
                args.append('--disable-libdebuginfod')
        return args
    # Install elf.h to include directory.
    @run_after('install')
    def install_elfh(self):
        install(join_path('libelf', 'elf.h'), self.prefix.include)
    # Provide location of libelf.so to match libelf.
    @property
    def libs(self):
        return find_libraries('libelf', self.prefix, recursive=True)
|
from django.urls import path

from Projeto_Joaninha.pedidos import views as v

# URL namespace, used when reversing, e.g. "pedidos:pedidos_list".
app_name = "pedidos"

urlpatterns = [
    # List all pedidos (orders).
    path('', v.pedidos_list, name='pedidos_list'),
    # Creation form for a new pedido.
    path('cardapio/', v.pedido_create_view, name="pedidos_add"),
    # Detail view of a single pedido, addressed by primary key.
    path('<int:pk>/', v.pedidos_detail, name='pedidos_detail'),
]
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/delay.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <asm/elf.h>
#include <asm/proc-fns.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
/* Defined in mm code: installs the 1:1 mappings needed for a soft reboot. */
extern void setup_mm_for_reboot(char mode);
#ifdef CONFIG_PROC_FS
/* procfs directory entry for CPU information; exported for other modules. */
struct proc_dir_entry *proc_dir_cpu;
EXPORT_SYMBOL(proc_dir_cpu);
#endif
/*
 * Architecture-level CPU reset. Only mode 's' (soft reset) is handled:
 * it jumps to address 0 via cpu_reset(). Any other mode falls through
 * and does nothing here.
 */
extern inline void arch_reset(char mode)
{
	if (mode == 's') {
		/* Use cpu handler, jump to 0 */
		cpu_reset(0);
	}
}
/* Hook installed by board/PM code to power the machine off; may be NULL. */
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);
/* Reboot mode selected via the "reboot=" kernel parameter; defaults to 'h'.
 * 's' selects the soft reset path in arch_reset(). */
static char reboot_mode_nds32 = 'h';
/*
 * Parse the "reboot=" command-line option; only the first character of
 * the argument is used as the mode.
 */
int __init reboot_setup(char *str)
{
	reboot_mode_nds32 = str[0];
	return 1;
}
/* Placeholder power-off handler: does nothing and reports success. */
static int cpub_pwroff(void)
{
	return 0;
}
__setup("reboot=", reboot_setup);
/* Halt the machine (no reboot); delegates to the stub power-off above. */
void machine_halt(void)
{
	cpub_pwroff();
}
EXPORT_SYMBOL(machine_halt);
/* Power the machine off via the registered pm_power_off hook, if any. */
void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
}
EXPORT_SYMBOL(machine_power_off);
/*
 * Restart the machine: flush and disable caches, set up the 1:1
 * mappings needed for soft boot, run the registered restart handlers,
 * then perform the architecture reset. If the reset does not happen,
 * report the failure and spin forever.
 */
void machine_restart(char *cmd)
{
	/*
	 * Clean and disable cache, and turn off interrupts
	 */
	cpu_proc_fin();
	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot(reboot_mode_nds32);
	/* Execute kernel restart handler call chain */
	do_kernel_restart(cmd);
	/*
	 * Now call the architecture specific reboot code.
	 */
	arch_reset(reboot_mode_nds32);
	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user!
	 */
	mdelay(1000);
	pr_info("Reboot failed -- System halted\n");
	while (1) ;
}
EXPORT_SYMBOL(machine_restart);
/*
 * Dump a pt_regs frame to the kernel log: PC/LP with symbol
 * resolution, stack/frame/global pointers, all general-purpose
 * registers, the interrupt state, and the active address-space
 * segment (kernel vs. user).
 */
void show_regs(struct pt_regs *regs)
{
	printk("PC is at %pS\n", (void *)instruction_pointer(regs));
	printk("LP is at %pS\n", (void *)regs->lp);
	pr_info("pc : [<%08lx>]    lp : [<%08lx>]    %s\n"
		"sp : %08lx  fp : %08lx  gp : %08lx\n",
		instruction_pointer(regs),
		regs->lp, print_tainted(), regs->sp, regs->fp, regs->gp);
	pr_info("r25: %08lx  r24: %08lx\n", regs->uregs[25], regs->uregs[24]);
	pr_info("r23: %08lx  r22: %08lx  r21: %08lx  r20: %08lx\n",
		regs->uregs[23], regs->uregs[22],
		regs->uregs[21], regs->uregs[20]);
	pr_info("r19: %08lx  r18: %08lx  r17: %08lx  r16: %08lx\n",
		regs->uregs[19], regs->uregs[18],
		regs->uregs[17], regs->uregs[16]);
	pr_info("r15: %08lx  r14: %08lx  r13: %08lx  r12: %08lx\n",
		regs->uregs[15], regs->uregs[14],
		regs->uregs[13], regs->uregs[12]);
	pr_info("r11: %08lx  r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs->uregs[11], regs->uregs[10],
		regs->uregs[9], regs->uregs[8]);
	pr_info("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs->uregs[7], regs->uregs[6], regs->uregs[5], regs->uregs[4]);
	pr_info("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]);
	pr_info("  IRQs o%s  Segment %s\n",
		interrupts_enabled(regs) ? "n" : "ff",
		segment_eq(get_fs(), get_ds())? "kernel" : "user");
}
EXPORT_SYMBOL(show_regs);
/* Nothing to flush: this port keeps no extra lazy per-thread state here. */
void flush_thread(void)
{
}
/* Per-CPU task_struct pointer; its consumers (entry/switch code) are not
 * in this file -- see the arch entry assembly. */
DEFINE_PER_CPU(struct task_struct *, __entry_task);
/* Assembly entry point that newly forked threads return through. */
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
/*
 * Set up the saved state of a newly copied task.
 *
 * Kernel threads get a zeroed register frame with the thread function
 * and its argument stashed in cpu_context r6/r7. User threads get a
 * copy of the parent's registers, an optional new stack pointer, a
 * zero return value in r0, and (with CLONE_SETTLS) the TLS pointer
 * from r3 installed into r25. Finally the saved cpu_context is pointed
 * at ret_from_fork/childregs so the scheduler can switch to the task.
 */
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);
	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		/* kernel thread fn */
		p->thread.cpu_context.r6 = stack_start;
		/* kernel thread argument */
		p->thread.cpu_context.r7 = stk_sz;
	} else {
		*childregs = *current_pt_regs();
		if (stack_start)
			childregs->sp = stack_start;
		/* child get zero as ret. */
		childregs->uregs[0] = 0;
		childregs->osp = 0;
		if (clone_flags & CLONE_SETTLS)
			childregs->uregs[25] = childregs->uregs[3];
	}
	/* cpu context switching */
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;
#ifdef CONFIG_HWZOL
	/* Hardware zero-overhead-loop registers start cleared in the child. */
	childregs->lb = 0;
	childregs->le = 0;
	childregs->lc = 0;
#endif
	return 0;
}
/*
 * Fill in the FPU state for a core dump.
 *
 * This port carries no FPU context in the thread state here, so report
 * that no valid FPU registers are available.
 *
 * Returns 0 (no FPU state dumped).
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu)
{
	return 0;
}
EXPORT_SYMBOL(dump_fpu);
/*
 * Return an address identifying where the blocked task @p is waiting
 * (used for /proc wchan reporting). Walks up to 16 saved frame
 * pointers looking for the first return address outside the scheduler.
 * Returns 0 when no answer is available: missing/current/runnable
 * task, no frame pointers configured, or a corrupt frame chain.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, lr;
	unsigned long stack_start, stack_end;
	int count = 0;
	/* No wchan for missing, current, or runnable tasks. */
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	if (IS_ENABLED(CONFIG_FRAME_POINTER)) {
		stack_start = (unsigned long)end_of_stack(p);
		stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;
		fp = thread_saved_fp(p);
		do {
			/* Bail out if the frame pointer leaves the task stack. */
			if (fp < stack_start || fp > stack_end)
				return 0;
			lr = ((unsigned long *)fp)[0];
			if (!in_sched_functions(lr))
				return lr;
			/* Follow the saved frame-pointer chain one level up. */
			fp = *(unsigned long *)(fp + 4);
		} while (count++ < 16);
	}
	return 0;
}
EXPORT_SYMBOL(get_wchan);
|
// UMD wrapper: expose the icon descriptor through CommonJS
// (`module.exports`), AMD (`define`), or the browser global
// `Dashboard20`, whichever module system is present at load time.
(function (global, factory) {
  typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
  typeof define === 'function' && define.amd ? define(factory) :
  (global.Dashboard20 = factory());
}(this, (function () { 'use strict';
  // Serializable descriptor of the 20x20 "dashboard" SVG icon: a tree of
  // element names ("elem"), attributes ("attrs"), and child "content"
  // nodes, intended for an icon-rendering helper (generated file).
  var _20 = {
    elem: 'svg',
    attrs: {
      xmlns: 'http://www.w3.org/2000/svg',
      viewBox: '0 0 32 32',
      width: 20,
      height: 20,
    },
    content: [
      {
        elem: 'path',
        attrs: {
          d:
            'M24 21h2v5h-2zm-4-5h2v10h-2zm-9 10a5.006 5.006 0 0 1-5-5h2a3 3 0 1 0 3-3v-2a5 5 0 0 1 0 10z',
        },
      },
      {
        elem: 'path',
        attrs: {
          d:
            'M28 2H4a2.002 2.002 0 0 0-2 2v24a2.002 2.002 0 0 0 2 2h24a2.003 2.003 0 0 0 2-2V4a2.002 2.002 0 0 0-2-2zm0 9H14V4h14zM12 4v7H4V4zM4 28V13h24l.002 15z',
        },
      },
    ],
    name: 'dashboard',
    size: 20,
  };
  return _20;
})));
|
from typing import Tuple
import jax.numpy as np
from jax.nn import softmax, softplus
from pzflow.bijectors.bijectors import (
Bijector,
Bijector_Info,
Chain,
ForwardFunction,
InitFunction,
InverseFunction,
Roll,
)
from pzflow.utils import DenseReluNetwork
def _RationalQuadraticSpline(
    inputs: np.ndarray,
    W: np.ndarray,
    H: np.ndarray,
    D: np.ndarray,
    B: float,
    periodic: bool = False,
    inverse: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
    """Apply rational quadratic spline to inputs and return outputs with log_det.
    Applies the piecewise rational quadratic spline developed in [1].
    Parameters
    ----------
    inputs : np.ndarray
        The inputs to be transformed.
    W : np.ndarray
        The widths of the spline bins.
    H : np.ndarray
        The heights of the spline bins.
    D : np.ndarray
        The derivatives of the inner spline knots.
    B : float
        Range of the splines.
        Outside of (-B,B), the transformation is just the identity.
    inverse : bool, default=False
        If True, perform the inverse transformation.
        Otherwise perform the forward transformation.
    periodic : bool, default=False
        Whether to make this a periodic, Circular Spline [2].
    Returns
    -------
    outputs : np.ndarray
        The result of applying the splines to the inputs.
    log_det : np.ndarray
        The log determinant of the Jacobian at the inputs.
    References
    ----------
    [1] Conor Durkan, Artur Bekasov, Iain Murray, George Papamakarios.
        Neural Spline Flows. arXiv:1906.04032, 2019.
        https://arxiv.org/abs/1906.04032
    [2] Rezende, Danilo Jimenez et al.
        Normalizing Flows on Tori and Spheres. arxiv:2002.02428, 2020
        http://arxiv.org/abs/2002.02428
    """
    # knot x-positions
    # (cumulative sum of bin widths with the left edge -B prepended,
    # giving K+1 knots spanning [-B, B])
    xk = np.pad(
        -B + np.cumsum(W, axis=-1),
        [(0, 0)] * (len(W.shape) - 1) + [(1, 0)],
        mode="constant",
        constant_values=-B,
    )
    # knot y-positions
    yk = np.pad(
        -B + np.cumsum(H, axis=-1),
        [(0, 0)] * (len(H.shape) - 1) + [(1, 0)],
        mode="constant",
        constant_values=-B,
    )
    # knot derivatives
    if periodic:
        # the first knot reuses the last derivative so the spline wraps
        dk = np.pad(D, [(0, 0)] * (len(D.shape) - 1) + [(1, 0)], mode="wrap")
    else:
        # boundary knots get derivative 1 to match the identity outside (-B, B)
        dk = np.pad(
            D,
            [(0, 0)] * (len(D.shape) - 1) + [(1, 1)],
            mode="constant",
            constant_values=1,
        )
    # knot slopes
    sk = H / W
    # if not periodic, out-of-bounds inputs will have identity applied
    # if periodic, we map the input into the appropriate region inside
    # the period. For now, we will pretend all inputs are periodic.
    # This makes sure that out-of-bounds inputs don't cause problems
    # with the spline, but for the non-periodic case, we will replace
    # these with their original values at the end
    out_of_bounds = (inputs <= -B) | (inputs >= B)
    masked_inputs = np.where(out_of_bounds, np.abs(inputs) - B, inputs)
    # find bin for each input
    # (inverse mode searches the y-knots; forward mode the x-knots)
    if inverse:
        idx = np.sum(yk <= masked_inputs[..., None], axis=-1)[..., None] - 1
    else:
        idx = np.sum(xk <= masked_inputs[..., None], axis=-1)[..., None] - 1
    # get kx, ky, kyp1, kd, kdp1, kw, ks for the bin corresponding to each input
    input_xk = np.take_along_axis(xk, idx, -1)[..., 0]
    input_yk = np.take_along_axis(yk, idx, -1)[..., 0]
    input_dk = np.take_along_axis(dk, idx, -1)[..., 0]
    input_dkp1 = np.take_along_axis(dk, idx + 1, -1)[..., 0]
    input_wk = np.take_along_axis(W, idx, -1)[..., 0]
    input_hk = np.take_along_axis(H, idx, -1)[..., 0]
    input_sk = np.take_along_axis(sk, idx, -1)[..., 0]
    if inverse:
        # [1] Appendix A.3
        # quadratic formula coefficients
        a = (input_hk) * (input_sk - input_dk) + (masked_inputs - input_yk) * (
            input_dkp1 + input_dk - 2 * input_sk
        )
        b = (input_hk) * input_dk - (masked_inputs - input_yk) * (
            input_dkp1 + input_dk - 2 * input_sk
        )
        c = -input_sk * (masked_inputs - input_yk)
        # numerically stable form of the quadratic root (avoids cancellation)
        relx = 2 * c / (-b - np.sqrt(b ** 2 - 4 * a * c))
        outputs = relx * input_wk + input_xk
        # if not periodic, replace out-of-bounds values with original values
        if not periodic:
            outputs = np.where(out_of_bounds, inputs, outputs)
        # [1] Appendix A.2
        # calculate the log determinant
        dnum = (
            input_dkp1 * relx ** 2
            + 2 * input_sk * relx * (1 - relx)
            + input_dk * (1 - relx) ** 2
        )
        dden = input_sk + (input_dkp1 + input_dk - 2 * input_sk) * relx * (1 - relx)
        log_det = 2 * np.log(input_sk) + np.log(dnum) - 2 * np.log(dden)
        # if not periodic, replace log_det for out-of-bounds values = 0
        if not periodic:
            log_det = np.where(out_of_bounds, 0, log_det)
        log_det = log_det.sum(axis=1)
        # the inverse transform carries the negated forward log determinant
        return outputs, -log_det
    else:
        # [1] Appendix A.1
        # calculate spline
        relx = (masked_inputs - input_xk) / input_wk
        num = input_hk * (input_sk * relx ** 2 + input_dk * relx * (1 - relx))
        den = input_sk + (input_dkp1 + input_dk - 2 * input_sk) * relx * (1 - relx)
        outputs = input_yk + num / den
        # if not periodic, replace out-of-bounds values with original values
        if not periodic:
            outputs = np.where(out_of_bounds, inputs, outputs)
        # [1] Appendix A.2
        # calculate the log determinant
        dnum = (
            input_dkp1 * relx ** 2
            + 2 * input_sk * relx * (1 - relx)
            + input_dk * (1 - relx) ** 2
        )
        dden = input_sk + (input_dkp1 + input_dk - 2 * input_sk) * relx * (1 - relx)
        log_det = 2 * np.log(input_sk) + np.log(dnum) - 2 * np.log(dden)
        # if not periodic, replace log_det for out-of-bounds values = 0
        if not periodic:
            log_det = np.where(out_of_bounds, 0, log_det)
        log_det = log_det.sum(axis=1)
        return outputs, log_det
@Bijector
def NeuralSplineCoupling(
    K: int = 16,
    B: float = 3,
    hidden_layers: int = 2,
    hidden_dim: int = 128,
    transformed_dim: int = None,
    n_conditions: int = 0,
    periodic: bool = False,
) -> Tuple[InitFunction, Bijector_Info]:
    """A coupling layer bijection with rational quadratic splines.
    This Bijector is a Coupling Layer [1,2], and as such only transforms
    the second half of input dimensions (or the last N dimensions, where
    N = transformed_dim). In order to transform all of the dimensions,
    you need multiple Couplings interspersed with Bijectors that change
    the order of inputs dimensions, e.g., Reverse, Shuffle, Roll, etc.
    NeuralSplineCoupling uses piecewise rational quadratic splines,
    as developed in [3].
    If periodic=True, then this is a Circular Spline as described in [4].
    Parameters
    ----------
    K : int, default=16
        Number of bins in the spline (the number of knots is K+1).
    B : float, default=3
        Range of the splines.
        Outside of (-B,B), the transformation is just the identity.
    hidden_layers : int, default=2
        The number of hidden layers in the neural network used to calculate
        the positions and derivatives of the spline knots.
    hidden_dim : int, default=128
        The width of the hidden layers in the neural network used to
        calculate the positions and derivatives of the spline knots.
    transformed_dim : int, optional
        The number of dimensions transformed by the splines.
        Default is ceiling(input_dim /2).
    n_conditions : int, default=0
        The number of variables to condition the bijection on.
    periodic : bool, default=False
        Whether to make this a periodic, Circular Spline [4].
    Returns
    -------
    InitFunction
        The InitFunction of the NeuralSplineCoupling Bijector.
    Bijector_Info
        Tuple of the Bijector name and the input parameters.
        This allows it to be recreated later.
    References
    ----------
    [1] Laurent Dinh, David Krueger, Yoshua Bengio. NICE: Non-linear
        Independent Components Estimation. arXiv: 1605.08803, 2015.
        http://arxiv.org/abs/1605.08803
    [2] Laurent Dinh, Jascha Sohl-Dickstein, Samy Bengio.
        Density Estimation Using Real NVP. arXiv: 1605.08803, 2017.
        http://arxiv.org/abs/1605.08803
    [3] Conor Durkan, Artur Bekasov, Iain Murray, George Papamakarios.
        Neural Spline Flows. arXiv:1906.04032, 2019.
        https://arxiv.org/abs/1906.04032
    [4] Rezende, Danilo Jimenez et al.
        Normalizing Flows on Tori and Spheres. arxiv:2002.02428, 2020
        http://arxiv.org/abs/2002.02428
    """
    if not isinstance(periodic, bool):
        raise ValueError("`periodic` must be True or False.")
    # record name + constructor args so the bijector can be recreated later
    bijector_info = (
        "NeuralSplineCoupling",
        (K, B, hidden_layers, hidden_dim, transformed_dim, n_conditions, periodic),
    )
    @InitFunction
    def init_fun(rng, input_dim, **kwargs):
        if transformed_dim is None:
            upper_dim = input_dim // 2  # variables that determine NN params
            lower_dim = input_dim - upper_dim  # variables transformed by the NN
        else:
            upper_dim = input_dim - transformed_dim
            lower_dim = transformed_dim
        # create the neural network that will take in the upper dimensions and
        # will return the spline parameters to transform the lower dimensions
        # (3K-1 parameters per transformed dimension, plus one extra
        # derivative when the spline is periodic)
        network_init_fun, network_apply_fun = DenseReluNetwork(
            (3 * K - 1 + int(periodic)) * lower_dim, hidden_layers, hidden_dim
        )
        _, network_params = network_init_fun(rng, (upper_dim + n_conditions,))
        # calculate spline parameters as a function of the upper variables
        def spline_params(params, upper, conditions):
            key = np.hstack((upper, conditions))[:, : upper_dim + n_conditions]
            outputs = network_apply_fun(params, key)
            outputs = np.reshape(outputs, [-1, lower_dim, 3 * K - 1 + int(periodic)])
            # split network outputs into widths (K), heights (K), derivatives
            W, H, D = np.split(outputs, [K, 2 * K], axis=2)
            # softmax normalizes widths/heights to sum to the range 2B
            W = 2 * B * softmax(W)
            H = 2 * B * softmax(H)
            # softplus keeps knot derivatives positive (monotone spline)
            D = softplus(D)
            return W, H, D
        @ForwardFunction
        def forward_fun(params, inputs, conditions, **kwargs):
            # lower dimensions are transformed as function of upper dimensions
            upper, lower = inputs[:, :upper_dim], inputs[:, upper_dim:]
            # widths, heights, derivatives = function(upper dimensions)
            W, H, D = spline_params(params, upper, conditions)
            # transform the lower dimensions with the Rational Quadratic Spline
            lower, log_det = _RationalQuadraticSpline(
                lower, W, H, D, B, periodic, inverse=False
            )
            outputs = np.hstack((upper, lower))
            return outputs, log_det
        @InverseFunction
        def inverse_fun(params, inputs, conditions, **kwargs):
            # lower dimensions are transformed as function of upper dimensions
            upper, lower = inputs[:, :upper_dim], inputs[:, upper_dim:]
            # widths, heights, derivatives = function(upper dimensions)
            W, H, D = spline_params(params, upper, conditions)
            # transform the lower dimensions with the Rational Quadratic Spline
            lower, log_det = _RationalQuadraticSpline(
                lower, W, H, D, B, periodic, inverse=True
            )
            outputs = np.hstack((upper, lower))
            return outputs, log_det
        return network_params, forward_fun, inverse_fun
    return init_fun, bijector_info
@Bijector
def RollingSplineCoupling(
    nlayers: int,
    shift: int = 1,
    K: int = 16,
    B: float = 3,
    hidden_layers: int = 2,
    hidden_dim: int = 128,
    transformed_dim: int = None,
    n_conditions: int = 0,
    periodic: bool = False,
) -> Tuple[InitFunction, Bijector_Info]:
    """Chain of alternating NeuralSplineCoupling and Roll bijections.
    Parameters
    ----------
    nlayers : int
        How many (NeuralSplineCoupling(), Roll()) couplets the chain holds.
    shift : int
        Number of positions the inputs are shifted by each Roll().
    K : int, default=16
        Number of spline bins in each RollingSplineCoupling layer.
    B : float, default=3
        Range of the splines in each RollingSplineCoupling layer.
    hidden_layers : int, default=2
        Number of hidden layers in the network that predicts the spline
        bins and derivatives.
    hidden_dim : int, default=128
        Width of the hidden layers in the network that predicts the
        spline bins and derivatives.
    transformed_dim : int, optional
        Number of dimensions transformed by the splines.
        Default is ceiling(input_dim /2).
    n_conditions : int, default=0
        Number of variables the bijection is conditioned on.
    periodic : bool, default=False
        Whether to use periodic, Circular Splines.
    Returns
    -------
    InitFunction
        The InitFunction of the RollingSplineCoupling Bijector.
    Bijector_Info
        Nested tuple of the Bijector name and input parameters. This allows
        it to be recreated later.
    """
    # Build one (coupling, roll) couplet; tuple repetition below reuses the
    # same pair of bijector objects nlayers times, exactly as the original
    # inline chain construction did.
    couplet = (
        NeuralSplineCoupling(
            K=K,
            B=B,
            hidden_layers=hidden_layers,
            hidden_dim=hidden_dim,
            transformed_dim=transformed_dim,
            n_conditions=n_conditions,
            periodic=periodic,
        ),
        Roll(shift),
    )
    return Chain(*(couplet * nlayers))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script provides the common methods and data structures that are generally used in the project."""
import os
import re
import numpy as np
import pandas as pd
import torch
from rdkit import Chem, DataStructs, rdBase
from rdkit.Chem import AllChem
from rdkit.Chem.Scaffolds import MurckoScaffold
from sklearn.externals import joblib
from torch.utils.data import Dataset
# Keep PyTorch single-threaded; any parallelism is managed elsewhere.
torch.set_num_threads(1)
# Silence RDKit's error-level log spam.
rdBase.DisableLog('rdApp.error')
# designate the device that the PyTorch is allowed to use:
# prefer a CUDA GPU when one is available, otherwise fall back to CPU.
dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Voc(object):
    """ Vocabulary Class for all of the tokens for SMILES string construction.
    It also provides the method to encode SMILES string into the index array of tokens
    and decode the index array into the SMILES string.
    Arguments:
        path (str): the path of vocabulary file that contains all of the tokens split by '\n';
            may be None (or a non-existent path), in which case only the special
            tokens are used.
        max_len (int): the maximum number of tokens per SMILES (default: 100).
    """
    def __init__(self, path, max_len=100):
        # 'EOS' terminates a sequence; 'GO' marks its start.
        self.chars = ['EOS', 'GO']
        if path is not None and os.path.exists(path):
            # Context manager guarantees the vocabulary file is closed
            # (the original left the handle open).
            with open(path, 'r') as f:
                chars = f.read().split()
            # Vocabulary files must not contain duplicate tokens.
            assert len(set(chars)) == len(chars)
            self.chars += chars
        self.size = len(self.chars)
        # dict -> {token: index} for encoding
        self.tk2ix = dict(zip(self.chars, range(len(self.chars))))
        # dict -> {index: token} for decoding
        self.ix2tk = {v: k for k, v in self.tk2ix.items()}
        self.max_len = max_len
    def tokenize(self, smile):
        """Transform a SMILES string into a series of tokens
        Arguments:
            smile (str): SMILES string with correct grammar
        Returns:
            tokens (list): the list of tokens that are contained in the vocabulary
        """
        # Bracketed atoms (e.g. [nH], [O-]) are kept as single tokens.
        regex = r'(\[[^\[\]]{1,6}\])'
        # Strip ring-bond numbers inside brackets, then canonicalize.
        smile = re.sub(r'\[\d+', '[', smile)
        smile = Chem.CanonSmiles(smile, 0)
        # Two-letter halogens become single characters so that the
        # character-wise split below cannot break them apart.
        smile = smile.replace('Cl', 'L').replace('Br', 'R')
        tokens = []
        for word in re.split(regex, smile):
            if word == '' or word is None: continue
            if word.startswith('['):
                tokens.append(word)
            else:
                for char in word:
                    tokens.append(char)
        tokens.append('EOS')
        return tokens
    def encode(self, tokens):
        """ Encode a series of tokens into a tensor of token indices
        Arguments:
            tokens (list): a series of tokens. Commonly, it is the output of
                the "tokenize" method.
        Returns:
            arr (LongTensor): a long tensor storing the indices of all tokens for one SMILES
        """
        arr = torch.zeros(len(tokens)).long()
        for i, char in enumerate(tokens):
            arr[i] = self.tk2ix[char]
        return arr
    def decode(self, arr):
        """Takes an array of indices and returns the corresponding SMILES
        Arguments:
            arr (LongTensor): LongTensor stores the indices of all tokens for one SMILES
        Returns:
            smile (str): decoded SMILES string
        """
        chars = []
        for i in arr.cpu().numpy():
            # Stop at the end-of-sequence marker.
            if i == self.tk2ix['EOS']: break
            chars.append(self.ix2tk[i])
        smile = "".join(chars)
        # Undo the single-character halogen substitution from tokenize().
        smile = smile.replace('L', 'Cl').replace('R', 'Br')
        return smile
class MolData(Dataset):
    """Custom PyTorch Dataset that takes a file containing separated SMILES
    Arguments:
        df (str or DataFrame): it is file path of dataset if it is str;
            this data frame contains the column of CANONICAL_SMILES
        voc (Voc): the instance of Voc for SMILES token vocabulary
        token (str, optional): the column name in df for tokens;
            this is for time-saving if the SMILES can be transformed into a series of tokens
            and be saved into table, the "tokenize" step which is quite time-consuming can
            be ignored. (Default: None)
    """
    def __init__(self, df, voc, token=None):
        self.voc = voc
        if isinstance(df, str) and os.path.exists(df):
            df = pd.read_table(df)
        self.smiles = df.CANONICAL_SMILES.values
        self.tokens = []
        if token is None:
            # Tokenize on the fly; SMILES longer than max_len are dropped.
            # (Renamed the loop variable -- the original shadowed the
            # `token` parameter.)
            for smile in self.smiles:
                toks = self.voc.tokenize(smile)
                if len(toks) > self.voc.max_len:
                    continue
                self.tokens.append(toks)
        else:
            # Pre-tokenized column: entries are space-separated token strings.
            for sent in df[token].values:
                self.tokens.append(sent.split(' '))
    def __getitem__(self, i):
        """Return the encoded (index tensor) form of sample i."""
        return self.voc.encode(self.tokens[i])
    def __len__(self):
        return len(self.tokens)
    @classmethod
    def collate_fn(cls, arr, max_len=100):
        """Function to take a list of encoded sequences and turn them into a batch"""
        # Right-pad every sequence with zeros into a (batch, max_len) LongTensor.
        collated_arr = torch.zeros(len(arr), max_len).long()
        for i, seq in enumerate(arr):
            collated_arr[i, :seq.size(0)] = seq
        return collated_arr
class QSARData(Dataset):
    """PyTorch Dataset pairing encoded SMILES with a binary activity label.

    A ligand is labelled active (1.0) when its PCHEMBL_VALUE is >= 6.5,
    otherwise inactive (0.0).
    """

    def __init__(self, voc, ligand):
        self.voc = voc
        self.smile = [voc.encode(voc.tokenize(s)) for s in ligand['CANONICAL_SMILES']]
        self.label = torch.Tensor((ligand['PCHEMBL_VALUE'] >= 6.5).values).float()

    def __getitem__(self, i):
        return self.smile[i], self.label[i]

    def __len__(self):
        return len(self.label)

    def collate_fn(self, arr):
        """Pad a batch of (sequence, label) pairs to the longest sequence."""
        longest = max(item[0].size(0) for item in arr)
        smile_arr = torch.zeros(len(arr), longest).long()
        label_arr = torch.zeros(len(arr), 1)
        for i, (seq, label) in enumerate(arr):
            smile_arr[i, :seq.size(0)] = seq
            label_arr[i, :] = label
        return smile_arr, label_arr
class Environment:
    """Virtual environment that provides the reward for each molecule
    based on an ECFP predictor for activity.

    Arguments:
        env_path (str): the file path of the predictor.
        radius (int): the radius parameter of ECFP.
        bit_len (int): the vector length of ECFP.
        is_reg (bool, optional): regression (True) or classification
            (False) model (Default: False)
    """

    def __init__(self, env_path, radius=3, bit_len=4096, is_reg=False):
        self.clf_path = env_path
        self.clf = joblib.load(self.clf_path)
        self.radius = radius
        self.bit_len = bit_len
        self.is_reg = is_reg

    def __call__(self, smiles):
        """Score a list of SMILES strings.

        Returns regression predictions when ``is_reg`` is True,
        otherwise the positive-class probabilities.
        """
        # FIX: forward the configured radius/bit_len instead of silently
        # falling back to the classmethod defaults (identical values by
        # default, so existing callers are unaffected).
        fps = self.ECFP_from_SMILES(smiles, radius=self.radius, bit_len=self.bit_len)
        if self.is_reg:
            preds = self.clf.predict(fps)
        else:
            preds = self.clf.predict_proba(fps)[:, 1]
        return preds

    @classmethod
    def ECFP_from_SMILES(cls, smiles, radius=3, bit_len=4096, scaffold=0, index=None):
        """Compute Morgan fingerprints for a list of SMILES.

        scaffold: 0 = full molecule, 1 = Murcko scaffold,
        2 = generic Murcko scaffold.
        Molecules that fail to parse/fingerprint yield an all-zero row
        (and the offending SMILES is printed).
        """
        fps = np.zeros((len(smiles), bit_len))
        for i, smile in enumerate(smiles):
            mol = Chem.MolFromSmiles(smile)
            arr = np.zeros((1,))
            # FIX: catch Exception rather than a bare except, which would
            # also swallow KeyboardInterrupt/SystemExit.
            try:
                if scaffold == 1:
                    mol = MurckoScaffold.GetScaffoldForMol(mol)
                elif scaffold == 2:
                    mol = MurckoScaffold.MakeScaffoldGeneric(mol)
                fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=bit_len)
                DataStructs.ConvertToNumpyArray(fp, arr)
                fps[i, :] = arr
            except Exception:
                print(smile)
                fps[i, :] = [0] * bit_len
        return pd.DataFrame(fps, index=(smiles if index is None else index))
def check_smiles(seqs, voc):
    """Decode a batch of index tensors into SMILES and validate them.

    Arguments:
        seqs (LongTensor): m X n indices LongTensor, generally the output
            of RNN sampling. m is No. of samples; n is voc.max_len.
        voc (Voc): the instance of Voc for the SMILES token vocabulary.
    Returns:
        smiles (list): decoded SMILES strings.
        valids (ndarray): np.byte flags, 1 where RDKit parsed the SMILES
            as a molecule, 0 otherwise.
    """
    smiles = [voc.decode(seq) for seq in seqs.cpu()]
    valids = np.array(
        [1 if Chem.MolFromSmiles(s) else 0 for s in smiles], dtype=np.byte
    )
    return smiles, valids
def unique(arr):
    """Return indices selecting one occurrence of each distinct row.

    Arguments:
        arr (LongTensor): m X n indices LongTensor, generally the output
            of RNN sampling.
    Returns:
        LongTensor: sorted row indices (moved to device ``dev``) of the
        unique rows, used for deduplicated decoding.
    """
    data = arr.cpu().numpy()
    # View each row as one opaque fixed-size record so np.unique can
    # deduplicate whole rows at once.
    row_view = np.ascontiguousarray(data).view(
        np.dtype((np.void, data.dtype.itemsize * data.shape[1]))
    )
    _, keep = np.unique(row_view, return_index=True)
    return torch.LongTensor(np.sort(keep)).to(dev)
|
'use strict'

// Build the renderer for the plain web (non-Electron) target.
process.env.BABEL_ENV = 'web'

const path = require('path')
const webpack = require('webpack')
const BabiliWebpackPlugin = require('babili-webpack-plugin')
const CopyWebpackPlugin = require('copy-webpack-plugin')
const MiniCssExtractPlugin = require('mini-css-extract-plugin')
const HtmlWebpackPlugin = require('html-webpack-plugin')
const { VueLoaderPlugin } = require('vue-loader')

let webConfig = {
  devtool: '#cheap-module-eval-source-map',
  entry: {
    web: path.join(__dirname, '../src/renderer/main.js')
  },
  module: {
    rules: [
      // Lint JS and Vue sources before any other loaders run (enforce: 'pre').
      {
        test: /\.(js|vue)$/,
        enforce: 'pre',
        exclude: /node_modules/,
        use: {
          loader: 'eslint-loader',
          options: {
            formatter: require('eslint-friendly-formatter')
          }
        }
      },
      {
        test: /\.css$/,
        use: ['vue-style-loader', 'css-loader']
      },
      {
        test: /\.html$/,
        use: 'vue-html-loader'
      },
      // Transpile only the renderer sources with Babel.
      {
        test: /\.js$/,
        use: 'babel-loader',
        include: [ path.resolve(__dirname, '../src/renderer') ],
        exclude: /node_modules/
      },
      {
        test: /\.vue$/,
        use: {
          loader: 'vue-loader',
          options: {
            extractCSS: true,
            loaders: {
              sass: 'vue-style-loader!css-loader!sass-loader?indentedSyntax=1',
              scss: 'vue-style-loader!css-loader!sass-loader'
            }
          }
        }
      },
      // Inline images smaller than 10000 bytes as data URLs; emit the
      // rest under imgs/.
      {
        test: /\.(png|jpe?g|gif|svg)(\?.*)?$/,
        use: {
          loader: 'url-loader',
          query: {
            limit: 10000,
            name: 'imgs/[name].[ext]'
          }
        }
      },
      // Same treatment for font files, emitted under fonts/.
      {
        test: /\.(woff2?|eot|ttf|otf)(\?.*)?$/,
        use: {
          loader: 'url-loader',
          query: {
            limit: 10000,
            name: 'fonts/[name].[ext]'
          }
        }
      }
    ]
  },
  plugins: [
    new VueLoaderPlugin(),
    new MiniCssExtractPlugin({filename: 'styles.css'}),
    new HtmlWebpackPlugin({
      filename: 'index.html',
      template: path.resolve(__dirname, '../src/index.ejs'),
      minify: {
        collapseWhitespace: true,
        removeAttributeQuotes: true,
        removeComments: true
      },
      nodeModules: false
    }),
    // Let application code branch on the web (vs. Electron) build.
    new webpack.DefinePlugin({
      'process.env.IS_WEB': 'true'
    }),
    new webpack.HotModuleReplacementPlugin(),
    new webpack.NoEmitOnErrorsPlugin()
  ],
  output: {
    filename: '[name].js',
    path: path.join(__dirname, '../dist/web')
  },
  resolve: {
    alias: {
      '@': path.join(__dirname, '../src/renderer'),
      'vue$': 'vue/dist/vue.esm.js'
    },
    extensions: ['.js', '.vue', '.json', '.css']
  },
  target: 'web'
}

/**
 * Adjust webConfig for production settings
 */
if (process.env.NODE_ENV === 'production') {
  // No source maps in production builds.
  webConfig.devtool = ''

  webConfig.plugins.push(
    new BabiliWebpackPlugin({
      removeConsole: true,
      removeDebugger: true
    }),
    // Copy static assets verbatim into the web distribution.
    new CopyWebpackPlugin([
      {
        from: path.join(__dirname, '../static'),
        to: path.join(__dirname, '../dist/web/static'),
        ignore: ['.*']
      }
    ]),
    new webpack.DefinePlugin({
      'process.env.NODE_ENV': '"production"'
    }),
    new webpack.LoaderOptionsPlugin({
      minimize: true
    })
  )
}

module.exports = webConfig
|
SMLTOWN.Util = {
    /**
     * Return the first element of `array` whose id loosely equals `id`,
     * or false when none matches.
     */
    getById: function(array, id) {
        for (var idx = 0; idx < array.length; idx++) {
            var entry = array[idx];
            if (entry.id == id) {
                return entry;
            }
        }
        return false;
    },

    /**
     * Viewport size read from documentElement.clientWidth/Height.
     * See http://help.dottoro.com/ljknuajj.php
     */
    getViewport: function() {
        var root = document.documentElement;
        return {
            width: root.clientWidth,
            height: root.clientHeight
        };
    },

    /** Format a time in seconds as "m:ss". */
    parseTime: function(time) {
        var secs = time % 60;
        var padding = secs < 10 ? "0" : "";
        return ~~(time / 60) + ":" + padding + secs;
    },

    /** Store a cookie on path "/" that expires in one year. */
    setPersistentCookie: function(key, value) {
        console.log(key + " cookie = " + value);
        // Remove any existing copy first, otherwise the websocket and
        // ajax code paths may each find a different duplicate.
        this.deleteCookie(key);
        var expiration_date = new Date();
        expiration_date.setFullYear(expiration_date.getFullYear() + 1);
        document.cookie = key + "=" + value + "; path=/; expires=" + expiration_date.toGMTString();
    },

    /**
     * Read a cookie value by name. When duplicates exist the last one
     * wins (hence `> 1` rather than `== 2`).
     */
    getCookie: function(name) {
        var haystack = "; " + document.cookie;
        var pieces = haystack.split("; " + name + "=");
        if (pieces.length > 1) {
            return pieces.pop().split(";").shift();
        }
    },

    /** Expire the named cookie immediately, if present. */
    deleteCookie: function(name) {
        if (this.getCookie(name)) {
            document.cookie = name + '=; expires=Thu, 01 Jan 1970 00:00:01 GMT;';
        }
    },

    /** Thin wrapper over localStorage.setItem. */
    setLocalStorage: function(key, value) {
        localStorage.setItem(key, value);
    },

    /**
     * Read a key from localStorage, falling back to the cookie of the
     * same name and migrating it into localStorage when found.
     */
    getLocalStorage: function(key) {
        var item = localStorage.getItem(key);
        if (!item) {
            item = this.getCookie(key);
            if (item) {
                localStorage.setItem(key, item);
            }
        }
        return item;
    },

    /**
     * Pass every non-trivial text node under #smltown_html through
     * SMLTOWN.Message.translate.
     */
    translateHTML: function() {
        var textNodes = $("#smltown_html > div *:not(script)").contents().filter(function() {
            return (this.nodeType === 3 && $.trim(this.textContent).length > 1);
        });
        for (var i = 0; i < textNodes.length; i++) {
            var node = textNodes[i];
            node.textContent = SMLTOWN.Message.translate(node.textContent);
        }
    },

    /** True when `number` is a single numeric value (not an array). */
    isNumeric: function(number) {
        return !jQuery.isArray(number) && (number - parseFloat(number) + 1) >= 0;
    }
};
/**
 * Show a modal dialog asking the player for the game's password.
 * Submitting the form sends an addUserInGame request for the current
 * game; cancelling closes the dialog and returns to the game list.
 * `log` is an optional message displayed inside the dialog (e.g. why a
 * previous attempt failed).
 */
SMLTOWN.Game.askPassword = function(log) {
    if (!log) {
        log = "";
    }
    // FIX: the dialog wrapper used to be terminated with "<div>"
    // instead of "</div>", leaving an unclosed element in the DOM.
    $("#smltown_body").append("<div class='smltown_dialog'>"
        + "<form id='smltown_passwordForm'>"
        + "<input type='text' id='smltown_password' placeholder='password'>"
        + "<input type='submit' value='Ok'>"
        + "<div id='smltown_cancel' class='smltown_button'>Cancel</div>"
        + "<div class='smltown_log'>" + log + "</div>"
        + "</form>"
        + "</div>");
    $("#smltown_password").focus();
    $("#smltown_passwordForm").submit(function() {
        var password = $("#smltown_password").val();
        SMLTOWN.Server.request.addUserInGame(SMLTOWN.Game.info.id, password);
        $(".smltown_dialog").remove();
        return false; // suppress the browser's default form submission
    });
    $(".smltown_dialog #smltown_cancel").on("tap", function() {
        console.log("cancel password");
        $(".smltown_dialog").remove();
        SMLTOWN.Load.showPage("gameList");
    });
};
|
'use strict';
var multer = require('multer');
module.exports = {
app: {
title: 'Time Attendance',
description: '',
keywords: 'Time Attendance',
googleAnalyticsTrackingID: process.env.GOOGLE_ANALYTICS_TRACKING_ID || 'GOOGLE_ANALYTICS_TRACKING_ID'
},
port: process.env.PORT || 3000,
templateEngine: 'swig',
// Session Cookie settings
sessionCookie: {
// session expiration is set by default to 24 hours
// maxAge: 24 * (60 * 60 * 1000),
maxAge: 3650 * 24 * (60 * 60 * 1000),
// httpOnly flag makes sure the cookie is only accessed
// through the HTTP protocol and not JS/browser
httpOnly: true,
// secure cookie should be turned to true to provide additional
// layer of security so that the cookie is set only when working
// in HTTPS mode.
secure: false
},
// sessionSecret should be changed for security measures and concerns
sessionSecret: process.env.SESSION_SECRET || 'MEAN',
// sessionKey is set to the generic sessionId key used by PHP applications
// for obsecurity reasons
sessionKey: 'sessionId',
sessionCollection: 'sessions',
logo: 'modules/core/client/img/brand/logo.png',
favicon: 'modules/core/client/img/brand/favicon.ico',
uploads: {
profileUpload: {
// dest: './public/', // Profile upload to public folder
// dest: './modules/users/client/img/profile/uploads/', // Profile upload destination path
storage: multer.diskStorage({
destination: function(req, file, cb) {
cb(null, './public/');
},
filename: function(req, file, cb) {
cb(null, Date.now() + '.jpg');
}
}),
limits: {
fileSize: 1 * 1024 * 1024 // Max file size in bytes (1 MB)
}
},
productUpload: {
// dest: './public/', // Profile upload to public folder
// dest: './modules/products/client/img/uploads/', // Profile upload destination path
storage: multer.diskStorage({
destination: function(req, file, cb) {
cb(null, './public/');
},
filename: function(req, file, cb) {
cb(null, Date.now() + '.jpg');
}
}),
limits: {
fileSize: 1 * 1024 * 1024 // Max file size in bytes (1 MB)
}
},
mbjnewsUpload: {
storage: multer.diskStorage({
destination: function(req, file, cb) {
cb(null, './public/');
},
filename: function(req, file, cb) {
cb(null, Date.now() + '.jpg');
}
}),
limits: {
fileSize: 1 * 1024 * 1024 // Max file size in bytes (1 MB)
}
}
}
};
|
import React from "react";
import TextFieldBase from "./TextFieldBase";
import useTextFieldStyles from "./textFieldStyles";

/**
 * A compact, outlined, full-width text field.
 *
 * Thin wrapper around TextFieldBase that applies the shared text-field
 * styles plus the small-height input style (`classes.smallInputStyles`
 * via `heightCtrl`). All received props are spread last, so callers may
 * override any of the defaults set here.
 */
const SmallTextField = (props) => {
  const classes = useTextFieldStyles();
  return (
    <TextFieldBase
      classes={{
        root: classes.inputField,
      }}
      fullWidth
      variant="outlined"
      InputProps={{
        classes: {
          root: classes.textfield,
          input: classes.textfieldInput,
        },
      }}
      heightCtrl={{ input: classes.smallInputStyles }}
      {...props}
    />
  );
};

export default SmallTextField;
|
from unittest import TestCase
from OO.carro import Motor, Direcao
class CarroTestCase(TestCase):
    """Unit tests for the Motor and Direcao components of the car."""

    # --- Motor ---

    def teste_velocidade(self):
        """A newly created motor reports speed 0."""
        self.assertEqual(0, Motor().velocidade)

    def teste_acelerar(self):
        """Accelerating once raises the speed to 1."""
        motor = Motor()
        motor.acelerar()
        self.assertEqual(1, motor.velocidade)

    def teste_frear(self):
        """Braking once after four accelerations leaves the speed at 2."""
        motor = Motor()
        for _ in range(4):
            motor.acelerar()
        motor.frear()
        self.assertEqual(2, motor.velocidade)

    # --- Direção ---

    def teste_direcao(self):
        """A new steering wheel starts pointing North ('Norte')."""
        self.assertEqual('Norte', Direcao().valor)

    def teste_girar_a_direita(self):
        """Turning right cycles Norte -> Leste -> Sul."""
        direcao = Direcao()
        direcao.girar_a_direita()
        self.assertEqual('Leste', direcao.valor)
        direcao.girar_a_direita()
        self.assertEqual('Sul', direcao.valor)

    def teste_girar_a_esquerda(self):
        """Turning left cycles Norte -> Oeste -> Sul."""
        direcao = Direcao()
        direcao.girar_a_esquerda()
        self.assertEqual('Oeste', direcao.valor)
        direcao.girar_a_esquerda()
        self.assertEqual('Sul', direcao.valor)
|
// Generated, minified webpack bundle (webpackJsonp runtime + module 47:
// a PIXI/d3 force-directed globe visualisation with autoComplete search).
// Do not edit by hand — regenerate from the original sources instead.
!function(e){function t(t){for(var o,s,a=t[0],l=t[1],c=t[2],h=0,u=[];h<a.length;h++)s=a[h],Object.prototype.hasOwnProperty.call(i,s)&&i[s]&&u.push(i[s][0]),i[s]=0;for(o in l)Object.prototype.hasOwnProperty.call(l,o)&&(e[o]=l[o]);for(d&&d(t);u.length;)u.shift()();return r.push.apply(r,c||[]),n()}function n(){for(var e,t=0;t<r.length;t++){for(var n=r[t],o=!0,a=1;a<n.length;a++){var l=n[a];0!==i[l]&&(o=!1)}o&&(r.splice(t--,1),e=s(s.s=n[0]))}return e}var o={},i={0:0},r=[];function s(t){if(o[t])return o[t].exports;var n=o[t]={i:t,l:!1,exports:{}};return e[t].call(n.exports,n,n.exports,s),n.l=!0,n.exports}s.m=e,s.c=o,s.d=function(e,t,n){s.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},s.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},s.t=function(e,t){if(1&t&&(e=s(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(s.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)s.d(n,o,function(t){return e[t]}.bind(null,o));return n},s.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return s.d(t,"a",t),t},s.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},s.p="";var a=window.webpackJsonp=window.webpackJsonp||[],l=a.push.bind(a);a.push=t,a=a.slice();for(var c=0;c<a.length;c++)t(a[c]);var d=l;r.push([47,1]),n()}({34:function(e,t,n){},47:function(e,t,n){"use strict";n.r(t);n(33),n(34);var o=n(9),i=n.p+"nodes.json",r=n.p+"links.json",a=n(6),l=n(27),c=n.n(l),d=()=>{document.querySelector("#autoComplete").addEventListener("autoComplete",e=>{console.log(e)});new 
c.a({data:{src:async()=>s.nodes,key:["name"],cache:!1},sort:(e,t)=>e.match<t.match?-1:e.match>t.match?1:0,placeHolder:"Search",selector:"#autoComplete",threshold:0,debounce:0,searchEngine:"loose",highlight:!0,maxResults:10,resultsList:{render:!0,container:e=>{e.setAttribute("id","autoComplete_list")},destination:document.querySelector("#autoComplete"),position:"afterend",element:"ul"},resultItem:{content:(e,t)=>{t.innerHTML=e.match},element:"li"},noResults:()=>{const e=document.createElement("li");e.setAttribute("class","no_result"),e.setAttribute("tabindex","1"),e.innerHTML="No Results",document.querySelector("#autoComplete_list").appendChild(e)},onSelection:e=>{console.log(e);e.selection.key;const t=e.selection.value,{x:n,y:o,name:i}=t;document.querySelector("#autoComplete").value=i;const r=new a.Point(n,o),l=new a.Point(0,0),c=()=>s.pixi.snapZoom({width:100,height:100,time:1e3,center:r,ease:"easeInOutSine",forceStart:!0,removeOnComplete:!0});s.pixi.scale.x<1?(console.log("in"),c()):(console.log("out"),s.pixi.snapZoom({width:1e3,height:1e3,time:1e3,center:l,ease:"easeInOutSine",forceStart:!0,removeOnComplete:!0}),setTimeout(c(),1e3))}})},h=n(28),u=n(29),p=n.n(u);let f;var m=n(18);let w;const y=o.geoMercator(),g=o.geoPath(y);let v,x,b,S=[];const k=16777215,C=new a.TextStyle({fontFamily:"Arial",fontSize:24,fill:k,align:"center"});let M;const z=16777215;new a.Point(1/0,1/0);const O=o.geoMercator();o.geoPath(O);var P=()=>{const e=document.querySelector("canvas#background");e.width=window.innerWidth,e.height=window.innerHeight,document.querySelector("body").prepend(e);const t=e.getContext("2d",{alpha:!1}),n=t.createRadialGradient(e.width/2,e.height/2,0,e.width/2,e.height/2,e.width/2);n.addColorStop(1,d3.rgb(0,0,0)),n.addColorStop(0,d3.rgb(255,255,255)),t.fillStyle=n,t.fillRect(0,0,e.width,e.height)},j=(n(48),n(32));Math.cos,Math.sin;const T=Math.atan2,E=e=>{return[T(e[1],e[0]),(t=e[2],t>1?halfPi:t<-1?-halfPi:Math.asin(t))];var t};let q;function 
H(){w.removeChildren().forEach(e=>e.destroy()),console.log("hey"),s.links.forEach(e=>{const t=g({type:"LineString",coordinates:[e.source.spherical,e.target.spherical]});let n=document.createElement("svg");n.style.width="300",n.innerHTML=`<path stroke="black" stroke-width=".1" fill='none' d='${t}' />`;const o=new m.a(n);w.addChild(o)})}window.d3=o,window.s={distance:30,links:r,nodes:i,tokens:[]},Promise.all([o.json(i),o.json(r)]).then(([e,t])=>{s.links=t,s.nodes=e,console.log("nodes",s.nodes.length),console.log("links",s.links.length),(()=>{const e=new a.Application({width:window.innerWidth,height:window.innerHeight,antialias:!0,transparent:!0,resolution:2,autoDensity:!0,autoResize:!0,resizeTo:window});document.body.prepend(e.view);const t=new h.a({screenWidth:window.innerWidth,screenHeight:window.innerHeight,interaction:e.renderer.plugins.interaction});e.stage.addChild(t),s.pixi=t,t.drag().pinch().wheel().decelerate().clampZoom({minScale:.3,maxScale:5}),window.addEventListener("wheel",e=>{e.preventDefault()},{passive:!1})})(),(()=>{const e=p()({every:1});setInterval(()=>{e.tick()},1e3/60);const t=document.getElementById("fps");e.on("data",(function(e){t.innerHTML=Math.floor(parseInt(e))}))})(),function(){const e=new a.Graphics;f=s.pixi.addChild(e)}(),function(){const e=new a.Graphics;w=s.pixi.addChild(e)}(),function(){const e=new a.Graphics;M=s.pixi.addChild(e),new a.TextStyle({fontFamily:"Arial",fontSize:5,fill:z,align:"center"}),s.nodes.forEach(e=>{})}(),function(){const e=new a.Graphics;v=s.pixi.addChild(e),x=Math.pow(2*s.distance-2,2),b=Math.pow(2*s.distance+2,2),S=s.links.filter(e=>e.value>.01),S.forEach(e=>{const[t,n]=Object.entries(e.tokens)[0],o=7e-4*n;e.txt=new 
a.Text(t,C),e.txt.scale.set(o),e.txt.position.set(1/0,1/0),v.addChild(e.txt)})}(),P(),j.a().numDimensions(3).nodes(s.nodes).force("collide",d3.forceCollide().radius(60)).force("charge",d3.forceManyBody().strength(-20)).force("link",d3.forceLink(s.links).id(e=>e.id).strength(e=>1*e.value)).force("center",d3.forceCenter()).force("surface",(function(e){const t=Math.sqrt(d3.median(s.nodes.map(e=>e.x**2+e.y**2+2*e.z))),n=15*Math.sqrt(s.nodes.length);q=t;for(const e of s.nodes){e.fx&&(e.x=e.fx),e.fy&&(e.y=e.fy),e.fz&&(e.z=e.fz),e.norm=Math.sqrt(e.x**2+e.y**2+e.z**2),e.norm||(e.norm=1),e.cartesian=[e.x/e.norm,e.y/e.norm,e.z/e.norm],e.spherical=E([e.x/e.norm,e.y/e.norm,e.z/e.norm]).map(e=>180*e/Math.PI);{const t=(1+n/e.norm)/2;e.x=e.x*t,e.y=e.y*t,e.z=e.z*t}{const t=(e.vx*e.x+e.vy*e.y+e.vz*e.z)/e.norm**2;e.vx-=e.x*t,e.vy-=e.y*t,e.vz-=e.z*t}}})).tick().on("tick",H),d(),window.onresize=function(){P(),s.pixi.resize()}})}});
|
from __future__ import absolute_import, division, print_function
from collections import namedtuple
from libtbx.phil import parse
from dials.algorithms.refinement import DialsRefineConfigError
from scitbx.array_family import flex
from scitbx import sparse
from dials.algorithms.refinement.restraints.restraints import SingleUnitCellTie
from dials.algorithms.refinement.restraints.restraints import MeanUnitCellTie
from dials.algorithms.refinement.restraints.restraints import LowMemoryMeanUnitCellTie
from dials.algorithms.refinement.restraints.restraints import MedianUnitCellTie
# PHIL options for unit cell restraints
# Raw PHIL definition of the two restraint styles: tie_to_target (one
# crystal's cell tied to explicit target values) and tie_to_group (a
# group of cells tied to a shared statistic).
uc_phil_str = """
restraints
.help = "Least squares unit cell restraints to use in refinement."
.expert_level = 1
{
tie_to_target
.multiple = True
{
values = None
.type = floats(size=6)
.help = "Target unit cell parameters for the restraint for this"
"parameterisation"
sigmas = None
.help = "The unit cell target values are associated with sigmas which are"
"used to determine the weight of each restraint. A sigma of zero"
"will remove the restraint at that position. If symmetry"
"constrains two cell dimensions to be equal then only the"
"smaller of the two sigmas will be kept"
.type = floats(size=6, value_min=0.)
id = None
.help = "Select only the specified experiments when looking up which"
"parameterisations to apply these restraints to. If an identified"
"parameterisation affects multiple experiments then the index"
"of any one of those experiments suffices to restrain that"
"parameterisation. If None (the default) then the restraints"
"will be applied to all experiments."
.type = ints(value_min=0)
}
tie_to_group
.multiple = True
{
target = *mean low_memory_mean median
.type = choice
.help = "Function to tie group parameter values to"
sigmas = None
.help = "The unit cell parameters are associated with sigmas which are"
"used to determine the weight of each restraint. A sigma of zero"
"will remove the restraint at that position."
.type = floats(size=6, value_min=0.)
id = None
.help = "Select only the specified experiments when looking up which "
"parameterisations to apply these restraints to. For every"
"parameterisation that requires a restraint at least one"
"experiment index must be supplied. If None (the default) the"
"restraints will be applied to all experiments."
.type = ints(value_min=0)
}
}
"""
# Parsed PHIL scope, ready to be included in a master phil.
uc_phil_scope = parse(uc_phil_str)
# Define a couple of namedtuple types we will use for convenience:
# ParamIndex pairs a model parameterisation with the index of its first
# free parameter in the global parameter vector; RestraintIndex pairs a
# restraint object with the same kind of start index (or list of them).
ParamIndex = namedtuple("ParamIndex", ["parameterisation", "istart"])
RestraintIndex = namedtuple("RestraintIndex", ["restraint", "istart"])
class RestraintsParameterisation(object):
    """Build and evaluate unit cell restraints for refinement.

    Records, for every model parameterisation, where its free parameters
    sit in the global parameter vector, so that restraint gradients can
    be placed into the correct columns of a sparse Jacobian. At most one
    restraint may be attached to each parameterisation.
    """

    def __init__(
        self,
        detector_parameterisations=None,
        beam_parameterisations=None,
        xl_orientation_parameterisations=None,
        xl_unit_cell_parameterisations=None,
        goniometer_parameterisations=None,
    ):
        # Treat None as "no parameterisations of this type"
        if detector_parameterisations is None:
            detector_parameterisations = []
        if beam_parameterisations is None:
            beam_parameterisations = []
        if xl_orientation_parameterisations is None:
            xl_orientation_parameterisations = []
        if xl_unit_cell_parameterisations is None:
            xl_unit_cell_parameterisations = []
        if goniometer_parameterisations is None:
            goniometer_parameterisations = []

        # Keep references to all parameterised models
        self._detector_parameterisations = detector_parameterisations
        self._beam_parameterisations = beam_parameterisations
        self._xl_orientation_parameterisations = xl_orientation_parameterisations
        self._xl_unit_cell_parameterisations = xl_unit_cell_parameterisations
        self._goniometer_parameterisations = goniometer_parameterisations

        # Loop over all parameterisations, extract experiment IDs and record
        # global parameter index for each that tells us which parameters have
        # non-zero derivatives
        iparam = 0
        self._exp_to_det_param = {}
        for detp in self._detector_parameterisations:
            for iexp in detp.get_experiment_ids():
                self._exp_to_det_param[iexp] = ParamIndex(detp, iparam)
            iparam += detp.num_free()

        self._exp_to_beam_param = {}
        for beamp in self._beam_parameterisations:
            for iexp in beamp.get_experiment_ids():
                self._exp_to_beam_param[iexp] = ParamIndex(beamp, iparam)
            iparam += beamp.num_free()

        self._exp_to_xlo_param = {}
        for xlop in self._xl_orientation_parameterisations:
            for iexp in xlop.get_experiment_ids():
                self._exp_to_xlo_param[iexp] = ParamIndex(xlop, iparam)
            iparam += xlop.num_free()

        self._exp_to_xluc_param = {}
        for xlucp in self._xl_unit_cell_parameterisations:
            for iexp in xlucp.get_experiment_ids():
                self._exp_to_xluc_param[iexp] = ParamIndex(xlucp, iparam)
            iparam += xlucp.num_free()

        self._exp_to_gon_param = {}
        for gonp in self._goniometer_parameterisations:
            for iexp in gonp.get_experiment_ids():
                self._exp_to_gon_param[iexp] = ParamIndex(gonp, iparam)
            iparam += gonp.num_free()

        # the number of free parameters
        self._nparam = iparam

        # keep a set that will ensure every model parameterisation only gets
        # a single restraint.
        self._param_to_restraint = set()

        # keep lists of restraint objects that we will add
        self._single_model_restraints = []
        self._group_model_restraints = []

    def add_restraints_to_target_xl_unit_cell(self, experiment_id, values, sigma):
        """Tie one crystal's unit cell to explicit target values.

        Silently does nothing when the experiment has no unit cell
        parameterisation; raises DialsRefineConfigError when the
        parameterisation is already restrained.
        """
        # On input we will have one id value, 6 target values and 6 sigmas.

        # select the right parameterisation, if one exists
        try:
            param_i = self._exp_to_xluc_param[experiment_id]
        except KeyError:
            return

        # fail now if this is already restrained.
        if param_i.parameterisation in self._param_to_restraint:
            raise DialsRefineConfigError(
                "Parameterisation already restrained. Cannot create "
                "additional restraint with experiment {}".format(experiment_id)
            )

        # create new restraint
        tie = SingleUnitCellTie(
            model_parameterisation=param_i.parameterisation, target=values, sigma=sigma
        )

        # add to the restraint list along with the global parameter index
        self._single_model_restraints.append(RestraintIndex(tie, param_i.istart))

        # also add the parameterisation to the set for uniqueness testing
        self._param_to_restraint.add(param_i.parameterisation)

    def add_restraints_to_group_xl_unit_cell(self, target, experiment_ids, sigma):
        """Tie a group of crystals' unit cells to a shared statistic.

        ``target`` selects the statistic: "mean", "low_memory_mean" or
        "median". ``experiment_ids`` may be the string "all" or a list of
        experiment indices. Raises DialsRefineConfigError when any
        selected parameterisation is already restrained or the target
        type is unknown.
        """
        # select the right parameterisations, if they exist
        if experiment_ids == "all":
            param_indices = list(self._exp_to_xluc_param.values())
        else:
            param_indices = []
            for exp_id in experiment_ids:
                try:
                    param_indices.append(self._exp_to_xluc_param[exp_id])
                except KeyError:
                    # ignore experiment without a parameterisation
                    pass
        params = [e.parameterisation for e in param_indices]
        istarts = [e.istart for e in param_indices]

        # fail if any of the parameterisations has already been restrained.
        for param in params:
            if param in self._param_to_restraint:
                raise DialsRefineConfigError(
                    "Parameterisation already restrained. Cannot create "
                    "additional group restraint for experiment(s) {}".format(
                        str(experiment_ids)
                    )
                )

        # create new group of restraints
        if target == "mean":
            tie = MeanUnitCellTie(model_parameterisations=params, sigma=sigma)
        elif target == "low_memory_mean":
            tie = LowMemoryMeanUnitCellTie(model_parameterisations=params, sigma=sigma)
        elif target == "median":
            tie = MedianUnitCellTie(model_parameterisations=params, sigma=sigma)
        else:
            raise DialsRefineConfigError("target type {} not available".format(target))

        # add to the restraint list along with the global parameter indices
        self._group_model_restraints.append(RestraintIndex(tie, istarts))

    @property
    def num_residuals(self):
        """Get the total number of residuals across all parameterised restraints"""
        n_single = sum(e.restraint.num_residuals for e in self._single_model_restraints)
        n_group = sum(e.restraint.num_residuals for e in self._group_model_restraints)
        return n_single + n_group

    def get_residuals_gradients_and_weights(self):
        """Evaluate all restraints.

        Returns:
            tuple: (residuals, gradients, weights), where residuals and
            weights are flex.double arrays and gradients is a sparse
            matrix with one row per residual and one column per free
            parameter of the global parameterisation.
        """
        residuals = flex.double()
        weights = flex.double()
        row_start = []
        irow = 0

        # process restraints residuals and weights for single models
        for r in self._single_model_restraints:
            res = r.restraint.residuals()
            wgt = r.restraint.weights()
            residuals.extend(flex.double(res))
            weights.extend(flex.double(wgt))
            row_start.append(irow)
            irow += len(res)

        # keep track of the row at the start of group models
        group_model_irow = irow

        # process restraints residuals and weights for groups of models
        for r in self._group_model_restraints:
            residuals.extend(flex.double(r.restraint.residuals()))
            weights.extend(flex.double(r.restraint.weights()))

        # now it is clear how many residuals there are we can set up a sparse
        # matrix for the restraints jacobian
        nrows = len(residuals)
        gradients = sparse.matrix(nrows, self._nparam)

        # assign gradients in blocks for the single model restraints
        for irow, r in zip(row_start, self._single_model_restraints):
            icol = r.istart
            # convert square list-of-lists into a 2D array for block assignment
            grads = flex.double(r.restraint.gradients())
            gradients.assign_block(grads, irow, icol)

        # assign gradients in blocks for the group model restraints
        for r in self._group_model_restraints:
            # loop over the included unit cell models, k
            for k, (icol, grads) in enumerate(zip(r.istart, r.restraint.gradients())):
                irow = group_model_irow
                for grad in grads:
                    gradients.assign_block(grad, irow, icol)
                    irow += grad.n_rows
            group_model_irow = irow

        return residuals, gradients, weights
|
/**
* \file
*
* \brief SAM Clock Driver
*
* Copyright (C) 2012-2014 Atmel Corporation. All rights reserved.
*
* \asf_license_start
*
* \page License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. The name of Atmel may not be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* 4. This software may only be redistributed and used in connection with an
* Atmel microcontroller product.
*
* THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* \asf_license_stop
*
*/
#ifndef SYSTEM_CLOCK_H_INCLUDED
#define SYSTEM_CLOCK_H_INCLUDED
#ifdef __cplusplus
extern "C" {
#endif
/**
* \defgroup asfdoc_sam0_system_clock_group SAM System Clock Management Driver (SYSTEM CLOCK)
*
* This driver for SAM devices provides an interface for the configuration
* and management of the device's clocking related functions. This includes
* the various clock sources, bus clocks and generic clocks within the device,
* with functions to manage the enabling, disabling, source selection and
* prescaling of clocks to various internal peripherals.
*
* The following peripherals are used by this module:
*
* - GCLK (Generic Clock Management)
* - PM (Power Management)
* - SYSCTRL (Clock Source Control)
*
* The following devices can use this module:
* - SAM D20/D21
* - SAM R21
* - SAM D10/D11
*
* The outline of this documentation is as follows:
* - \ref asfdoc_sam0_system_clock_prerequisites
* - \ref asfdoc_sam0_system_clock_module_overview
* - \ref asfdoc_sam0_system_clock_special_considerations
* - \ref asfdoc_sam0_system_clock_extra_info
* - \ref asfdoc_sam0_system_clock_examples
* - \ref asfdoc_sam0_system_clock_api_overview
*
*
* \section asfdoc_sam0_system_clock_prerequisites Prerequisites
*
* There are no prerequisites for this module.
*
*
* \section asfdoc_sam0_system_clock_module_overview Module Overview
* The SAM devices contain a sophisticated clocking system, which is designed
* to give the maximum flexibility to the user application. This system allows
* a system designer to tune the performance and power consumption of the device
* in a dynamic manner, to achieve the best trade-off between the two for a
* particular application.
*
* This driver provides a set of functions for the configuration and management
* of the various clock related functionality within the device.
*
* \subsection asfdoc_sam0_system_clock_module_features Driver Feature Macro Definition
* <table>
* <tr>
* <th>Driver Feature Macro</th>
* <th>Supported devices</th>
* </tr>
* <tr>
* <td>FEATURE_SYSTEM_CLOCK_DPLL</td>
* <td>SAMD21, SAMR21, SAMD10, SAMD11</td>
* </tr>
* </table>
* \note The specific features are only available in the driver when the
* selected device supports those features.
*
* \subsection asfdoc_sam0_system_clock_module_overview_clock_sources Clock Sources
 * The SAM devices have a number of master clock source modules, each of
 * which is capable of producing a stabilized output frequency that can then
 * be fed into the various peripherals and modules within the device.
*
* Possible clock source modules include internal R/C oscillators, internal
* DFLL modules, as well as external crystal oscillators and/or clock inputs.
*
* \subsection asfdoc_sam0_system_clock_module_overview_cpu_clock CPU / Bus Clocks
* The CPU and AHB/APBx buses are clocked by the same physical clock source
* (referred in this module as the Main Clock), however the APBx buses may
* have additional prescaler division ratios set to give each peripheral bus a
* different clock speed.
*
* The general main clock tree for the CPU and associated buses is shown in
* \ref asfdoc_sam0_system_clock_module_clock_tree "the figure below".
*
* \anchor asfdoc_sam0_system_clock_module_clock_tree
* \dot
* digraph overview {
* rankdir=LR;
* clk_src [label="Clock Sources", shape=none, height=0];
* node [label="CPU Bus" shape=ellipse] cpu_bus;
* node [label="AHB Bus" shape=ellipse] ahb_bus;
* node [label="APBA Bus" shape=ellipse] apb_a_bus;
* node [label="APBB Bus" shape=ellipse] apb_b_bus;
* node [label="APBC Bus" shape=ellipse] apb_c_bus;
* node [label="Main Bus\nPrescaler" shape=square] main_prescaler;
* node [label="APBA Bus\nPrescaler" shape=square] apb_a_prescaler;
* node [label="APBB Bus\nPrescaler" shape=square] apb_b_prescaler;
* node [label="APBC Bus\nPrescaler" shape=square] apb_c_prescaler;
* node [label="", shape=polygon, sides=4, distortion=0.6, orientation=90, style=filled, fillcolor=black, height=0.9, width=0.2] main_clock_mux;
*
* clk_src -> main_clock_mux;
* main_clock_mux -> main_prescaler;
* main_prescaler -> cpu_bus;
* main_prescaler -> ahb_bus;
* main_prescaler -> apb_a_prescaler;
* main_prescaler -> apb_b_prescaler;
* main_prescaler -> apb_c_prescaler;
* apb_a_prescaler -> apb_a_bus;
* apb_b_prescaler -> apb_b_bus;
* apb_c_prescaler -> apb_c_bus;
* }
* \enddot
*
* \subsection asfdoc_sam0_system_clock_module_overview_clock_masking Clock Masking
* To save power, the input clock to one or more peripherals on the AHB and APBx
* buses can be masked away - when masked, no clock is passed into the module.
* Disabling of clocks of unused modules will prevent all access to the masked
* module, but will reduce the overall device power consumption.
*
* \subsection asfdoc_sam0_system_clock_module_overview_gclk Generic Clocks
* Within the SAM devices are a number of Generic Clocks; these are used to
* provide clocks to the various peripheral clock domains in the device in a
* standardized manner. One or more master source clocks can be selected as the
* input clock to a Generic Clock Generator, which can prescale down the input
* frequency to a slower rate for use in a peripheral.
*
* Additionally, a number of individually selectable Generic Clock Channels are
* provided, which multiplex and gate the various generator outputs for one or
* more peripherals within the device. This setup allows for a single common
* generator to feed one or more channels, which can then be enabled or disabled
* individually as required.
*
* \anchor asfdoc_sam0_system_clock_module_chain_overview
* \dot
* digraph overview {
* rankdir=LR;
* node [label="Clock\nSource a" shape=square] system_clock_source;
* node [label="Generator 1" shape=square] clock_gen;
* node [label="Channel x" shape=square] clock_chan0;
* node [label="Channel y" shape=square] clock_chan1;
* node [label="Peripheral x" shape=ellipse style=filled fillcolor=lightgray] peripheral0;
* node [label="Peripheral y" shape=ellipse style=filled fillcolor=lightgray] peripheral1;
*
* system_clock_source -> clock_gen;
* clock_gen -> clock_chan0;
* clock_chan0 -> peripheral0;
* clock_gen -> clock_chan1;
* clock_chan1 -> peripheral1;
* }
* \enddot
*
* \subsubsection asfdoc_sam0_system_clock_module_chain_example Clock Chain Example
* An example setup of a complete clock chain within the device is shown in
* \ref asfdoc_sam0_system_clock_module_chain_example_fig "the figure below".
*
* \anchor asfdoc_sam0_system_clock_module_chain_example_fig
* \dot
* digraph overview {
* rankdir=LR;
* node [label="External\nOscillator" shape=square] system_clock_source0;
* node [label="Generator 0" shape=square] clock_gen0;
* node [label="Channel x" shape=square] clock_chan0;
* node [label="Core CPU" shape=ellipse style=filled fillcolor=lightgray] peripheral0;
*
* system_clock_source0 -> clock_gen0;
* clock_gen0 -> clock_chan0;
* clock_chan0 -> peripheral0;
* node [label="8MHz R/C\nOscillator (OSC8M)" shape=square fillcolor=white] system_clock_source1;
* node [label="Generator 1" shape=square] clock_gen1;
* node [label="Channel y" shape=square] clock_chan1;
* node [label="Channel z" shape=square] clock_chan2;
* node [label="SERCOM\nModule" shape=ellipse style=filled fillcolor=lightgray] peripheral1;
* node [label="Timer\nModule" shape=ellipse style=filled fillcolor=lightgray] peripheral2;
*
* system_clock_source1 -> clock_gen1;
* clock_gen1 -> clock_chan1;
* clock_gen1 -> clock_chan2;
* clock_chan1 -> peripheral1;
* clock_chan2 -> peripheral2;
* }
* \enddot
*
* \subsubsection asfdoc_sam0_system_clock_module_overview_gclk_generators Generic Clock Generators
* Each Generic Clock generator within the device can source its input clock
* from one of the provided Source Clocks, and prescale the output for one or
* more Generic Clock Channels in a one-to-many relationship. The generators
* thus allow for several clocks to be generated of different frequencies,
* power usages and accuracies, which can be turned on and off individually to
* disable the clocks to multiple peripherals as a group.
*
* \subsubsection asfdoc_sam0_system_clock_module_overview_gclk_channels Generic Clock Channels
* To connect a Generic Clock Generator to a peripheral within the
* device, a Generic Clock Channel is used. Each peripheral or
* peripheral group has an associated Generic Clock Channel, which serves as the
* clock input for the peripheral(s). To supply a clock to the peripheral
* module(s), the associated channel must be connected to a running Generic
* Clock Generator and the channel enabled.
*
* \section asfdoc_sam0_system_clock_special_considerations Special Considerations
*
* There are no special considerations for this module.
*
*
* \section asfdoc_sam0_system_clock_extra_info Extra Information
*
* For extra information see \ref asfdoc_sam0_system_clock_extra. This includes:
* - \ref asfdoc_sam0_system_clock_extra_acronyms
* - \ref asfdoc_sam0_system_clock_extra_dependencies
* - \ref asfdoc_sam0_system_clock_extra_errata
* - \ref asfdoc_sam0_system_clock_extra_history
*
*
* \section asfdoc_sam0_system_clock_examples Examples
*
* For a list of examples related to this driver, see
* \ref asfdoc_sam0_system_clock_exqsg.
*
*
* \section asfdoc_sam0_system_clock_api_overview API Overview
* @{
*/
#include <compiler.h>
#include <gclk.h>
/**
* \name Driver feature definition
* Define system clock features set according to different device family.
* @{
*/
#if (SAMD21) || (SAMR21) || (SAMD11) || (SAMD10) || defined(__DOXYGEN__)
/** Digital Phase Locked Loop (DPLL) feature support */
# define FEATURE_SYSTEM_CLOCK_DPLL
#endif
/*@}*/
/**
 * \brief Available start-up times for the XOSC32K
 *
 * Available external 32KHz oscillator start-up times, as a number of external
 * clock cycles.
 *
 * \note Enumerator order is significant: the implicit values (0..7)
 * presumably encode the oscillator STARTUP register field directly — do not
 * reorder or insert entries (verify against the device datasheet).
 */
enum system_xosc32k_startup {
/** Wait 0 clock cycles until the clock source is considered stable */
SYSTEM_XOSC32K_STARTUP_0,
/** Wait 32 clock cycles until the clock source is considered stable */
SYSTEM_XOSC32K_STARTUP_32,
/** Wait 2048 clock cycles until the clock source is considered stable */
SYSTEM_XOSC32K_STARTUP_2048,
/** Wait 4096 clock cycles until the clock source is considered stable */
SYSTEM_XOSC32K_STARTUP_4096,
/** Wait 16384 clock cycles until the clock source is considered stable */
SYSTEM_XOSC32K_STARTUP_16384,
/** Wait 32768 clock cycles until the clock source is considered stable */
SYSTEM_XOSC32K_STARTUP_32768,
/** Wait 65536 clock cycles until the clock source is considered stable */
SYSTEM_XOSC32K_STARTUP_65536,
/** Wait 131072 clock cycles until the clock source is considered stable */
SYSTEM_XOSC32K_STARTUP_131072,
};
/**
 * \brief Available start-up times for the XOSC
 *
 * Available external oscillator start-up times, as a number of external clock
 * cycles.
 *
 * \note Enumerator order is significant: the implicit values (0..15)
 * presumably encode the oscillator STARTUP register field directly — do not
 * reorder or insert entries (verify against the device datasheet).
 */
enum system_xosc_startup {
/** Wait 1 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_1,
/** Wait 2 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_2,
/** Wait 4 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_4,
/** Wait 8 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_8,
/** Wait 16 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_16,
/** Wait 32 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_32,
/** Wait 64 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_64,
/** Wait 128 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_128,
/** Wait 256 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_256,
/** Wait 512 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_512,
/** Wait 1024 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_1024,
/** Wait 2048 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_2048,
/** Wait 4096 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_4096,
/** Wait 8192 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_8192,
/** Wait 16384 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_16384,
/** Wait 32768 clock cycles until the clock source is considered stable */
SYSTEM_XOSC_STARTUP_32768,
};
/**
 * \brief Available start-up times for the OSC32K
 *
 * Available internal 32KHz oscillator start-up times, as a number of internal
 * OSC32K clock cycles.
 *
 * \note Enumerator order is significant: the implicit values (0..7)
 * presumably encode the oscillator STARTUP register field directly — do not
 * reorder or insert entries (verify against the device datasheet).
 */
enum system_osc32k_startup {
/** Wait 3 clock cycles until the clock source is considered stable */
SYSTEM_OSC32K_STARTUP_3,
/** Wait 4 clock cycles until the clock source is considered stable */
SYSTEM_OSC32K_STARTUP_4,
/** Wait 6 clock cycles until the clock source is considered stable */
SYSTEM_OSC32K_STARTUP_6,
/** Wait 10 clock cycles until the clock source is considered stable */
SYSTEM_OSC32K_STARTUP_10,
/** Wait 18 clock cycles until the clock source is considered stable */
SYSTEM_OSC32K_STARTUP_18,
/** Wait 34 clock cycles until the clock source is considered stable */
SYSTEM_OSC32K_STARTUP_34,
/** Wait 66 clock cycles until the clock source is considered stable */
SYSTEM_OSC32K_STARTUP_66,
/** Wait 130 clock cycles until the clock source is considered stable */
SYSTEM_OSC32K_STARTUP_130,
};
/**
 * \brief Division prescalers for the internal 8MHz system clock
 *
 * Available prescalers for the internal 8MHz (nominal) system clock.
 *
 * \note Enumerator order is significant: the implicit values (0..3)
 * presumably encode the register prescaler field (divide by 2^value) — do
 * not reorder (verify against the device datasheet).
 */
enum system_osc8m_div {
/** Do not divide the 8MHz RC oscillator output */
SYSTEM_OSC8M_DIV_1,
/** Divide the 8MHz RC oscillator output by 2 */
SYSTEM_OSC8M_DIV_2,
/** Divide the 8MHz RC oscillator output by 4 */
SYSTEM_OSC8M_DIV_4,
/** Divide the 8MHz RC oscillator output by 8 */
SYSTEM_OSC8M_DIV_8,
};
/**
 * \brief Frequency range for the internal 8MHz RC oscillator
 *
 * Internal 8MHz RC oscillator frequency range setting.
 *
 * \note Enumerator order is significant: the implicit values (0..3)
 * presumably encode the oscillator FRANGE register field — do not reorder
 * (verify against the device datasheet).
 */
enum system_osc8m_frequency_range {
/** Frequency range 4MHz to 6MHz */
SYSTEM_OSC8M_FREQUENCY_RANGE_4_TO_6,
/** Frequency range 6MHz to 8MHz */
SYSTEM_OSC8M_FREQUENCY_RANGE_6_TO_8,
/** Frequency range 8MHz to 11MHz */
SYSTEM_OSC8M_FREQUENCY_RANGE_8_TO_11,
/** Frequency range 11MHz to 15MHz */
SYSTEM_OSC8M_FREQUENCY_RANGE_11_TO_15,
};
/**
 * \brief Main CPU and APB/AHB bus clock source prescaler values
 *
 * Available division ratios for the CPU and APB/AHB bus clocks.
 *
 * \note Enumerator order is significant: the implicit values (0..7)
 * presumably encode the CPUSEL/APBxSEL register fields (divide by 2^value),
 * matching the shift-based frequency helpers below — do not reorder.
 */
enum system_main_clock_div {
/** Divide Main clock by 1 */
SYSTEM_MAIN_CLOCK_DIV_1,
/** Divide Main clock by 2 */
SYSTEM_MAIN_CLOCK_DIV_2,
/** Divide Main clock by 4 */
SYSTEM_MAIN_CLOCK_DIV_4,
/** Divide Main clock by 8 */
SYSTEM_MAIN_CLOCK_DIV_8,
/** Divide Main clock by 16 */
SYSTEM_MAIN_CLOCK_DIV_16,
/** Divide Main clock by 32 */
SYSTEM_MAIN_CLOCK_DIV_32,
/** Divide Main clock by 64 */
SYSTEM_MAIN_CLOCK_DIV_64,
/** Divide Main clock by 128 */
SYSTEM_MAIN_CLOCK_DIV_128,
};
/**
 * \brief External clock source types.
 *
 * Available external clock source types, used to select between a crystal
 * (driven by the on-chip oscillator circuit) and an externally generated
 * logic-level clock signal.
 */
enum system_clock_external {
/** The external clock source is a crystal oscillator */
SYSTEM_CLOCK_EXTERNAL_CRYSTAL,
/** The connected clock source is an external logic level clock signal */
SYSTEM_CLOCK_EXTERNAL_CLOCK,
};
/**
 * \brief Operating modes of the DFLL clock source.
 *
 * Available operating modes of the DFLL clock source module.
 *
 * \note The nonzero enumerators alias SYSCTRL_DFLLCTRL bit masks,
 * presumably so the chosen mode can be written directly into the DFLLCTRL
 * register — verify against the driver implementation.
 */
enum system_clock_dfll_loop_mode {
/** The DFLL is operating in open loop mode with no feedback */
SYSTEM_CLOCK_DFLL_LOOP_MODE_OPEN,
/** The DFLL is operating in closed loop mode with frequency feedback from
 * a low frequency reference clock
 */
SYSTEM_CLOCK_DFLL_LOOP_MODE_CLOSED = SYSCTRL_DFLLCTRL_MODE,
#ifdef SYSCTRL_DFLLCTRL_USBCRM
/** The DFLL is operating in USB recovery mode with frequency feedback
 * from USB SOF
 */
SYSTEM_CLOCK_DFLL_LOOP_MODE_USB_RECOVERY = SYSCTRL_DFLLCTRL_USBCRM,
#endif
};
/**
 * \brief Locking behavior for the DFLL during device wake-up
 *
 * DFLL lock behavior modes on device wake-up from sleep.
 *
 * \note The LOSE enumerator aliases the SYSCTRL_DFLLCTRL_LLAW bit mask,
 * presumably so it can be written directly into the DFLLCTRL register.
 */
enum system_clock_dfll_wakeup_lock {
/** Keep DFLL lock when the device wakes from sleep */
SYSTEM_CLOCK_DFLL_WAKEUP_LOCK_KEEP,
/** Lose DFLL lock when the device wakes from sleep */
SYSTEM_CLOCK_DFLL_WAKEUP_LOCK_LOSE = SYSCTRL_DFLLCTRL_LLAW,
};
/**
 * \brief Fine tracking behavior for the DFLL once a lock has been acquired
 *
 * DFLL fine tracking behavior modes after a lock has been acquired.
 *
 * \note The FIX enumerator aliases the SYSCTRL_DFLLCTRL_STABLE bit mask,
 * presumably so it can be written directly into the DFLLCTRL register.
 */
enum system_clock_dfll_stable_tracking {
/** Keep tracking after the DFLL has gotten a fine lock */
SYSTEM_CLOCK_DFLL_STABLE_TRACKING_TRACK_AFTER_LOCK,
/** Stop tracking after the DFLL has gotten a fine lock */
SYSTEM_CLOCK_DFLL_STABLE_TRACKING_FIX_AFTER_LOCK = SYSCTRL_DFLLCTRL_STABLE,
};
/**
 * \brief Chill-cycle behavior of the DFLL module
 *
 * DFLL chill-cycle behavior modes of the DFLL module. A chill cycle is a period
 * of time when the DFLL output frequency is not measured by the unit, to allow
 * the output to stabilize after a change in the input clock source.
 *
 * \note The DISABLE enumerator aliases the SYSCTRL_DFLLCTRL_CCDIS bit mask,
 * presumably so it can be written directly into the DFLLCTRL register.
 */
enum system_clock_dfll_chill_cycle {
/** Enable a chill cycle, where the DFLL output frequency is not measured */
SYSTEM_CLOCK_DFLL_CHILL_CYCLE_ENABLE,
/** Disable a chill cycle, where the DFLL output frequency is not measured */
SYSTEM_CLOCK_DFLL_CHILL_CYCLE_DISABLE = SYSCTRL_DFLLCTRL_CCDIS,
};
/**
 * \brief QuickLock settings for the DFLL module
 *
 * DFLL QuickLock settings for the DFLL module, to allow for a faster lock of
 * the DFLL output frequency at the expense of accuracy.
 *
 * \note The DISABLE enumerator aliases the SYSCTRL_DFLLCTRL_QLDIS bit mask,
 * presumably so it can be written directly into the DFLLCTRL register.
 */
enum system_clock_dfll_quick_lock {
/** Enable the QuickLock feature for looser lock requirements on the DFLL */
SYSTEM_CLOCK_DFLL_QUICK_LOCK_ENABLE,
/** Disable the QuickLock feature for strict lock requirements on the DFLL */
SYSTEM_CLOCK_DFLL_QUICK_LOCK_DISABLE = SYSCTRL_DFLLCTRL_QLDIS,
};
/**
 * \brief Available clock sources in the system
 *
 * Clock sources available to the GCLK generators. Each enumerator aliases
 * the corresponding GCLK_SOURCE_* generator-input index from the device
 * header files, so values can be passed straight through to the GCLK driver.
 */
enum system_clock_source {
/** Internal 8MHz RC oscillator */
SYSTEM_CLOCK_SOURCE_OSC8M = GCLK_SOURCE_OSC8M,
/** Internal 32kHz RC oscillator */
SYSTEM_CLOCK_SOURCE_OSC32K = GCLK_SOURCE_OSC32K,
/** External oscillator */
SYSTEM_CLOCK_SOURCE_XOSC = GCLK_SOURCE_XOSC ,
/** External 32kHz oscillator */
SYSTEM_CLOCK_SOURCE_XOSC32K = GCLK_SOURCE_XOSC32K,
/** Digital Frequency Locked Loop (DFLL) */
SYSTEM_CLOCK_SOURCE_DFLL = GCLK_SOURCE_DFLL48M,
/** Internal Ultra Low Power 32kHz oscillator */
SYSTEM_CLOCK_SOURCE_ULP32K = GCLK_SOURCE_OSCULP32K,
/** Generator input pad */
SYSTEM_CLOCK_SOURCE_GCLKIN = GCLK_SOURCE_GCLKIN,
/** Generic clock generator 1 output */
SYSTEM_CLOCK_SOURCE_GCLKGEN1 = GCLK_SOURCE_GCLKGEN1,
#ifdef FEATURE_SYSTEM_CLOCK_DPLL
/** Digital Phase Locked Loop (DPLL).
 * Check \c FEATURE_SYSTEM_CLOCK_DPLL for which device support it.
 */
SYSTEM_CLOCK_SOURCE_DPLL = GCLK_SOURCE_FDPLL,
#endif
};
/**
 * \brief List of APB peripheral buses
 *
 * Available bus clock domains on the APB bus. Used to select which APBx
 * prescaler/mask register the divider and mask helpers below operate on.
 */
enum system_clock_apb_bus {
/** Peripheral bus A on the APB bus. */
SYSTEM_CLOCK_APB_APBA,
/** Peripheral bus B on the APB bus. */
SYSTEM_CLOCK_APB_APBB,
/** Peripheral bus C on the APB bus. */
SYSTEM_CLOCK_APB_APBC,
};
/**
 * \brief Configuration structure for XOSC
 *
 * External oscillator clock configuration structure.
 *
 * \note Initialize with system_clock_source_xosc_get_config_defaults()
 * before modifying individual fields.
 */
struct system_clock_source_xosc_config {
/** External clock type */
enum system_clock_external external_clock;
/** Crystal oscillator start-up time */
enum system_xosc_startup startup_time;
/** Enable automatic amplitude gain control */
bool auto_gain_control;
/** External clock/crystal frequency */
uint32_t frequency;
/** Keep the XOSC enabled in standby sleep mode */
bool run_in_standby;
/** Run On Demand. If this is set the XOSC won't run
 * until requested by a peripheral */
bool on_demand;
};
/**
 * \brief Configuration structure for XOSC32K
 *
 * External 32KHz oscillator clock configuration structure.
 *
 * \note Initialize with system_clock_source_xosc32k_get_config_defaults()
 * before modifying individual fields.
 */
struct system_clock_source_xosc32k_config {
/** External clock type */
enum system_clock_external external_clock;
/** Crystal oscillator start-up time */
enum system_xosc32k_startup startup_time;
/** Enable automatic amplitude control */
bool auto_gain_control;
/** Enable 1kHz output */
bool enable_1khz_output;
/** Enable 32kHz output */
bool enable_32khz_output;
/** External clock/crystal frequency */
uint32_t frequency;
/** Keep the XOSC32K enabled in standby sleep mode */
bool run_in_standby;
/** Run On Demand. If this is set the XOSC32K won't run
 * until requested by a peripheral */
bool on_demand;
/** Lock configuration after it has been written,
 * a device reset will release the lock */
bool write_once;
};
/**
 * \brief Configuration structure for OSC8M
 *
 * Internal 8MHz (nominal) oscillator configuration structure.
 *
 * \note Initialize with system_clock_source_osc8m_get_config_defaults()
 * before modifying individual fields.
 */
struct system_clock_source_osc8m_config {
/** Internal 8MHz RC oscillator prescaler */
enum system_osc8m_div prescaler;
/** Keep the OSC8M enabled in standby sleep mode */
bool run_in_standby;
/** Run On Demand. If this is set the OSC8M won't run
 * until requested by a peripheral */
bool on_demand;
};
/**
 * \brief Configuration structure for OSC32K
 *
 * Internal 32KHz (nominal) oscillator configuration structure.
 *
 * \note Initialize with system_clock_source_osc32k_get_config_defaults()
 * before modifying individual fields.
 */
struct system_clock_source_osc32k_config {
/** Startup time */
enum system_osc32k_startup startup_time;
/** Enable 1kHz output */
bool enable_1khz_output;
/** Enable 32kHz output */
bool enable_32khz_output;
/** Keep the OSC32K enabled in standby sleep mode */
bool run_in_standby;
/** Run On Demand. If this is set the OSC32K won't run
 * until requested by a peripheral */
bool on_demand;
/** Lock configuration after it has been written,
 * a device reset will release the lock */
bool write_once;
};
/**
 * \brief Configuration structure for DFLL
 *
 * DFLL oscillator configuration structure.
 *
 * \note Initialize with system_clock_source_dfll_get_config_defaults()
 * before modifying individual fields.
 */
struct system_clock_source_dfll_config {
/** Loop mode */
enum system_clock_dfll_loop_mode loop_mode;
/** Run On Demand. If this is set the DFLL won't run
 * until requested by a peripheral */
bool on_demand;
/** Enable Quick Lock */
enum system_clock_dfll_quick_lock quick_lock;
/** Enable Chill Cycle */
enum system_clock_dfll_chill_cycle chill_cycle;
/** DFLL lock state on wakeup */
enum system_clock_dfll_wakeup_lock wakeup_lock;
/** DFLL tracking after fine lock */
enum system_clock_dfll_stable_tracking stable_tracking;
/** Coarse calibration value (Open loop mode) */
uint8_t coarse_value;
/** Fine calibration value (Open loop mode) */
uint16_t fine_value;
/** Coarse adjustment max step size (Closed loop mode) */
uint8_t coarse_max_step;
/** Fine adjustment max step size (Closed loop mode) */
uint16_t fine_max_step;
/** DFLL multiply factor (Closed loop mode) */
uint16_t multiply_factor;
};
/**
* \name External Oscillator management
* @{
*/
/**
* \brief Retrieve the default configuration for XOSC
*
* Fills a configuration structure with the default configuration for an
* external oscillator module:
* - External Crystal
* - Start-up time of 16384 external clock cycles
* - Automatic crystal gain control mode enabled
* - Frequency of 12MHz
* - Don't run in STANDBY sleep mode
* - Run only when requested by peripheral (on demand)
*
* \param[out] config Configuration structure to fill with default values
*/
static inline void system_clock_source_xosc_get_config_defaults(
struct system_clock_source_xosc_config *const config)
{
Assert(config);
config->external_clock = SYSTEM_CLOCK_EXTERNAL_CRYSTAL;
config->startup_time = SYSTEM_XOSC_STARTUP_16384;
config->auto_gain_control = true;
config->frequency = 12000000UL;
config->run_in_standby = false;
config->on_demand = true;
}
void system_clock_source_xosc_set_config(
struct system_clock_source_xosc_config *const config);
/**
* @}
*/
/**
* \name External 32KHz Oscillator management
* @{
*/
/**
* \brief Retrieve the default configuration for XOSC32K
*
* Fills a configuration structure with the default configuration for an
* external 32KHz oscillator module:
* - External Crystal
* - Start-up time of 16384 external clock cycles
* - Automatic crystal gain control mode disabled
* - Frequency of 32.768KHz
* - 1KHz clock output disabled
* - 32KHz clock output enabled
* - Don't run in STANDBY sleep mode
* - Run only when requested by peripheral (on demand)
* - Don't lock registers after configuration has been written
*
* \param[out] config Configuration structure to fill with default values
*/
static inline void system_clock_source_xosc32k_get_config_defaults(
struct system_clock_source_xosc32k_config *const config)
{
Assert(config);
config->external_clock = SYSTEM_CLOCK_EXTERNAL_CRYSTAL;
config->startup_time = SYSTEM_XOSC32K_STARTUP_16384;
config->auto_gain_control = false;
config->frequency = 32768UL;
config->enable_1khz_output = false;
config->enable_32khz_output = true;
config->run_in_standby = false;
config->on_demand = true;
config->write_once = false;
}
void system_clock_source_xosc32k_set_config(
struct system_clock_source_xosc32k_config *const config);
/**
* @}
*/
/**
* \name Internal 32KHz Oscillator management
* @{
*/
/**
* \brief Retrieve the default configuration for OSC32K
*
* Fills a configuration structure with the default configuration for an
* internal 32KHz oscillator module:
* - 1KHz clock output enabled
* - 32KHz clock output enabled
* - Don't run in STANDBY sleep mode
* - Run only when requested by peripheral (on demand)
* - Set startup time to 130 cycles
* - Don't lock registers after configuration has been written
*
* \param[out] config Configuration structure to fill with default values
*/
static inline void system_clock_source_osc32k_get_config_defaults(
struct system_clock_source_osc32k_config *const config)
{
Assert(config);
config->enable_1khz_output = true;
config->enable_32khz_output = true;
config->run_in_standby = false;
config->on_demand = true;
config->startup_time = SYSTEM_OSC32K_STARTUP_130;
config->write_once = false;
}
void system_clock_source_osc32k_set_config(
struct system_clock_source_osc32k_config *const config);
/**
* @}
*/
/**
* \name Internal 8MHz Oscillator management
* @{
*/
/**
* \brief Retrieve the default configuration for OSC8M
*
* Fills a configuration structure with the default configuration for an
* internal 8MHz (nominal) oscillator module:
* - Clock output frequency divided by a factor of 8
* - Don't run in STANDBY sleep mode
* - Run only when requested by peripheral (on demand)
*
* \param[out] config Configuration structure to fill with default values
*/
static inline void system_clock_source_osc8m_get_config_defaults(
struct system_clock_source_osc8m_config *const config)
{
Assert(config);
config->prescaler = SYSTEM_OSC8M_DIV_8;
config->run_in_standby = false;
config->on_demand = true;
}
void system_clock_source_osc8m_set_config(
struct system_clock_source_osc8m_config *const config);
/**
* @}
*/
/**
* \name Internal DFLL management
* @{
*/
/**
 * \brief Retrieve the default configuration for DFLL
 *
 * Fills a configuration structure with the default configuration for a
 * DFLL oscillator module:
 * - Open loop mode
 * - QuickLock mode enabled
 * - Chill cycle enabled
 * - Output frequency lock maintained during device wake-up
 * - Continuous tracking of the output frequency
 * - Default tracking values at the mid-points for both coarse and fine
 *   tracking parameters
 * - Don't run in STANDBY sleep mode
 * - Run only when requested by peripheral (on demand)
 *
 * \param[out] config  Configuration structure to fill with default values
 */
static inline void system_clock_source_dfll_get_config_defaults(
struct system_clock_source_dfll_config *const config)
{
Assert(config);
config->loop_mode = SYSTEM_CLOCK_DFLL_LOOP_MODE_OPEN;
config->quick_lock = SYSTEM_CLOCK_DFLL_QUICK_LOCK_ENABLE;
config->chill_cycle = SYSTEM_CLOCK_DFLL_CHILL_CYCLE_ENABLE;
config->wakeup_lock = SYSTEM_CLOCK_DFLL_WAKEUP_LOCK_KEEP;
config->stable_tracking = SYSTEM_CLOCK_DFLL_STABLE_TRACKING_TRACK_AFTER_LOCK;
config->on_demand = true;
/* Open loop mode calibration values.
 * NOTE(review): 0x1f/4 and 0xff/4 are a quarter of full scale, not the
 * mid-points the docstring claims — verify the intended calibration
 * defaults against the datasheet before "fixing" either one. */
config->coarse_value = 0x1f / 4; /* Quarter of 5-bit range */
config->fine_value = 0xff / 4; /* Quarter of 8-bit range */
/* Closed loop mode */
config->coarse_max_step = 1;
config->fine_max_step = 1;
config->multiply_factor = 6; /* Multiply 8MHz by 6 to get 48MHz */
}
void system_clock_source_dfll_set_config(
struct system_clock_source_dfll_config *const config);
/**
* @}
*/
/**
* \name Clock source management
* @{
*/
enum status_code system_clock_source_write_calibration(
const enum system_clock_source system_clock_source,
const uint16_t calibration_value,
const uint8_t freq_range);
enum status_code system_clock_source_enable(
const enum system_clock_source system_clock_source);
enum status_code system_clock_source_disable(
const enum system_clock_source clk_source);
bool system_clock_source_is_ready(
const enum system_clock_source clk_source);
uint32_t system_clock_source_get_hz(
const enum system_clock_source clk_source);
/**
* @}
*/
/**
* \name Main clock management
* @{
*/
#ifdef FEATURE_SYSTEM_CLOCK_FAILURE_DETECT
/**
 * \brief Enable or disable the main clock failure detection.
 *
 * This mechanism allows switching automatically the main clock to the safe
 * RCSYS clock, when the main clock source is considered off.
 *
 * This may happen for instance when an external crystal is selected as the
 * clock source of the main clock and the crystal dies. The mechanism is to
 * detect, during a RCSYS period, at least one rising edge of the main clock.
 * If no rising edge is seen the clock is considered failed.
 * As soon as the detector is enabled, the clock failure detector
 * (CFD) will monitor the divided main clock. When a clock failure is detected,
 * the main clock automatically switches to the RCSYS clock and the CFD
 * interrupt is generated if enabled.
 *
 * \note The failure detect must be disabled if the system clock is the same or
 *       slower than 32kHz as it will believe the system clock has failed with
 *       a too-slow clock.
 *
 * \param[in] enable  Boolean \c true to enable, \c false to disable detection
 */
static inline void system_main_clock_set_failure_detect(
		const bool enable)
{
	/* Read-modify-write the CFDEN bit in the Power Manager control register */
	uint32_t ctrl_reg = PM->CTRL.reg;

	if (enable) {
		ctrl_reg |= PM_CTRL_CFDEN;
	} else {
		ctrl_reg &= ~PM_CTRL_CFDEN;
	}

	PM->CTRL.reg = ctrl_reg;
}
#endif
/**
* \brief Set main CPU clock divider.
*
* Sets the clock divider used on the main clock to provide the CPU clock.
*
* \param[in] divider CPU clock divider to set
*/
static inline void system_cpu_clock_set_divider(
const enum system_main_clock_div divider)
{
Assert(((uint32_t)divider & PM_CPUSEL_CPUDIV_Msk) == divider);
PM->CPUSEL.reg = (uint32_t)divider;
}
/**
 * \brief Retrieves the current frequency of the CPU core.
 *
 * Retrieves the operating frequency of the CPU core, obtained from the main
 * generic clock and the set CPU bus divider.
 *
 * \return Current CPU frequency in Hz.
 */
static inline uint32_t system_cpu_clock_get_hz(void)
{
	/* CPUSEL holds the power-of-two divider exponent, so a right shift
	 * of the main generic clock frequency yields the CPU frequency */
	const uint32_t main_clock_hz = system_gclk_gen_get_hz(GCLK_GENERATOR_0);

	return main_clock_hz >> PM->CPUSEL.reg;
}
/**
* \brief Set APBx clock divider.
*
* Set the clock divider used on the main clock to provide the clock for the
* given APBx bus.
*
* \param[in] divider APBx bus divider to set
* \param[in] bus APBx bus to set divider for
*
* \returns Status of the clock division change operation.
*
* \retval STATUS_ERR_INVALID_ARG Invalid bus ID was given
* \retval STATUS_OK The APBx clock was set successfully
*/
static inline enum status_code system_apb_clock_set_divider(
const enum system_clock_apb_bus bus,
const enum system_main_clock_div divider)
{
switch (bus) {
case SYSTEM_CLOCK_APB_APBA:
PM->APBASEL.reg = (uint32_t)divider;
break;
case SYSTEM_CLOCK_APB_APBB:
PM->APBBSEL.reg = (uint32_t)divider;
break;
case SYSTEM_CLOCK_APB_APBC:
PM->APBCSEL.reg = (uint32_t)divider;
break;
default:
Assert(false);
return STATUS_ERR_INVALID_ARG;
}
return STATUS_OK;
}
/**
 * \brief Retrieves the current frequency of an APBx bus.
 *
 * Retrieves the operating frequency of an APBx bus, obtained from the main
 * generic clock and the set APBx bus divider.
 *
 * \param[in] bus  APBx bus to query
 *
 * \return Current APBx bus frequency in Hz, or zero for an invalid bus ID.
 */
static inline uint32_t system_apb_clock_get_hz(
		const enum system_clock_apb_bus bus)
{
	/* APBxSEL holds the power-of-two divider exponent for the bus */
	uint16_t divider_exp = 0;

	if (bus == SYSTEM_CLOCK_APB_APBA) {
		divider_exp = PM->APBASEL.reg;
	} else if (bus == SYSTEM_CLOCK_APB_APBB) {
		divider_exp = PM->APBBSEL.reg;
	} else if (bus == SYSTEM_CLOCK_APB_APBC) {
		divider_exp = PM->APBCSEL.reg;
	} else {
		/* Unknown bus domain */
		Assert(false);
		return 0;
	}

	return (system_gclk_gen_get_hz(GCLK_GENERATOR_0) >> divider_exp);
}
/**
* @}
*/
/**
* \name Bus clock masking
* @{
*/
/**
 * \brief Set bits in the clock mask for the AHB bus.
 *
 * Enables the module clocks selected by \p ahb_mask on the AHB bus. Bits
 * set to 1 enable the corresponding clock; zero bits are left untouched.
 *
 * \param[in] ahb_mask  AHB clock mask to enable
 */
static inline void system_ahb_clock_set_mask(
		const uint32_t ahb_mask)
{
	/* OR the requested bits into the AHB mask register, preserving
	 * all clocks that are already enabled */
	PM->AHBMASK.reg |= ahb_mask;
}
/**
 * \brief Clear bits in the clock mask for the AHB bus.
 *
 * Disables the module clocks selected by \p ahb_mask; zero bits in the mask
 * leave the corresponding clocks untouched.
 *
 * \param[in] ahb_mask AHB clock mask to disable
 */
static inline void system_ahb_clock_clear_mask(
		const uint32_t ahb_mask)
{
	PM->AHBMASK.reg &= ~ahb_mask;
}
/**
 * \brief Set bits in the clock mask for an APBx bus.
 *
 * This function will set bits in the clock mask for an APBx bus.
 * Any bits set to 1 will enable the corresponding module clock, zero bits in
 * the mask will be ignored.
 *
 * \param[in] bus   Bus to set clock mask bits for, a
 *                  \c SYSTEM_CLOCK_APB_APBx constant from this driver
 * \param[in] mask  APBx clock mask, a mask of \c PM_APBxMASK_* constants
 *                  from the device header files
 *
 * \returns Status indicating the result of the clock mask change operation.
 *
 * \retval STATUS_ERR_INVALID_ARG Invalid bus given
 * \retval STATUS_OK              The clock mask was set successfully
 */
static inline enum status_code system_apb_clock_set_mask(
		const enum system_clock_apb_bus bus,
		const uint32_t mask)
{
	switch (bus) {
	case SYSTEM_CLOCK_APB_APBA:
		PM->APBAMASK.reg |= mask;
		break;

	case SYSTEM_CLOCK_APB_APBB:
		PM->APBBMASK.reg |= mask;
		break;

	case SYSTEM_CLOCK_APB_APBC:
		PM->APBCMASK.reg |= mask;
		break;

	default:
		Assert(false);
		return STATUS_ERR_INVALID_ARG;
	}

	return STATUS_OK;
}
/**
 * \brief Clear bits in the clock mask for an APBx bus.
 *
 * This function will clear bits in the clock mask for an APBx bus.
 * Any bits set to 1 will disable the corresponding module clock, zero bits in
 * the mask will be ignored.
 *
 * \param[in] bus   Bus to clear clock mask bits for, a
 *                  \c SYSTEM_CLOCK_APB_APBx constant from this driver
 * \param[in] mask  APBx clock mask, a mask of \c PM_APBxMASK_* constants
 *                  from the device header files
 *
 * \returns Status indicating the result of the clock mask change operation.
 *
 * \retval STATUS_ERR_INVALID_ARG Invalid bus ID was given.
 * \retval STATUS_OK              The clock mask was changed successfully.
 */
static inline enum status_code system_apb_clock_clear_mask(
		const enum system_clock_apb_bus bus,
		const uint32_t mask)
{
	switch (bus) {
	case SYSTEM_CLOCK_APB_APBA:
		PM->APBAMASK.reg &= ~mask;
		break;

	case SYSTEM_CLOCK_APB_APBB:
		PM->APBBMASK.reg &= ~mask;
		break;

	case SYSTEM_CLOCK_APB_APBC:
		PM->APBCMASK.reg &= ~mask;
		break;

	default:
		Assert(false);
		return STATUS_ERR_INVALID_ARG;
	}

	return STATUS_OK;
}
/**
* @}
*/
#ifdef FEATURE_SYSTEM_CLOCK_DPLL
/**
 * \brief Reference clock source of the DPLL module.
 */
enum system_clock_source_dpll_reference_clock {
	/** Select CLK_DPLL_REF0 as clock reference */
	SYSTEM_CLOCK_SOURCE_DPLL_REFERENCE_CLOCK_REF0,
	/** Select CLK_DPLL_REF1 as clock reference */
	SYSTEM_CLOCK_SOURCE_DPLL_REFERENCE_CLOCK_REF1,
	/** Select GCLK_DPLL as clock reference */
	SYSTEM_CLOCK_SOURCE_DPLL_REFERENCE_CLOCK_GCLK,
};
/**
 * \brief Lock time-out value of the DPLL module.
 */
enum system_clock_source_dpll_lock_time {
	/** Set no time-out as default */
	SYSTEM_CLOCK_SOURCE_DPLL_LOCK_TIME_DEFAULT,
	/* Values 0x01-0x03 are skipped here -- presumably reserved in the
	 * register field encoding; confirm against the device datasheet. */
	/** Set time-out if no lock within 8 ms */
	SYSTEM_CLOCK_SOURCE_DPLL_LOCK_TIME_8MS = 0x04,
	/** Set time-out if no lock within 9 ms */
	SYSTEM_CLOCK_SOURCE_DPLL_LOCK_TIME_9MS,
	/** Set time-out if no lock within 10 ms */
	SYSTEM_CLOCK_SOURCE_DPLL_LOCK_TIME_10MS,
	/** Set time-out if no lock within 11 ms */
	SYSTEM_CLOCK_SOURCE_DPLL_LOCK_TIME_11MS,
};
/**
 * \brief Filter type of the DPLL module.
 */
enum system_clock_source_dpll_filter {
	/** Default filter mode */
	SYSTEM_CLOCK_SOURCE_DPLL_FILTER_DEFAULT,
	/** Low bandwidth filter */
	SYSTEM_CLOCK_SOURCE_DPLL_FILTER_LOW_BANDWIDTH_FILTER,
	/** High bandwidth filter */
	SYSTEM_CLOCK_SOURCE_DPLL_FILTER_HIGH_BANDWIDTH_FILTER,
	/** High damping filter */
	SYSTEM_CLOCK_SOURCE_DPLL_FILTER_HIGH_DAMPING_FILTER,
};
/**
 * \brief Configuration structure for DPLL.
 *
 * DPLL oscillator configuration structure.
 */
struct system_clock_source_dpll_config {
	/** Run on demand. If this is set, the DPLL won't run
	 * until requested by a peripheral */
	bool on_demand;
	/** Keep the DPLL enabled in standby sleep mode */
	bool run_in_standby;
	/** Bypass the lock signal */
	bool lock_bypass;
	/** Wake up fast. If this is set, the DPLL output clock is enabled after
	 * the startup time */
	bool wake_up_fast;
	/** Enable low power mode */
	bool low_power_enable;

	/** Output frequency of the clock, in Hz */
	uint32_t output_frequency;
	/** Reference frequency of the clock, in Hz */
	uint32_t reference_frequency;
	/** Divider of the reference clock */
	uint16_t reference_divider;

	/** Filter type of the DPLL module */
	enum system_clock_source_dpll_filter filter;
	/** Lock time-out value of the DPLL module */
	enum system_clock_source_dpll_lock_time lock_time;
	/** Reference clock source of the DPLL module */
	enum system_clock_source_dpll_reference_clock reference_clock;
};
/**
* \name Internal DPLL management
* @{
*/
/**
 * \brief Retrieve the default configuration for DPLL.
 *
 * Fills a configuration structure with the default configuration for a
 * DPLL oscillator module:
 * - Run only when requested by peripheral (on demand)
 * - Don't run in STANDBY sleep mode
 * - Lock bypass disabled
 * - Fast wake up disabled
 * - Low power mode disabled
 * - Output frequency is 48MHz
 * - Reference clock frequency is 32768Hz
 * - Do not divide the reference clock
 * - Select REF0 as reference clock
 * - Set lock time to default mode
 * - Use default filter
 *
 * \param[out] config Configuration structure to fill with default values
 */
static inline void system_clock_source_dpll_get_config_defaults(
		struct system_clock_source_dpll_config *const config)
{
	config->on_demand           = true;
	config->run_in_standby      = false;
	config->lock_bypass         = false;
	config->wake_up_fast        = false;
	config->low_power_enable    = false;

	config->output_frequency    = 48000000;
	config->reference_frequency = 32768;
	config->reference_divider   = 1;

	config->reference_clock     = SYSTEM_CLOCK_SOURCE_DPLL_REFERENCE_CLOCK_REF0;
	config->lock_time           = SYSTEM_CLOCK_SOURCE_DPLL_LOCK_TIME_DEFAULT;
	config->filter              = SYSTEM_CLOCK_SOURCE_DPLL_FILTER_DEFAULT;
} /* Removed the stray ';' that followed the original function body; an
   * empty file-scope declaration is rejected by strict/pedantic C. */
void system_clock_source_dpll_set_config(
struct system_clock_source_dpll_config *const config);
/* @} */
#endif
/**
* \name System Clock Initialization
* @{
*/
void system_clock_init(void);
/**
* @}
*/
/**
* \name System Flash Wait States
* @{
*/
/**
 * \brief Set flash controller wait states.
 *
 * Programs the number of wait states used when accessing the on-board flash
 * memory. The required number of wait states depends on both the device
 * supply voltage and the CPU speed; see the device's electrical
 * characteristics for the correct value.
 *
 * \param[in] wait_states Number of wait states to use for internal flash
 */
static inline void system_flash_set_waitstates(uint8_t wait_states)
{
	/* Guard against values that do not fit the NVMCTRL CTRLB.RWS field. */
	Assert(NVMCTRL_CTRLB_RWS((uint32_t)wait_states) ==
			((uint32_t)wait_states << NVMCTRL_CTRLB_RWS_Pos));

	NVMCTRL->CTRLB.bit.RWS = wait_states;
}
/**
* @}
*/
/**
* @}
*/
/**
* \page asfdoc_sam0_system_clock_extra Extra Information for SYSTEM CLOCK Driver
*
* \section asfdoc_sam0_system_clock_extra_acronyms Acronyms
* Below is a table listing the acronyms used in this module, along with their
* intended meanings.
*
* <table>
* <tr>
* <th>Acronym</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>DFLL</td>
* <td>Digital Frequency Locked Loop</td>
* </tr>
* <tr>
* <td>MUX</td>
* <td>Multiplexer</td>
* </tr>
* <tr>
* <td>OSC32K</td>
* <td>Internal 32KHz Oscillator</td>
* </tr>
* <tr>
* <td>OSC8M</td>
* <td>Internal 8MHz Oscillator</td>
* </tr>
* <tr>
* <td>PLL</td>
* <td>Phase Locked Loop</td>
* </tr>
* <tr>
* <td>OSC</td>
* <td>Oscillator</td>
* </tr>
* <tr>
* <td>XOSC</td>
* <td>External Oscillator</td>
* </tr>
* <tr>
* <td>XOSC32K</td>
* <td>External 32KHz Oscillator</td>
* </tr>
* <tr>
* <td>AHB</td>
* <td>Advanced High-performance Bus</td>
* </tr>
* <tr>
* <td>APB</td>
* <td>Advanced Peripheral Bus</td>
* </tr>
* <tr>
* <td>DPLL</td>
* <td>Digital Phase Locked Loop</td>
* </tr>
* </table>
*
*
* \section asfdoc_sam0_system_clock_extra_dependencies Dependencies
* This driver has the following dependencies:
*
* - None
*
*
* \section asfdoc_sam0_system_clock_extra_errata Errata
*
* - This driver implements workaround for errata 10558
*
* "Several reset values of SYSCTRL.INTFLAG are wrong (BOD and DFLL)"
* When system_init is called it will reset these interrupts flags before they are used.
* - This driver implements experimental workaround for errata 9905
*
* "The DFLL clock must be requested before being configured otherwise a
* write access to a DFLL register can freeze the device."
* This driver will enable and configure the DFLL before the ONDEMAND bit is set.
*
*
* \section asfdoc_sam0_system_clock_extra_history Module History
* An overview of the module history is presented in the table below, with
* details on the enhancements and fixes made to the module since its first
* release. The current version of this corresponds to the newest version in
* the table.
*
* <table>
* <tr>
* <th>Changelog</th>
* </tr>
* <tr>
* <td>
* \li Corrected OSC32K startup time definitions.
* \li Support locking of OSC32K and XOSC32K config register (default: false).
* \li Added DPLL support, functions added:
* \c system_clock_source_dpll_get_config_defaults() and
* \c system_clock_source_dpll_set_config().
* \li Moved gclk channel locking feature out of the config struct,
* functions added:
* \c system_gclk_chan_lock(),
* \c system_gclk_chan_is_locked(),
* \c system_gclk_chan_is_enabled() and
* \c system_gclk_gen_is_enabled().
* </td>
* </tr>
* <tr>
* <td>Fixed \c system_gclk_chan_disable() deadlocking if a channel is enabled
* and configured to a failed/not running clock generator.</td>
* </tr>
* <tr>
* <td>
* \li Changed default value for CONF_CLOCK_DFLL_ON_DEMAND from \c true to \c false.
* \li Fixed system_flash_set_waitstates() failing with an assertion
* if an odd number of wait states provided.
* </td>
* </tr>
* <tr>
* <td>
* \li Updated dfll configuration function to implement workaround for
* errata 9905 in the DFLL module.
* \li Updated \c system_clock_init() to reset interrupt flags before
* they are used, errata 10558.
 * \li Fixed \c system_clock_source_get_hz() to return correct DFLL
* frequency number.
* </td>
* </tr>
* <tr>
* <td>\li Fixed \c system_clock_source_is_ready not returning the correct
* state for \c SYSTEM_CLOCK_SOURCE_OSC8M.
* \li Renamed the various \c system_clock_source_*_get_default_config()
* functions to \c system_clock_source_*_get_config_defaults() to
* match the remainder of ASF.
* \li Added OSC8M calibration constant loading from the device signature
* row when the oscillator is initialized.
* \li Updated default configuration of the XOSC32 to disable Automatic
* Gain Control due to silicon errata.
* </td>
* </tr>
* <tr>
* <td>Initial Release</td>
* </tr>
* </table>
*/
/**
* \page asfdoc_sam0_system_clock_exqsg Examples for System Clock Driver
*
* This is a list of the available Quick Start guides (QSGs) and example
* applications for \ref asfdoc_sam0_system_clock_group. QSGs are simple
* examples with step-by-step instructions to configure and use this driver in
* a selection of use cases. Note that QSGs can be compiled as a standalone
* application or be added to the user application.
*
* - \subpage asfdoc_sam0_system_clock_basic_use_case
* - \subpage asfdoc_sam0_system_gclk_basic_use_case
*
* \page asfdoc_sam0_system_clock_document_revision_history Document Revision History
*
* <table>
* <tr>
 * <th>Doc. Rev.</th>
 * <th>Date</th>
 * <th>Comments</th>
* </tr>
* <tr>
* <td>E</td>
* <td>04/2014</td>
* <td>Added support for SAMD10/D11.</td>
* </tr>
* <tr>
* <td>D</td>
* <td>02/2014</td>
* <td>Added support for SAMR21.</td>
* </tr>
* <tr>
* <td>C</td>
* <td>01/2014</td>
* <td>Added support for SAMD21.</td>
* </tr>
* <tr>
* <td>B</td>
* <td>06/2013</td>
* <td>Corrected documentation typos. Fixed missing steps in the Basic
* Use Case Quick Start Guide.</td>
* </tr>
* <tr>
* <td>A</td>
* <td>06/2013</td>
* <td>Initial release</td>
* </tr>
* </table>
*/
#ifdef __cplusplus
}
#endif
#endif /* SYSTEM_CLOCK_H_INCLUDED */
|
// Per-selector DOM event handlers. Each top-level key is a CSS selector;
// its value maps event names to handlers invoked with the matched element
// bound to `this` (so these must stay `function` expressions, not arrows).
var rulesets = {
  'input[type="text"]': {
    // Restore the default text when an emptied field loses focus.
    blur: function (e) {
      if (this.value == '') this.value = this.defaultValue;
    },
    // Clear the default text when the field gains focus.
    focus: function (e) {
      if (this.value == this.defaultValue) this.value = '';
    },
    // Flag an empty field with a red border; otherwise reset inline styles.
    change: function (e) {
      if (this.value == '') {
        this.style.border = '2px solid #f00';
      } else {
        this.style.cssText = '';
      }
    },
    mouseout: function (e) {
      this.style.backgroundColor = '';
    },
    mouseover: function (e) {
      this.style.backgroundColor = '#ecf';
    }
  },
  // Checkerboard highlight: "even" cells get orange on hover.
  'ul li:nth-of-type(even), table tr:nth-of-type(odd) td:nth-of-type(even), table tr:nth-of-type(even) td:nth-of-type(odd)': {
    mouseout: function (e) {
      this.style.backgroundColor = '';
    },
    mouseover: function (e) {
      this.style.backgroundColor = '#fc6';
    }
  },
  // Complementary "odd" cells get green on hover.
  'ul li:nth-of-type(odd), table tr:nth-of-type(odd) td:nth-of-type(odd), table tr:nth-of-type(even) td:nth-of-type(even)': {
    mouseout: function (e) {
      this.style.backgroundColor = '';
    },
    mouseover: function (e) {
      this.style.backgroundColor = '#396';
    }
  }
};
|
# sqlalchemy_teradata/base.py
# Copyright (C) 2015-2016 by Teradata
# <see AUTHORS file>
#
# This module is part of sqlalchemy-teradata and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
from sqlalchemy import *
from sqlalchemy.sql import compiler
from sqlalchemy.engine import default
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import ClauseElement, Executable
from sqlalchemy.schema import DDLElement
from sqlalchemy.sql import table
from sqlalchemy import types as sqltypes
from sqlalchemy.types import CHAR, DATE, DATETIME, \
BLOB, CLOB, TIMESTAMP, FLOAT, BIGINT, DECIMAL, NUMERIC, \
NCHAR, NVARCHAR, INTEGER, \
SMALLINT, TIME, TEXT, VARCHAR, REAL
# Statements that should trigger an implicit commit when executed outside
# an explicit transaction (DML/DDL keywords at the start of the statement).
AUTOCOMMIT_REGEXP = re.compile(
    r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|MERGE)',
    re.IGNORECASE | re.UNICODE)

# TODO: Read this from the dbc.restrictedwordsv view
ReservedWords = set("""
    abort abortsession abs access_lock account acos acosh add add_months
    admin after aggregate all alter amp and ansidate any arglparen as asc
    asin asinh at atan atan2 atanh atomic authorization ave average avg
    before begin between bigint binary blob both bt but by byte byteint
    bytes call case case_n casespecific cast cd char char_length
    char2hexint count day desc hour in le minute meets month order
    ordering title value user password preceded second succeeds year
    match time timestamp
""".split())
class TeradataExecutionContext(default.DefaultExecutionContext):
    """Execution context for the Teradata dialect."""

    def __init__(self, dialect, connection, dbapi_connection, compiled_ddl):
        # NOTE(review): DefaultExecutionContext's constructor signature has
        # changed across SQLAlchemy versions -- confirm these positional
        # arguments match the version this dialect targets.
        super(TeradataExecutionContext, self).__init__(dialect, connection, dbapi_connection, compiled_ddl)

    def should_autocommit_text(self, statement):
        # Autocommit any statement starting with a DML/DDL keyword matched
        # by AUTOCOMMIT_REGEXP; returns a match object (truthy) or None.
        return AUTOCOMMIT_REGEXP.match(statement)
class TeradataIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer that quotes Teradata reserved words.

    Uses double quotes for identifier quoting; ``final_quote=None`` lets the
    base class fall back to ``initial_quote``.
    """

    reserved_words = ReservedWords

    def __init__(self, dialect, initial_quote='"', final_quote=None, escape_quote='"', omit_schema=False):
        super(TeradataIdentifierPreparer, self).__init__(dialect, initial_quote, final_quote,
                                                         escape_quote, omit_schema)
# Views Recipe from: https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/Views
class CreateView(DDLElement):
    """DDL element emitting ``CREATE VIEW <name> AS <select>``."""

    def __init__(self, name, selectable):
        self.name = name
        self.selectable = selectable


class DropView(DDLElement):
    """DDL element emitting ``DROP VIEW <name>``."""

    def __init__(self, name):
        self.name = name


@compiles(CreateView)
def visit_create_view(element, compiler, **kw):
    # Render the view body through the dialect's SQL compiler so dialect
    # quoting/typing rules apply to the SELECT.
    return "CREATE VIEW {} AS {}".format(element.name, compiler.sql_compiler.process(element.selectable))


@compiles(DropView)
def visit_drop_view(element, compiler, **kw):
    return "DROP VIEW {}".format(element.name)
class CreateTableAs(DDLElement):
    """Placeholder for ``CREATE TABLE ... AS ...`` support (not implemented)."""
    pass


@compiles(CreateTableAs)
def visit_create_table(element, table, **kw):
    # NOTE(review): the second positional argument supplied by SQLAlchemy's
    # @compiles dispatch is the compiler, but it is named 'table' here and
    # shadows the 'table' imported at module level -- confirm and rename
    # before implementing.
    pass


class CreateTableQueue(DDLElement):
    """Placeholder for Teradata queue-table DDL (not implemented)."""
    pass


class CreateTableGlobalTempTrace(DDLElement):
    """Placeholder for global temporary trace-table DDL (not implemented)."""
    pass


class CreateErrorTable(DDLElement):
    """Placeholder for error-table DDL (not implemented)."""
    pass


class IdentityColumn(DDLElement):
    """Placeholder for identity-column DDL (not implemented)."""
    pass


class CreateJoinIndex():
    """Placeholder for join-index DDL (not implemented).

    NOTE(review): unlike the classes above, this does not subclass
    DDLElement -- confirm whether that is intentional.
    """
    pass


class CreateHashIndex():
    """Placeholder for hash-index DDL (not implemented).

    NOTE(review): does not subclass DDLElement -- confirm intent.
    """
    pass
|
import torchvision.datasets as datasets
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torchvision.datasets import VisionDataset
import torch
import numpy as np
class MyDataset(Dataset):
    """In-memory dataset wrapping aligned ``data``/``target`` arrays.

    An optional ``transform`` callable is applied to each sample on access;
    targets are returned untouched.
    """

    def __init__(self, data, target, train, transform=None):
        # Data and targets must be aligned sample-for-sample.
        assert data.shape[0] == target.shape[0]
        self.data = data
        self.target = target
        self.train = train
        self.transform = transform

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, index):
        sample = self.data[index]
        transform = self.transform
        if transform is not None:
            sample = transform(sample)
        return sample, self.target[index]
def make_dataset(data, target, train, transform=None):
    """Factory wrapper around :class:`MyDataset`."""
    return MyDataset(data, target, train, transform)
def load_dataset(dataset):
    """Download (if necessary) and load a dataset by name.

    :param dataset: one of ``"cifar10"``, ``"cifar100"``,
        ``"emnist-letter"``, ``"emnist-digit"``
    :returns: ``(dataset_train, dataset_test, n_classes, n_channels, img_size)``
        with ``targets`` converted to 0-based ``torch`` tensors
    :raises NotImplementedError: for an unknown dataset name
    """
    # name -> (constructor, extra ctor kwargs, label offset,
    #          n_classes, n_channels, img_size).
    # EMNIST "letters" labels are 1-based, hence the offset of 1.
    specs = {
        "cifar10": (datasets.CIFAR10, {}, 0, 10, 3, 32),
        "cifar100": (datasets.CIFAR100, {}, 0, 100, 3, 32),
        "emnist-letter": (datasets.EMNIST, {"split": "letters"}, 1, 26, 1, 28),
        "emnist-digit": (datasets.EMNIST, {"split": "digits"}, 0, 10, 1, 28),
    }
    if dataset not in specs:
        raise NotImplementedError
    ctor, kwargs, label_offset, n_classes, n_channels, img_size = specs[dataset]
    root = 'datasets/' + dataset
    dataset_train = ctor(root=root, download=True, **kwargs)
    dataset_train.targets = torch.as_tensor(np.array(dataset_train.targets)) - label_offset
    dataset_test = ctor(root=root, train=False, download=True, **kwargs)
    dataset_test.targets = torch.as_tensor(np.array(dataset_test.targets)) - label_offset
    return dataset_train, dataset_test, n_classes, n_channels, img_size
# Channel-wise normalization; these are the standard ImageNet statistics,
# applied here to CIFAR images as well -- presumably intentional, confirm.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
def make_transforms(dataset, train=True, no_data_augmentation=False, is_distill=False):
    """Build the torchvision transform pipeline for one dataset split.

    CIFAR training splits get flip/crop augmentation (unless disabled);
    distillation pipelines skip the ToPILImage conversion since their
    inputs are not raw uint8 arrays. EMNIST only needs tensor conversion.

    :raises NotImplementedError: for an unknown dataset name
    """
    if dataset in ("cifar10", "cifar100"):
        augment = train and not no_data_augmentation
        if not augment:
            return transforms.Compose([
                transforms.ToTensor(),
                normalize,
            ])
        steps = []
        if not is_distill:
            steps.append(transforms.ToPILImage())
        steps.extend([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, padding=4),
            transforms.ToTensor(),
            normalize,
        ])
        return transforms.Compose(steps)
    if dataset in ("emnist-digit", "emnist-letter"):
        return transforms.Compose([transforms.ToTensor()])
    raise NotImplementedError
def make_dataloader(args, type, dataset: MyDataset):
    """Build a DataLoader for the given split type.

    :param args: namespace providing the per-split batch-size attributes
    :param type: one of ``"train"``, ``"test"`` or ``"distill"``
    :param dataset: the dataset to wrap
    :raises ValueError: if ``type`` is not a known split (the original code
        fell through and raised an obscure UnboundLocalError instead)
    """
    if type == "train":
        return DataLoader(dataset, batch_size=args.local_dataloader_batch_size, shuffle=True, num_workers=0)
    if type == "test":
        return DataLoader(dataset, batch_size=args.test_dataloader_batch_size, shuffle=False, num_workers=4)
    if type == "distill":
        return DataLoader(dataset, batch_size=args.distill_dataloader_batch_size, shuffle=True, num_workers=4)
    raise ValueError("type must be 'train', 'test' or 'distill', got %r" % (type,))
def split_dataset(args, dataset: VisionDataset, transform=None):
    """Split a torchvision dataset into one MyDataset shard per worker.

    Two heterogeneity modes (``args.heterogeneity``):

    - ``'mix'``: the first ``homo_ratio`` fraction of samples is split
      evenly (IID); the remainder is sorted by label and split into
      contiguous, label-skewed chunks; each worker gets one of each.
    - ``'dir'``: per-worker class proportions are drawn from a Dirichlet
      distribution with concentration ``args.dir_level``.
    """
    data = dataset.data
    # Torchvision datasets may store data as a tensor or a numpy array;
    # normalize to numpy for np.split/np.concatenate below.
    data = data.numpy() if torch.is_tensor(data) is True else data
    label = dataset.targets
    n_workers = args.n_workers
    homo_ratio = args.homo_ratio
    # centralized case, no need to split
    if n_workers == 1:
        return [make_dataset(data, label, dataset.train, transform)]
    if args.heterogeneity == 'mix':
        n_data = data.shape[0]
        n_homo_data = int(n_data * homo_ratio)
        # Truncate both counts so every worker receives equal-sized shards.
        n_homo_data = n_homo_data - n_homo_data % n_workers
        n_data = n_data - n_data % n_workers
        if n_homo_data > 0:
            # IID portion: split in original order.
            data_homo, label_homo = data[0:n_homo_data], label[0:n_homo_data]
            data_homo_list, label_homo_list = np.split(data_homo, n_workers), label_homo.chunk(n_workers)
        if n_homo_data < n_data:
            # Heterogeneous portion: sort by label so each contiguous chunk
            # is dominated by a few classes.
            data_hetero, label_hetero = data[n_homo_data:n_data], label[n_homo_data:n_data]
            label_hetero_sorted, index = torch.sort(label_hetero)
            data_hetero_sorted = data_hetero[index]
            data_hetero_list, label_hetero_list = np.split(data_hetero_sorted, n_workers), label_hetero_sorted.chunk(
                n_workers)
        if 0 < n_homo_data < n_data:
            # Each worker gets its IID chunk plus its skewed chunk.
            data_list = [np.concatenate([data_homo, data_hetero], axis=0) for data_homo, data_hetero in
                         zip(data_homo_list, data_hetero_list)]
            label_list = [torch.cat([label_homo, label_hetero], dim=0) for label_homo, label_hetero in
                          zip(label_homo_list, label_hetero_list)]
        elif n_homo_data < n_data:
            data_list = data_hetero_list
            label_list = label_hetero_list
        else:
            data_list = data_homo_list
            label_list = label_homo_list
    elif args.heterogeneity == 'dir':
        n_cls = (int(torch.max(label))) + 1
        n_data = data.shape[0]
        # One row of class proportions per worker.
        cls_priors = np.random.dirichlet(alpha=[args.dir_level] * n_cls, size=n_workers)
        # cls_priors_init = cls_priors # Used for verification
        prior_cumsum = np.cumsum(cls_priors, axis=1)
        idx_list = [np.where(label == i)[0] for i in range(n_cls)]
        cls_amount = [len(idx_list[i]) for i in range(n_cls)]
        idx_worker = [[None] for i in range(n_workers)]
        for curr_worker in range(n_workers):
            for data_sample in range(n_data // n_workers):
                # Sample a class label from this worker's categorical prior
                # via inverse-CDF sampling.
                curr_prior = prior_cumsum[curr_worker]
                cls_label = np.argmax(np.random.uniform() <= curr_prior)
                while cls_amount[cls_label] <= 0:
                    # If you run out of samples: renormalize the priors with
                    # the exhausted class zeroed out, then resample.
                    correction = [[1 - cls_priors[i, cls_label]] * n_cls for i in range(n_workers)]
                    cls_priors = cls_priors / correction
                    cls_priors[:, cls_label] = [0] * n_workers
                    # NOTE(review): curr_prior here is the full 2-D cumsum
                    # matrix, so the argmax below flattens across all
                    # workers' rows rather than using curr_worker's row --
                    # looks unintended; confirm against the original paper
                    # code before relying on 'dir' splits.
                    curr_prior = np.cumsum(cls_priors, axis=1)
                    cls_label = np.argmax(np.random.uniform() <= curr_prior)
                cls_amount[cls_label] -= 1
                # Pop the next unused index of the chosen class.
                if idx_worker[curr_worker] == [None]:
                    idx_worker[curr_worker] = [idx_list[cls_label][0]]
                else:
                    idx_worker[curr_worker] = idx_worker[curr_worker] + [idx_list[cls_label][0]]
                idx_list[cls_label] = idx_list[cls_label][1::]
        data_list = [data[idx_worker[curr_worker]] for curr_worker in range(n_workers)]
        label_list = [label[idx_worker[curr_worker]] for curr_worker in range(n_workers)]
    else:
        raise ValueError("heterogeneity should be mix or dir")
    return [make_dataset(_data, _label, dataset.train, transform) for _data, _label in zip(data_list, label_list)]
|
# -*- coding:utf-8 -*-
__author__ = 'Edwin.Wu'
__date__ = '2017/3/11 00:19'
import xadmin
from .models import CityDict, CourseOrg, Teacher
class CityDictAdmin(object):
    """xadmin options for CityDict: displayed, searchable and filterable fields."""
    list_display = ['name', 'desc', 'add_time']
    search_fields = ['name', 'desc', 'add_time']
    list_filter = ['name', 'desc', 'add_time']


class CourseOrgAdmin(object):
    """xadmin options for CourseOrg."""
    list_display = ['name', 'desc', 'click_nums', 'fav_nums', 'image', 'address', 'city', 'add_time']
    search_fields = ['name', 'desc', 'click_nums', 'fav_nums', 'image', 'address', 'city', 'add_time']
    list_filter = ['name', 'desc', 'click_nums', 'fav_nums', 'image', 'address', 'city', 'add_time']


class TeacherAdmin(object):
    """xadmin options for Teacher."""
    list_display = ['org', 'name', 'work_years', 'work_company', 'work_position', 'points', 'click_nums', 'fav_nums',
                    'add_time']
    search_fields = ['org', 'name', 'work_years', 'work_company', 'work_position', 'points', 'click_nums', 'fav_nums',
                     'add_time']
    list_filter = ['org', 'name', 'work_years', 'work_company', 'work_position', 'points', 'click_nums', 'fav_nums',
                   'add_time']


# Register the option classes with the xadmin site.
xadmin.site.register(CityDict, CityDictAdmin)
xadmin.site.register(CourseOrg, CourseOrgAdmin)
xadmin.site.register(Teacher, TeacherAdmin)
|
// http://mrl.nyu.edu/~perlin/noise/
// http://mrl.nyu.edu/~perlin/noise/
var ImprovedNoise = function () {

	// Ken Perlin's reference permutation table.  It is doubled below so
	// that indexing with `value + 1` never needs a wrap-around modulus.
	var perm = [ 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10,
		23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87,
		174, 20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60, 211,
		133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208,
		89, 18, 169, 200, 196, 135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123, 5,
		202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28, 42, 223, 183, 170, 213, 119,
		248, 152, 2, 44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232,
		178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249,
		14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254, 138, 236, 205,
		93, 222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66, 215, 61, 156, 180 ];

	for ( var i = 0; i < 256; i ++ ) {

		perm[ i + 256 ] = perm[ i ];

	}

	// Quintic smoothstep: 6t^5 - 15t^4 + 10t^3.
	function fade( t ) {

		return t * t * t * ( t * ( t * 6 - 15 ) + 10 );

	}

	// Linear interpolation between a and b by factor t.
	function lerp( t, a, b ) {

		return a + t * ( b - a );

	}

	// Dot product with one of 16 pseudo-random gradient directions.
	function grad( hash, x, y, z ) {

		var h = hash & 15;
		var u = h < 8 ? x : y;
		var v = h < 4 ? y : ( h == 12 || h == 14 ? x : z );
		return ( ( h & 1 ) == 0 ? u : - u ) + ( ( h & 2 ) == 0 ? v : - v );

	}

	return {

		// Classic 3D "improved" Perlin noise; deterministic for a given
		// (x, y, z) since the permutation table is fixed.
		noise: function ( x, y, z ) {

			var cellX = Math.floor( x );
			var cellY = Math.floor( y );
			var cellZ = Math.floor( z );

			// Cube coordinates, wrapped to the table size.
			var X = cellX & 255;
			var Y = cellY & 255;
			var Z = cellZ & 255;

			// Relative position inside the unit cube.
			x -= cellX;
			y -= cellY;
			z -= cellZ;

			var x1 = x - 1;
			var y1 = y - 1;
			var z1 = z - 1;

			var u = fade( x );
			var v = fade( y );
			var w = fade( z );

			// Hashed indices of the 8 cube corners.
			var A = perm[ X ] + Y;
			var AA = perm[ A ] + Z;
			var AB = perm[ A + 1 ] + Z;
			var B = perm[ X + 1 ] + Y;
			var BA = perm[ B ] + Z;
			var BB = perm[ B + 1 ] + Z;

			return lerp( w,
				lerp( v,
					lerp( u, grad( perm[ AA ], x, y, z ), grad( perm[ BA ], x1, y, z ) ),
					lerp( u, grad( perm[ AB ], x, y1, z ), grad( perm[ BB ], x1, y1, z ) ) ),
				lerp( v,
					lerp( u, grad( perm[ AA + 1 ], x, y, z1 ), grad( perm[ BA + 1 ], x1, y, z1 ) ),
					lerp( u, grad( perm[ AB + 1 ], x, y1, z1 ), grad( perm[ BB + 1 ], x1, y1, z1 ) ) ) );

		}

	};

};
export { ImprovedNoise };
|
///////////////////////////////////////////////////////////////////////////
// Copyright © 2014 Esri. All Rights Reserved.
//
// Licensed under the Apache License Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////
define([
'dojo/_base/declare',
'dojo/_base/array',
'dojo/_base/lang',
'dojo/Deferred',
'dojo/json',
'dojo/aspect',
'dojo/topic',
'./LayerInfo',
'esri/request',
'esri/lang',
'./LayerInfoFactory'
], function(declare, array, lang, Deferred, Json, aspect, topic, LayerInfo,
esriRequest, esriLang, LayerInfoFactory) {
return declare(LayerInfo, {
_legendInfo: null,
_sublayerIdent: null,
controlPopupInfo: null,
_jsapiLayerInfos: null,
    constructor: function(operLayer, map, options) {
      // Widget-level per-layer options (keyed by absolute layer id), or null.
      this._layerOptions = options.layerOptions ? options.layerOptions: null;
      // Other init methods depend on '_jsapiLayerInfos', so it must be
      // initialized first.
      this._initJsapiLayerInfos();
      /*jshint unused: false*/
      this.initSubLayerVisible();
      // Init _sublayerIdent: cached per-sub-layer definition expressions.
      this._sublayerIdent = {
        definitions: [],
        empty: true,
        defLoad: new Deferred()
      };
      // Pre-size the definitions array to one slot per sub-layer.
      this._sublayerIdent.definitions[this.layerObject.layerInfos.length - 1] = null;
      // Init popup control state.
      this._initControlPopup();
    },
_initJsapiLayerInfos: function() {
var subLayersSettingArray = this.originOperLayer.layers;
var webmapLayerInfos = array.filter(subLayersSettingArray, function(subLayersSetting) {
return (esriLang.isDefined(subLayersSetting.id) &&
esriLang.isDefined(subLayersSetting.name) &&
esriLang.isDefined(subLayersSetting.minScale) &&
esriLang.isDefined(subLayersSetting.maxScale) &&
esriLang.isDefined(subLayersSetting.parentLayerId) &&
//esriLang.isDefined(subLayersSetting.subLayerIds) &&
esriLang.isDefined(subLayersSetting.defaultVisibility));
});
if(webmapLayerInfos.length > 0) {
this._jsapiLayerInfos = webmapLayerInfos;
} else {
this._jsapiLayerInfos = this.layerObject.layerInfos;
}
},
_idIsInJsapiLayerInfos: function(subId) {
// var filterdLayerInfos = array.filter(this._jsapiLayerInfos, function(jsapiLayerInfo) {
// return jsapiLayerInfo.id === subId;
// });
// return filterdLayerInfos.length > 0 ? true : false;
var jsapiLayerInfo = this._getJsapiLayerInfoById(subId);
return jsapiLayerInfo === null ? false : true;
},
_getJsapiLayerInfoById: function(subId) {
var jsapiLayerInfo = null;
for(var i = 0; i < this._jsapiLayerInfos.length; i++) {
if(this._jsapiLayerInfos[i].id === subId) {
jsapiLayerInfo = this._jsapiLayerInfos[i];
}
}
return jsapiLayerInfo;
},
    _initControlPopup: function() {
      // enablePopup stays undefined until popups are explicitly toggled.
      this.controlPopupInfo = {
        enablePopup: undefined,
        infoTemplates: lang.clone(this.layerObject.infoTemplates)
      };
      // Backup infoTemplates to the layer object so they can be restored
      // later (e.g. after popups have been disabled).
      this.layerObject._infoTemplates = lang.clone(this.layerObject.infoTemplates);
      // Keep the backup and controlPopupInfo in sync whenever the API's
      // setInfoTemplates() is called, and notify child layerInfos that
      // implement _afterSetInfoTemplates().
      aspect.after(this.layerObject, "setInfoTemplates", lang.hitch(this, function(){
        this.layerObject._infoTemplates = lang.clone(this.layerObject.infoTemplates);
        this.controlPopupInfo.infoTemplates = lang.clone(this.layerObject.infoTemplates);
        this.traversal(function(layerInfo) {
          if(layerInfo._afterSetInfoTemplates) {
            layerInfo._afterSetInfoTemplates();
          }
        });
      }));
    },
    initSubLayerVisible: function(visibleLayersParam) {
      // Vote-count map: subLayerId -> number of "visible" votes; a
      // sub-layer is treated as visible when its count is > 0.
      this.subLayerVisible = {};
      for (var i = 0; i < this.layerObject.layerInfos.length; i++) {
        this.subLayerVisible[this.layerObject.layerInfos[i].id] = 0;
      }
      if(visibleLayersParam) {
        // An explicit list passed by the caller wins.
        array.forEach(visibleLayersParam, function(visibleLayer) {
          this.subLayerVisible[visibleLayer]++;
        }, this);
      } else if (this.originOperLayer.visibleLayers) {
        // according to webmap info
        array.forEach(this.originOperLayer.visibleLayers, function(visibleLayer) {
          this.subLayerVisible[visibleLayer]++;
        }, this);
      } else {
        // according to mapserver info (service default visibility)
        array.forEach(this.layerObject.layerInfos, function(layerInfo) {
          if (layerInfo.defaultVisibility) {
            this.subLayerVisible[layerInfo.id]++;
          }
        }, this);
      }
    },
    getExtent: function() {
      // Prefer fullExtent; fall back to initialExtent for services that
      // only report the latter. The result is projected into the map's
      // spatial reference.
      var extent = this.originOperLayer.layerObject.fullExtent ||
        this.originOperLayer.layerObject.initialExtent;
      return this._convertGeometryToMapSpatialRef(extent);
    },
_resetLayerObjectVisiblityBeforeInit: function() {
  // Re-applies visibility configured via the app's layerOptions (if any)
  // before this layerInfo finishes initialising.
  if(this._layerOptions) {
    // reset visibility of the parent (map service) layer.
    if(this._layerOption) {
      this.layerObject.setVisibility(this._layerOption.visible);
    }
    // reset visibility of the sublayers.
    var visibleLayers = [];
    var visibleLayersForUpdateSubLayerVisible;
    var visibleLayersForSetVisibleLayers;
    var convertVisibleLayersResult;
    var haseConfiguredInLayerOptionsflag = false;
    array.forEach(this._jsapiLayerInfos, function(jsapiLayerInfo) {
      // layerOptions keys have the form '<topLayerId>_<subId>'
      var absoluteSublayerId = this.id + '_' + jsapiLayerInfo.id;
      if(esriLang.isDefined(this._layerOptions[absoluteSublayerId]) &&
         !this._isGroupLayerBySubId(jsapiLayerInfo.id)) {
        haseConfiguredInLayerOptionsflag = true;
        if(this._layerOptions[absoluteSublayerId].visible) {
          visibleLayers.push(jsapiLayerInfo.id);
        }
      }
    }, this);
    convertVisibleLayersResult =
      this._converVisibleLayers(visibleLayers);
    visibleLayersForUpdateSubLayerVisible =
      convertVisibleLayersResult.visibleLayersForUpdateSubLayerVisible;
    visibleLayersForSetVisibleLayers =
      convertVisibleLayersResult.visibleLayersForSetVisibleLayers;
    // Only touch the layer when something was actually configured.
    if((visibleLayersForSetVisibleLayers.length > 0 || haseConfiguredInLayerOptionsflag) &&
       this.layerObject.setVisibleLayers) {
      // init sublayerVisible and call initVisible for all subLayers.
      this.initSubLayerVisible(visibleLayersForUpdateSubLayerVisible);
      this.traversal(function(layerInfo) {
        layerInfo.initVisible();
      });
      //recall setVisibleLayers()
      this.layerObject.setVisibleLayers(visibleLayersForSetVisibleLayers);
    }
  }
},
initVisible: function() {
  // Cache the underlying layer's current visibility flag.
  this._visible = this.originOperLayer.layerObject.visible;
},
_setTopLayerVisible: function(visible) {
this.originOperLayer.layerObject.setVisibility(visible);
this._visible = visible;
},
setSubLayerVisible: function(layersVisible) {
  // summary:
  //   set sublayer visibility.
  // description:
  //   parameter: {subLayerId: visible, ...}
  // The leading [-1, -1, -1] is a sentinel that _onVisibleLayersChanged()
  // recognises, letting it tell self-initiated changes apart from external
  // setVisibleLayers() calls.
  var ary = [-1, -1, -1], index;
  // Start from the currently visible sublayers, dropping any sentinel value.
  var visibleLayers = array.filter(this.originOperLayer.layerObject.visibleLayers,
    function(visibleSubId) {
      return visibleSubId !== -1;
    });
  for (var child in layersVisible) {
    if(layersVisible.hasOwnProperty(child) &&
       (typeof layersVisible[child] !== 'function') /*&&child !== 'config'*/) {
      var visible = layersVisible[child];
      var subLayerId = Number(child);
      if (visible) {
        // add to the visible list if not already present
        index = array.indexOf(visibleLayers, subLayerId);
        if (index < 0) {
          visibleLayers.push(subLayerId);
        }
      } else {
        // remove from the visible list if present
        index = array.indexOf(visibleLayers, subLayerId);
        if (index >= 0) {
          visibleLayers.splice(index, 1);
        }
      }
    }
  }
  ary = ary.concat(visibleLayers);
  this.originOperLayer.layerObject.setVisibleLayers(ary);
},
//---------------new section-----------------------------------------
obtainNewSubLayers: function() {
var newSubLayers = [];
var layer = this.originOperLayer.layerObject;
var serviceLayerType = null;
if (layer.declaredClass === 'esri.layers.ArcGISDynamicMapServiceLayer') {
serviceLayerType = "dynamic";
} else {
serviceLayerType = "tiled";
}
array.forEach(layer.layerInfos, function(layerInfo) {
var featureLayer = null;
var url = layer.url + "/" + layerInfo.id;
var featureLayerId = layer.id + "_" + layerInfo.id;
// It is a group layer.
if (layerInfo.subLayerIds && layerInfo.subLayerIds.length > 0) {
/*
newSubLayers.push({
layerObject: featureLayer,
title: layerInfo.name || layerInfo.id || " ",
id: featureLayerId || " ",
subLayers: [],
mapService: {"layerInfo": this, "subId": layerInfo.id},
selfType: 'mapservice_' + serviceLayerType + '_group'
});
*/
// it's a fake layerObject, only has a url intent to show Descriptiong in popupMenu
featureLayer = {
url: url,
empty: true
};
this._addNewSubLayer(newSubLayers,
featureLayer,
featureLayerId,
layerInfo,
serviceLayerType + '_group');
} else {
//featureLayer = new FeatureLayer(url);
// featureLayer.on('load', lang.hitch(this,
// this._addNewSubLayer,
// newSubLayers, index,
// featureLayerId,
// layerInfo.id,
// deferreds[index],
// url,
// layerInfo));
// featureLayer.on('error', lang.hitch(this,
// this._handleErrorSubLayer,
// newSubLayers,
// index,
// featureLayerId,
// layerInfo.id,
// deferreds[index],
// url,
// layerInfo));
featureLayer = {
url: url,
empty: true
};
this._addNewSubLayer(newSubLayers,
featureLayer,
featureLayerId,
layerInfo,
serviceLayerType);
}
}, this);
//afer load all featureLayers.
var finalNewSubLayerInfos = [];
//reorganize newSubLayers, newSubLayers' element now is:
//{
// layerObject:
// title:
// id:
// subLayers:
//}
array.forEach(layer.layerInfos, function(layerInfo, i) {
var parentId = layerInfo.parentLayerId;
//if fetchs a FeatrueLayer error. does not add it;
if (parentId !== -1 && this._idIsInJsapiLayerInfos(layerInfo.id)
/*&& !newSubLayers[layerInfo.id].error && !newSubLayers[parentId].error*/ ) { //****
var parentLayer = getNewSubLayerBySubId(newSubLayers, parentId);
if(parentLayer) {
parentLayer.subLayers.push(newSubLayers[i]);
}
}
}, this);
array.forEach(layer.layerInfos, function(layerInfo, i) {
var subLayerInfo;
//if fetchs a FeatrueLayer error. does not add it;
if (layerInfo.parentLayerId === -1 && this._idIsInJsapiLayerInfos(layerInfo.id)
/*&& !newSubLayers[layerInfo.id].error*/ ) {
subLayerInfo = LayerInfoFactory.getInstance().create(newSubLayers[i]);
finalNewSubLayerInfos.push(subLayerInfo);
subLayerInfo.init();
}
}, this);
return finalNewSubLayerInfos;
function getNewSubLayerBySubId(newSubLayers, subId) {
var newSubLayer = null;
for(var i = 0; i < newSubLayers.length; i++) {
if(newSubLayers[i].mapService.subId === subId) {
newSubLayer = newSubLayers[i];
break;
}
}
return newSubLayer;
}
},
_addNewSubLayer: function(newSubLayers,
featureLayer,
featureLayerId,
layerInfo,
serviceLayerType) {
newSubLayers.push({
layerObject: featureLayer,
title: layerInfo.name || layerInfo.id || " ",
id: featureLayerId || " ",
subLayers: [],
mapService: {
"layerInfo": this,
"subId": layerInfo.id
},
selfType: 'mapservice_' + serviceLayerType,
parentLayerInfo: this
});
},
_handleErrorSubLayer: function(newSubLayers, index, layerId, subId, url, layerInfo) {
  /*jshint unused: false*/
  // Replaces the entry at `index` with a placeholder (layerObject: null)
  // when fetching a sublayer's FeatureLayer fails.
  // NOTE(review): no active caller is visible in this file (the only
  // reference is inside commented-out code) — confirm before removing.
  newSubLayers[index] = {
    layerObject: null,
    title: layerInfo.name || layerInfo.id || " ",
    id: layerId || " ",
    subLayers: [],
    mapService: {
      "layerInfo": this,
      "subId": subId
    }
  };
},
getOpacity: function() {
if (this.layerObject.opacity) {
return this.layerObject.opacity;
} else {
return 1;
}
},
setOpacity: function(opacity) {
if (this.layerObject.setOpacity) {
this.layerObject.setOpacity(opacity);
}
},
getLegendInfo: function(portalUrl) {
  // Returns a Deferred resolving with the service's legend info.
  // The first successful response is cached in this._legendInfo; later
  // calls resolve immediately from the cache.
  var def = new Deferred();
  if (!this._legendInfo) {
    this._legendRequest(portalUrl).then(lang.hitch(this, function(results) {
      this._legendInfo = results.layers;
      def.resolve(this._legendInfo);
    }), function() {
      // request failed; nothing is cached so a later call can retry
      def.reject();
    });
  } else {
    def.resolve(this._legendInfo);
  }
  return def;
},
// about legend.
_legendRequest: function(portalUrl) {
if (this.layerObject.version >= 10.01) {
return this._legendRequestServer();
} else {
return this._legendRequestTools(portalUrl);
}
},
_legendRequestServer: function() {
  // Request the legend from the server's REST /legend endpoint (10.0 SP1+).
  var url = this.layerObject.url + "/legend";
  var params = {};
  params.f = "json";
  // When the layer uses dynamic layers, forward their current state so the
  // legend reflects any overrides.
  if (this.layerObject._params.dynamicLayers) {
    params.dynamicLayers = Json.stringify(this._createDynamicLayers(this.layerObject));
    // "[{}]" means no real overrides were produced; send an empty list so
    // the server returns the default legend.
    if (params.dynamicLayers === "[{}]") {
      params.dynamicLayers = "[]";
    }
  }
  var request = esriRequest({
    url: url,
    content: params,
    handleAs: 'json',
    callbackParamName: 'callback'
  });
  return request;
},
_legendRequestTools: function(portalUrl) {
var url = portalUrl + "sharing/tools/legend?soapUrl=" + this.layerObject.url;
var request = esriRequest({
url: url,
content: {
f: 'json'
},
handleAs: 'json',
callbackParamName: 'callback'
});
return request;
},
_createDynamicLayers: function(layer) {
var dynLayerObjs = [],
dynLayerObj,
infos = layer.dynamicLayerInfos || layer.layerInfos;
array.forEach(infos, function(info) {
dynLayerObj = {
id: info.id
};
dynLayerObj.source = info.source && info.source.toJson();
var definitionExpression;
if (layer.layerDefinitions && layer.layerDefinitions[info.id]) {
definitionExpression = layer.layerDefinitions[info.id];
}
if (definitionExpression) {
dynLayerObj.definitionExpression = definitionExpression;
}
var layerDrawingOptions;
if (layer.layerDrawingOptions && layer.layerDrawingOptions[info.id]) {
layerDrawingOptions = layer.layerDrawingOptions[info.id];
}
if (layerDrawingOptions) {
dynLayerObj.drawingInfo = layerDrawingOptions.toJson();
}
dynLayerObj.minScale = info.minScale || 0;
dynLayerObj.maxScale = info.maxScale || 0;
dynLayerObjs.push(dynLayerObj);
});
return dynLayerObjs;
},
// about layer definition
_getLayerDefinition: function() {
var def = new Deferred();
var url = this.layerObject.url;
this._request(url).then(lang.hitch(this, function(result) {
def.resolve(result);
}), function(err) {
console.error(err.message || err);
def.resolve(null);
});
return def;
},
_getSublayerDefinition: function(subId) {
var def;
if (this._sublayerIdent.definitions[subId]) {
def = new Deferred();
def.resolve(this._sublayerIdent.definitions[subId]);
} else {
def = this._layerAndTableRequest(subId);
}
return def;
},
_layerAndTableRequest: function(subId) {
if (this.layerObject.version >= 10.11) {
return this._allLayerAndTableServer(subId);
} else {
return this._allLayerAndTable(subId);
}
},
// about all layer and table
_allLayerAndTableServer: function(subId) {
  // Fetch definitions for ALL sublayers in a single '/layers' request
  // (10.1 SP1+) and cache them. Concurrent callers wait on the shared
  // this._sublayerIdent.defLoad deferred instead of issuing more requests.
  // NOTE(review): results.layers is indexed by subId, which assumes
  // sublayer ids match array positions — confirm for all services.
  var def = new Deferred();
  var url = this.layerObject.url + '/layers';
  if(this._sublayerIdent.empty) {
    // first caller: issue the request and mark it in flight
    this._sublayerIdent.empty = false;
    this._request(url).then(lang.hitch(this, function(results) {
      this._sublayerIdent.definitions = results.layers;
      this._sublayerIdent.defLoad.resolve();
      def.resolve(this._sublayerIdent.definitions[subId]);
    }), lang.hitch(this, function(err) {
      console.error(err.message || err);
      // reset state so a later call can retry the request
      this._sublayerIdent.defLoad.reject();
      this._sublayerIdent.defLoad = new Deferred();
      this._sublayerIdent.empty = true;
      def.resolve(null);
    }));
  } else {
    // request already in flight (or done): wait for the shared deferred
    this._sublayerIdent.defLoad.then(lang.hitch(this, function() {
      def.resolve(this._sublayerIdent.definitions[subId]);
    }), function(err) {
      console.error(err.message || err);
      def.resolve(null);
    });
  }
  return def;
},
_allLayerAndTable: function(subId) {
var def = new Deferred();
var url = this.layerObject.url + '/' + subId;
this._request(url).then(lang.hitch(this, function(result) {
this._sublayerIdent.definitions[subId] = result;
def.resolve(result);
}), function(err) {
console.error(err.message || err);
def.resolve(null);
});
return def;
},
_request: function(url) {
var request = esriRequest({
url: url,
content: {
f: 'json'
},
handleAs: 'json',
callbackParamName: 'callback'
});
return request;
},
_getSublayerSettingOfWebmap: function(subId) {
// summary:
// get webmap setting for sublayer of mapservice layer;
// description:
// return an object like:{
// id: 2,
// showLegend: false,
// popupInfo: {}
// }
// return null if sublayer has not been configured.
var subLayersSettingArray = this.originOperLayer.layers;
var subLayerSettingArray = array.filter(subLayersSettingArray, function(layerData) {
return layerData.id === subId;
});
return subLayerSettingArray.length === 1 ? subLayerSettingArray[0] : null;
},
_getSublayerShowLegendOfWebmap: function(subId) {
var subLayerSetting = this._getSublayerSettingOfWebmap(subId);
if(subLayerSetting) {
return subLayerSetting.showLegend !== undefined ? subLayerSetting.showLegend : true;
} else {
// default value is true
return true;
}
},
_isGroupLayerBySubId: function(subId) {
  // True when the sublayer has children (i.e. is a group layer).
  // NOTE(review): indexes layerInfos by subId, which assumes sublayer ids
  // equal their array positions — confirm this holds for all services.
  var jsapiLayerInfo = this.layerObject.layerInfos[subId];
  if(jsapiLayerInfo.subLayerIds && jsapiLayerInfo.subLayerIds.length > 0) {
    return true;
  } else {
    return false;
  }
},
_subLayerVisibleChanged: function() {
var changedLayerInfos = [];
this.traversal(function(layerInfo) {
changedLayerInfos.push(layerInfo);
});
topic.publish('layerInfos/layerInfo/visibleChanged', changedLayerInfos);
},
/****************
 * Event
 ***************/
_bindEvent: function() {
  // Extend the base implementation with map-service specific events.
  this.inherited(arguments);
  // `empty` marks the fake layerObject created for group sublayers, which
  // cannot emit layer events.
  if(this.layerObject && !this.layerObject.empty) {
    this.layerObject.on('visible-layers-change',
                        lang.hitch(this, this._onVisibleLayersChanged));
  }
},
_onVisibleLayersChanged: function(event) {
var visibleLayers = event.visibleLayers;
if(visibleLayers[0] === -1 &&
visibleLayers[1] === -1 &&
visibleLayers[2] === -1) {
this._subLayerVisibleChanged();
this._isShowInMapChanged2();
return;
}
var tempVisibleLayers = visibleLayers;
var convertVisibleLayersResult;
var visibleLayersForUpdateSubLayerVisible = [];
var visibleLayersForSetVisibleLayers = [-1, -1, -1];
// array.forEach(visibleLayers, function(subLayerIndex) {
// var layerInfo = this.findLayerInfoById(this.id + '_' + subLayerIndex);
// if(this._isGroupLayerBySubId(subLayerIndex)) {
// //add all subLayer of group layer.
// layerInfo.traversal(function(subLayerInfo) {
// tempVisibleLayers.push(subLayerInfo.originOperLayer.mapService.subId);
// });
// } else {
// // add all parent layers of no-group layer.
// while(layerInfo &&
// layerInfo.originOperLayer.mapService) {
// tempVisibleLayers.push(layerInfo.originOperLayer.mapService.subId);
// layerInfo = layerInfo.parentLayerInfo;
// }
// }
// }, this);
// // remove repetitions.
// array.forEach(tempVisibleLayers, function(subLayerIndex) {
// if(visibleLayersForUpdateSubLayerVisible.indexOf(subLayerIndex) < 0) {
// visibleLayersForUpdateSubLayerVisible.push(subLayerIndex);
// }
// }, this);
// // remove group layer
// array.forEach(visibleLayersForUpdateSubLayerVisible, function(subLayerIndex) {
// if(!this._isGroupLayerBySubId(subLayerIndex)) {
// visibleLayersForSetVisibleLayers.push(subLayerIndex);
// }
// }, this);
convertVisibleLayersResult = this._converVisibleLayers(tempVisibleLayers);
visibleLayersForUpdateSubLayerVisible =
convertVisibleLayersResult.visibleLayersForUpdateSubLayerVisible;
visibleLayersForSetVisibleLayers =
visibleLayersForSetVisibleLayers
.concat(convertVisibleLayersResult.visibleLayersForSetVisibleLayers);
// init sublayerVisible and call initVisible for all subLayers.
this.initSubLayerVisible(visibleLayersForUpdateSubLayerVisible);
this.traversal(function(layerInfo) {
layerInfo.initVisible();
});
//recall setVisibleLayers()
this.layerObject.setVisibleLayers(visibleLayersForSetVisibleLayers);
},
_converVisibleLayers: function(visibleLayers) {
  // Expands a raw visible-sublayer id list into two lists:
  //   visibleLayersForUpdateSubLayerVisible — ids including group layers
  //     and every ancestor (input for initSubLayerVisible()),
  //   visibleLayersForSetVisibleLayers — the same list minus group layers
  //     (the form the JSAPI setVisibleLayers() accepts).
  var result = {
    visibleLayersForUpdateSubLayerVisible: [],
    visibleLayersForSetVisibleLayers: []
  };
  // NOTE(review): tempVisibleLayers aliases the input array, which is
  // appended to while array.forEach iterates it — confirm that relying on
  // forEach not visiting appended items is intentional.
  var tempVisibleLayers = visibleLayers;
  array.forEach(visibleLayers, function(subLayerIndex) {
    var layerInfo = this.findLayerInfoById(this.id + '_' + subLayerIndex);
    if(this._isGroupLayerBySubId(subLayerIndex)) {
      //add all subLayers of the group layer.
      layerInfo.traversal(function(subLayerInfo) {
        tempVisibleLayers.push(subLayerInfo.originOperLayer.mapService.subId);
      });
    } else {
      // add all parent layers of the non-group layer.
      while(layerInfo &&
            layerInfo.originOperLayer.mapService) {
        tempVisibleLayers.push(layerInfo.originOperLayer.mapService.subId);
        layerInfo = layerInfo.parentLayerInfo;
      }
    }
  }, this);
  // remove repetitions.
  array.forEach(tempVisibleLayers, function(subLayerIndex) {
    if(result.visibleLayersForUpdateSubLayerVisible.indexOf(subLayerIndex) < 0) {
      result.visibleLayersForUpdateSubLayerVisible.push(subLayerIndex);
    }
  }, this);
  // remove group layers (setVisibleLayers() does not accept them).
  array.forEach(result.visibleLayersForUpdateSubLayerVisible, function(subLayerIndex) {
    if(!this._isGroupLayerBySubId(subLayerIndex)) {
      result.visibleLayersForSetVisibleLayers.push(subLayerIndex);
    }
  }, this);
  return result;
}
});
});
|
'use strict';
// Protractor page object for the Entertainment page; inherits the shared
// header elements and behavior from Header.
const Header = require('./header.page.js');

class EntertainmentPage extends Header {
  constructor() {
    super();
    // Element map entries use the "descriptive selector" => ElementFinder
    // convention shared with the other page objects.
    this['body header h1'] = element(by.css('h1'));
    this['body results'] = element(by.css('#results-wrapper'));
    // Relative path used when navigating to this page.
    this['path'] = 'en/entertainment.html';
  }
}

// Export a singleton instance; the page object holds no mutable state.
const entertainment = new EntertainmentPage();
module.exports = entertainment;
|
/**
 * Created by tobias on 14.10.15.
 */
$(function() {
  // sets the trigger for the profile modal window
  $("#showProfile").click(showProfile);
  // set the trigger for clicking "Passwort ändern"
  $("#showPW").click(showPW);
});
/**
 * Shows the user profile modal window and sets focus to the first
 * input field once the modal is fully visible.
 */
function showProfile() {
    var $modal = $("#modal-user");
    // focus can only be set after Bootstrap has finished showing the modal
    $modal.on("shown.bs.modal", function () {
        $("#inputUsername").focus();
    });
    showModalWindow($modal);
}
/**
 * Toggles visibility of the password-change input fields.
 */
function showPW() {
    var $fields = $(".invis");
    $fields.toggleClass("vis");
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) The James Hutton Institute 2017-2019
# (c) University of Strathclyde 2019-2020
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@strath.ac.uk
#
# Leighton Pritchard,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# 161 Cathedral Street,
# Glasgow,
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2017-2019 The James Hutton Institute
# Copyright (c) 2019-2020 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Provides the anim subcommand for pyani."""
import datetime
import logging
from argparse import Namespace
from itertools import combinations
from pathlib import Path
from typing import List, NamedTuple, Tuple
from tqdm import tqdm
from pyani import (
PyaniException,
anim,
pyani_config,
pyani_jobs,
run_sge,
run_slurm,
run_multiprocessing as run_mp,
)
from pyani.pyani_files import collect_existing_output
from pyani.pyani_orm import (
Comparison,
PyaniORMException,
add_run,
add_run_genomes,
filter_existing_comparisons,
get_session,
update_comparison_matrices,
)
from pyani.pyani_tools import termcolor
# Convenience struct describing a pairwise comparison job for the SQLAlchemy
# implementation
class ComparisonJob(NamedTuple):
    """Pairwise comparison job for the SQLAlchemy implementation."""

    # NOTE(review): query/subject are annotated str, but generate_joblist()
    # passes Genome ORM objects — confirm the intended type.
    query: str
    subject: str
    filtercmd: str  # delta-filter command line
    nucmercmd: str  # nucmer command line
    outfile: Path  # expected output file (.delta or .filter)
    job: pyani_jobs.Job  # scheduled job (filter job, with nucmer dependency)
# Convenience struct describing an analysis run
class RunData(NamedTuple):
    """Convenience struct describing an analysis run."""

    method: str  # analysis method name, e.g. "ANIm"
    name: str  # human-readable run name
    date: datetime.datetime  # run start time
    cmdline: str  # command line used to invoke the run
class ComparisonResult(NamedTuple):
    """Convenience struct for a single nucmer comparison result."""

    # NOTE(review): qid/sid are annotated float — presumably per-genome
    # identifiers or identities; confirm against the producer of this struct.
    qid: float
    sid: float
    aln_length: int  # total aligned length
    sim_errs: int  # similarity errors reported by nucmer
    pid: float  # percentage identity
    qlen: int  # query genome length
    slen: int  # subject genome length
    qcov: float  # query coverage (aligned fraction of query)
    scov: float  # subject coverage (aligned fraction of subject)
class ProgData(NamedTuple):
    """Convenience struct for comparison program data/info."""

    program: str  # program name, e.g. "nucmer"
    version: str  # program version string
class ProgParams(NamedTuple):
    """Convenience struct for comparison parameters.

    Use default of zero for fragsize or else db queries will not work
    as SQLite/Python nulls do not match up well
    """

    fragsize: str  # fragment size (unused by ANIm; see docstring re: zero)
    maxmatch: bool  # whether nucmer was run with --maxmatch
def subcmd_anim(args: Namespace) -> None:
    """Perform ANIm on all genome files in an input directory.

    :param args: Namespace, command-line arguments

    :raises SystemExit: if the database cannot be reached or updated, or if
        the output directory cannot be created

    Finds ANI by the ANIm method, as described in Richter et al (2009)
    Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106.

    All FASTA format files (selected by suffix) in the input directory
    are compared against each other, pairwise, using NUCmer (whose path must
    be provided).

    For each pairwise comparison, the NUCmer .delta file output is parsed to
    obtain an alignment length and similarity error count for every unique
    region alignment between the two organisms, as represented by
    sequences in the FASTA files. These are processed to calculated aligned
    sequence lengths, average nucleotide identity (ANI) percentages, coverage
    (aligned percentage of whole genome - forward direction), and similarity
    error count for each pairwise comparison.

    The calculated values are deposited in the SQLite3 database being used for
    the analysis.

    For each pairwise comparison the NUCmer output is stored in the output
    directory for long enough to extract summary information, but for each run
    the output is gzip compressed. Once all runs are complete, the outputs
    for each comparison are concatenated into a single gzip archive.
    """
    # Create logger
    logger = logging.getLogger(__name__)

    # Announce the analysis
    logger.info(termcolor("Running ANIm analysis", bold=True))

    # Get current nucmer version
    nucmer_version = anim.get_version(args.nucmer_exe)
    logger.info(termcolor("MUMMer nucmer version: %s", "cyan"), nucmer_version)

    # Use the provided name or make one for the analysis
    start_time = datetime.datetime.now()
    name = args.name or "_".join(["ANIm", start_time.isoformat()])
    logger.info(termcolor("Analysis name: %s", "cyan"), name)

    # Get connection to existing database. This may or may not have data
    logger.debug("Connecting to database %s", args.dbpath)
    try:
        session = get_session(args.dbpath)
    except Exception:
        logger.error(
            "Could not connect to database %s (exiting)", args.dbpath, exc_info=True
        )
        raise SystemExit(1)

    # Add information about this run to the database
    logger.debug("Adding run info to database %s...", args.dbpath)
    try:
        run = add_run(
            session,
            method="ANIm",
            cmdline=args.cmdline,
            date=start_time,
            status="started",
            name=name,
        )
    except PyaniORMException:
        logger.error(
            "Could not add run %s to the database (exiting)", run, exc_info=True
        )
        raise SystemExit(1)
    logger.debug("...added run ID: %s to the database", run)

    # Identify input files for comparison, and populate the database
    logger.debug("Adding genomes for run %s to database...", run)
    try:
        genome_ids = add_run_genomes(
            session, run, args.indir, args.classes, args.labels
        )
    except PyaniORMException:
        logger.error("Could not add genomes to database for run %s (exiting)", run)
        raise SystemExit(1)
    logger.debug("\t...added genome IDs: %s", genome_ids)

    # Generate commandlines for NUCmer analysis and output compression
    logger.info("Generating ANIm command-lines")
    deltadir = args.outdir / pyani_config.ALIGNDIR["ANIm"]
    logger.debug("NUCmer output will be written temporarily to %s", deltadir)

    # Create output directories
    logger.debug("Creating output directory %s", deltadir)
    try:
        deltadir.mkdir(exist_ok=True, parents=True)
    except IOError:
        logger.error(
            "Could not create output directory %s (exiting)", deltadir, exc_info=True
        )
        # Fixed: was `raise SystemError(1)`. SystemExit is used by every other
        # fatal path here; SystemError signals interpreter-internal faults.
        raise SystemExit(1)

    # Get list of genome IDs for this analysis from the database
    logger.info("Compiling genomes for comparison")
    genomes = run.genomes.all()
    logger.debug("Collected %s genomes for this run", len(genomes))

    # Generate all pair combinations of genome IDs as a list of (Genome, Genome) tuples
    logger.info(
        "Compiling pairwise comparisons (this can take time for large datasets)..."
    )
    comparisons = list(combinations(tqdm(genomes, disable=args.disable_tqdm), 2))
    # Fixed typo in log message: "parwise" -> "pairwise"
    logger.info("\t...total pairwise comparisons to be performed: %s", len(comparisons))

    # Check for existing comparisons; if one has been done (for the same
    # software package, version, and setting) we add the comparison to this run,
    # but remove it from the list of comparisons to be performed
    logger.info("Checking database for existing comparison data...")
    comparisons_to_run = filter_existing_comparisons(
        session, run, comparisons, "nucmer", nucmer_version, None, args.maxmatch
    )
    logger.info(
        "\t...after check, still need to run %s comparisons", len(comparisons_to_run)
    )

    # If there are no comparisons to run, update the Run matrices and exit
    # from this function
    if not comparisons_to_run:
        logger.info(
            termcolor(
                "All comparison results present in database (skipping comparisons)",
                "magenta",
            )
        )
        logger.info("Updating summary matrices with existing results")
        update_comparison_matrices(session, run)
        return

    # If we are in recovery mode, we are salvaging output from a previous
    # run, and do not necessarily need to rerun all the jobs. In this case,
    # we prepare a list of output files we want to recover from the results
    # in the output directory.
    if args.recovery:
        logger.warning("Entering recovery mode")
        logger.debug(
            "\tIn this mode, existing comparison output from %s is reused", deltadir
        )
        existingfiles = collect_existing_output(deltadir, "nucmer", args)
        logger.debug(
            "\tIdentified %s existing output files for reuse", len(existingfiles)
        )
    else:
        existingfiles = list()
        logger.debug("\tIdentified no existing output files")

    # Create list of NUCmer jobs for each comparison still to be performed
    logger.info("Creating NUCmer jobs for ANIm")
    joblist = generate_joblist(comparisons_to_run, existingfiles, args)
    logger.debug(
        "Generated %s jobs, %s comparisons", len(joblist), len(comparisons_to_run)
    )

    # Pass jobs to appropriate scheduler
    logger.debug("Passing %s jobs to %s...", len(joblist), args.scheduler)
    run_anim_jobs(joblist, args)
    logger.info("...jobs complete")

    # Process output and add results to database
    # This requires us to drop out of threading/multiprocessing: Python's SQLite3
    # interface doesn't allow sharing connections and cursors
    logger.info("Adding comparison results to database...")
    update_comparison_results(joblist, run, session, nucmer_version, args)
    update_comparison_matrices(session, run)
    logger.info("...database updated.")
def generate_joblist(
    comparisons: List[Tuple], existingfiles: List[Path], args: Namespace,
) -> List[ComparisonJob]:
    """Return list of ComparisonJobs.

    :param comparisons: list of (Genome, Genome) tuples
    :param existingfiles: list of pre-existing nucmer output files
    :param args: Namespace of command-line arguments for the run
    """
    logger = logging.getLogger(__name__)

    joblist = []  # will hold ComparisonJob structs
    for idx, (query, subject) in enumerate(
        tqdm(comparisons, disable=args.disable_tqdm)
    ):
        ncmd, dcmd = anim.construct_nucmer_cmdline(
            query.path,
            subject.path,
            args.outdir,
            args.nucmer_exe,
            args.filter_exe,
            args.maxmatch,
        )
        logger.debug("Commands to run:\n\t%s\n\t%s", ncmd, dcmd)
        # NOTE(review): assumes the output prefix is always the 4th token of
        # the nucmer command line — fragile if construct_nucmer_cmdline changes.
        outprefix = ncmd.split()[3]  # prefix for NUCmer output
        if args.nofilter:
            outfname = Path(outprefix + ".delta")
        else:
            outfname = Path(outprefix + ".filter")
        logger.debug("Expected output file for db: %s", outfname)

        # If we're in recovery mode, we don't want to repeat a computational
        # comparison that already exists, so we check whether the ultimate
        # output is in the set of existing files and, if not, we add the jobs
        # TODO: something faster than a list search (dict or set?)
        # The comparisons collections always gets updated, so that results are
        # added to the database whether they come from recovery mode or are run
        # in this call of the script.
        if args.recovery and outfname.name in existingfiles:
            logger.debug("Recovering output from %s, not building job", outfname)
        else:
            logger.debug("Building job")
            # Build jobs: the delta-filter job depends on the nucmer job
            njob = pyani_jobs.Job("%s_%06d-n" % (args.jobprefix, idx), ncmd)
            fjob = pyani_jobs.Job("%s_%06d-f" % (args.jobprefix, idx), dcmd)
            fjob.add_dependency(njob)
        # NOTE(review): on the recovery path, fjob is unbound for the first
        # recovered comparison (or stale from a previous iteration) — confirm
        # intended handling.
        joblist.append(ComparisonJob(query, subject, dcmd, ncmd, outfname, fjob))
    return joblist
def run_anim_jobs(joblist: List[ComparisonJob], args: Namespace) -> None:
    """Pass ANIm nucmer jobs to the scheduler.

    :param joblist: list of ComparisonJob namedtuples
    :param args: command-line arguments for the run

    :raises PyaniException: if any multiprocessing job exits non-zero
    :raises SystemExit: if the scheduler is not recognised
    """
    logger = logging.getLogger(__name__)
    logger.debug("Scheduler: %s", args.scheduler)
    if args.scheduler == "multiprocessing":
        logger.info("Running jobs with multiprocessing")
        if not args.workers:
            logger.debug("(using maximum number of worker threads)")
        else:
            logger.debug("(using %d worker threads, if available)", args.workers)
        cumval = run_mp.run_dependency_graph(
            [_.job for _ in joblist], workers=args.workers
        )
        if cumval > 0:
            logger.error(
                "At least one NUCmer comparison failed. Please investigate (exiting)"
            )
            raise PyaniException("Multiprocessing run failed in ANIm")
        logger.info("Multiprocessing run completed without error")
    elif args.scheduler.lower() == "sge" or args.scheduler.lower() == "slurm":
        # Fixed: the original message had no %s placeholder, so the scheduler
        # name passed as a lazy-format argument was dropped from the record.
        logger.info("Running jobs with %s", args.scheduler)
        logger.debug("Setting jobarray group size to %d", args.sgegroupsize)
        logger.debug("Joblist contains %d jobs", len(joblist))
        if args.scheduler.lower() == "sge":
            run_sge.run_dependency_graph(
                [_.job for _ in joblist],
                jgprefix=args.jobprefix,
                sgegroupsize=args.sgegroupsize,
                sgeargs=args.sgeargs,
            )
        elif args.scheduler.lower() == "slurm":
            run_slurm.run_dependency_graph(
                [_.job for _ in joblist],
                jgprefix=args.jobprefix,
                sgegroupsize=args.sgegroupsize,
                #sgeargs=args.sgeargs,
            )
    else:
        logger.error(termcolor("Scheduler %s not recognised", "red"), args.scheduler)
        # Fixed: was `raise SystemError(1)`; SystemExit matches the other
        # fatal-error paths and SystemError is for interpreter-internal faults.
        raise SystemExit(1)
def update_comparison_results(
    joblist: List[ComparisonJob], run, session, nucmer_version: str, args: Namespace,
) -> None:
    """Update the Comparison table with the completed result set.

    :param joblist: list of ComparisonJob namedtuples
    :param run: Run ORM object for the current ANIm run
    :param session: active pyanidb session via ORM
    :param nucmer_version: version of nucmer used for the comparison
    :param args: command-line arguments for this run

    The Comparison table stores individual comparison results, one per row.
    """
    logger = logging.getLogger(__name__)

    # Add individual results to Comparison table
    for job in tqdm(joblist, disable=args.disable_tqdm):
        logger.debug("\t%s vs %s", job.query.description, job.subject.description)
        aln_length, sim_errs = anim.parse_delta(job.outfile)
        # Coverage is the aligned fraction of each genome's total length
        qcov = aln_length / job.query.length
        scov = aln_length / job.subject.length
        try:
            pid = 1 - sim_errs / aln_length
        except ZeroDivisionError:  # aln_length was zero (no alignment)
            pid = 0
        run.comparisons.append(
            Comparison(
                query=job.query,
                subject=job.subject,
                aln_length=aln_length,
                sim_errs=sim_errs,
                identity=pid,
                cov_query=qcov,
                cov_subject=scov,
                program="nucmer",
                version=nucmer_version,
                fragsize=None,
                maxmatch=args.maxmatch,
            )
        )

    # Populate db in a single commit once every row has been appended
    logger.debug("Committing results to database")
    session.commit()
|
from datetime import timedelta
from allauth.account.models import EmailAddress
from django.contrib.auth.models import User
from django.contrib.messages import constants as message_const
from django.http.response import HttpResponseRedirect
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from django.views.generic.base import ContextMixin
from django_dynamic_fixture import get, new
from mock import patch
from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Build, Version
from readthedocs.oauth.models import RemoteRepository
from readthedocs.projects import tasks
from readthedocs.projects.exceptions import ProjectSpamError
from readthedocs.projects.models import Domain, Project
from readthedocs.projects.views.mixins import ProjectRelationMixin
from readthedocs.projects.views.private import ImportWizardView
from readthedocs.rtd_tests.base import (
MockBuildTestCase,
RequestFactoryTestMixin,
WizardTestCase,
)
@patch('readthedocs.projects.views.private.trigger_build', lambda x: None)
class TestProfileMiddleware(RequestFactoryTestMixin, TestCase):
    """Spam/ban handling when importing a project through the wizard."""

    wizard_class_slug = 'import_wizard_view'
    url = '/dashboard/import/manual/'

    def setUp(self):
        super().setUp()
        # Wizard form data, flattened below to '<step>-<field>' keys as the
        # form wizard expects.
        data = {
            'basics': {
                'name': 'foobar',
                'repo': 'http://example.com/foobar',
                'repo_type': 'git',
            },
            'extra': {
                'description': 'Describe foobar',
                'language': 'en',
                'documentation_type': 'sphinx',
            },
        }
        self.data = {}
        for key in data:
            self.data.update({('{}-{}'.format(key, k), v)
                              for (k, v) in list(data[key].items())})
        self.data['{}-current_step'.format(self.wizard_class_slug)] = 'extra'

    def test_profile_middleware_no_profile(self):
        """User without profile and isn't banned."""
        req = self.request('/projects/import', method='post', data=self.data)
        req.user = get(User, profile=None)
        resp = ImportWizardView.as_view()(req)
        # Import succeeds: redirect to the new project's detail page.
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['location'], '/projects/foobar/')

    @patch('readthedocs.projects.views.private.ProjectBasicsForm.clean')
    def test_profile_middleware_spam(self, form):
        """User will be banned."""
        # Make form validation raise the spam error, which triggers banning.
        form.side_effect = ProjectSpamError
        req = self.request('/projects/import', method='post', data=self.data)
        req.user = get(User)
        resp = ImportWizardView.as_view()(req)
        # Spammers are redirected to the homepage and flagged as banned.
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['location'], '/')
        self.assertTrue(req.user.profile.banned)

    def test_profile_middleware_banned(self):
        """User is banned."""
        req = self.request('/projects/import', method='post', data=self.data)
        req.user = get(User)
        req.user.profile.banned = True
        req.user.profile.save()
        self.assertTrue(req.user.profile.banned)
        resp = ImportWizardView.as_view()(req)
        # Banned users cannot import: redirected to the homepage.
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['location'], '/')
class TestBasicsForm(WizardTestCase):
    """Tests for the 'basics' step of the project import wizard."""
    # Session key under which the wizard stores its state.
    wizard_class_slug = 'import_wizard_view'
    wizard_class = ImportWizardView
    url = '/dashboard/import/manual/'
    def setUp(self):
        # Minimal valid payload for the "basics" step.
        self.user = get(User)
        self.step_data['basics'] = {
            'name': 'foobar',
            'repo': 'http://example.com/foobar',
            'repo_type': 'git',
        }
    def tearDown(self):
        # Each test may create the 'foobar' project; remove it between runs.
        Project.objects.filter(slug='foobar').delete()
    def request(self, *args, **kwargs):
        # Force all wizard requests to be made as the test user.
        kwargs['user'] = self.user
        return super().request(*args, **kwargs)
    def test_form_import_from_remote_repo(self):
        """A failed remote-repo import re-renders the form pre-filled."""
        self.client.force_login(self.user)
        data = {
            'name': 'pipdocs',
            'repo': 'https://github.com/fail/sauce',
            'repo_type': 'git',
            'remote_repository': '1234',
        }
        resp = self.client.post(
            '/dashboard/import/',
            data,
        )
        self.assertEqual(resp.status_code, 200)
        # The form is filled with the previous information
        self.assertEqual(
            resp.context['form'].initial,
            data,
        )
    def test_form_pass(self):
        """Only submit the basics."""
        resp = self.post_step('basics')
        self.assertIsInstance(resp, HttpResponseRedirect)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['location'], '/projects/foobar/')
        proj = Project.objects.get(name='foobar')
        self.assertIsNotNone(proj)
        # Every submitted field should be persisted verbatim on the project.
        for (key, val) in list(self.step_data['basics'].items()):
            self.assertEqual(getattr(proj, key), val)
        self.assertEqual(proj.documentation_type, 'sphinx')
    def test_remote_repository_is_added(self):
        """A remote repository owned by the user is linked to the project."""
        remote_repo = get(RemoteRepository, users=[self.user])
        self.step_data['basics']['remote_repository'] = remote_repo.pk
        resp = self.post_step('basics')
        self.assertIsInstance(resp, HttpResponseRedirect)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['location'], '/projects/foobar/')
        proj = Project.objects.get(name='foobar')
        self.assertIsNotNone(proj)
        self.assertEqual(proj.remote_repository, remote_repo)
    def test_remote_repository_is_not_added_for_wrong_user(self):
        """A remote repository owned by another user is rejected."""
        user = get(User)
        remote_repo = get(RemoteRepository, users=[user])
        self.step_data['basics']['remote_repository'] = remote_repo.pk
        resp = self.post_step('basics')
        self.assertWizardFailure(resp, 'remote_repository')
    def test_form_missing(self):
        """Submit form with missing data, expect to get failures."""
        self.step_data['basics'] = {'advanced': True}
        resp = self.post_step('basics')
        self.assertWizardFailure(resp, 'name')
        self.assertWizardFailure(resp, 'repo_type')
class TestAdvancedForm(TestBasicsForm):
    """Tests for the two-step (basics + extra) wizard flow.

    Inherits the basics-step fixtures and overrides them to tick the
    'advanced' checkbox, which makes the wizard show the 'extra' step.
    """
    def setUp(self):
        super().setUp()
        # Enabling 'advanced' routes the wizard through the 'extra' step.
        self.step_data['basics']['advanced'] = True
        self.step_data['extra'] = {
            'description': 'Describe foobar',
            'language': 'en',
            'documentation_type': 'sphinx',
            'tags': 'foo, bar, baz',
        }
    def test_form_pass(self):
        """Test all forms pass validation."""
        resp = self.post_step('basics')
        self.assertWizardResponse(resp, 'extra')
        # Carry the wizard session over from the first step's response.
        resp = self.post_step('extra', session=list(resp._request.session.items()))
        self.assertIsInstance(resp, HttpResponseRedirect)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['location'], '/projects/foobar/')
        proj = Project.objects.get(name='foobar')
        self.assertIsNotNone(proj)
        data = self.step_data['basics']
        # 'advanced' and 'tags' are wizard controls, not Project attributes.
        del data['advanced']
        del self.step_data['extra']['tags']
        self.assertCountEqual(
            [tag.name for tag in proj.tags.all()],
            ['bar', 'baz', 'foo'],
        )
        data.update(self.step_data['extra'])
        for (key, val) in list(data.items()):
            self.assertEqual(getattr(proj, key), val)
    def test_form_missing_extra(self):
        """Submit extra form with missing data, expect to get failures."""
        # Remove extra data to trigger validation errors
        self.step_data['extra'] = {}
        resp = self.post_step('basics')
        self.assertWizardResponse(resp, 'extra')
        resp = self.post_step('extra', session=list(resp._request.session.items()))
        self.assertWizardFailure(resp, 'language')
        self.assertWizardFailure(resp, 'documentation_type')
    def test_remote_repository_is_added(self):
        """Remote repo link survives the two-step flow (overrides parent)."""
        remote_repo = get(RemoteRepository, users=[self.user])
        self.step_data['basics']['remote_repository'] = remote_repo.pk
        resp = self.post_step('basics')
        self.assertWizardResponse(resp, 'extra')
        resp = self.post_step('extra', session=list(resp._request.session.items()))
        self.assertIsInstance(resp, HttpResponseRedirect)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['location'], '/projects/foobar/')
        proj = Project.objects.get(name='foobar')
        self.assertIsNotNone(proj)
        self.assertEqual(proj.remote_repository, remote_repo)
    @patch(
        'readthedocs.projects.views.private.ProjectExtraForm.clean_description',
        create=True,
    )
    def test_form_spam(self, mocked_validator):
        """Don't add project on a spammy description."""
        # Old accounts are not banned, only the project is rejected.
        self.user.date_joined = timezone.now() - timedelta(days=365)
        self.user.save()
        mocked_validator.side_effect = ProjectSpamError
        with self.assertRaises(Project.DoesNotExist):
            proj = Project.objects.get(name='foobar')
        resp = self.post_step('basics')
        self.assertWizardResponse(resp, 'extra')
        resp = self.post_step('extra', session=list(resp._request.session.items()))
        self.assertIsInstance(resp, HttpResponseRedirect)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['location'], '/')
        with self.assertRaises(Project.DoesNotExist):
            proj = Project.objects.get(name='foobar')
        self.assertFalse(self.user.profile.banned)
    @patch(
        'readthedocs.projects.views.private.ProjectExtraForm.clean_description',
        create=True,
    )
    def test_form_spam_ban_user(self, mocked_validator):
        """Don't add spam and ban new user."""
        # A just-created account posting spam gets banned outright.
        self.user.date_joined = timezone.now()
        self.user.save()
        mocked_validator.side_effect = ProjectSpamError
        with self.assertRaises(Project.DoesNotExist):
            proj = Project.objects.get(name='foobar')
        resp = self.post_step('basics')
        self.assertWizardResponse(resp, 'extra')
        resp = self.post_step('extra', session=list(resp._request.session.items()))
        self.assertIsInstance(resp, HttpResponseRedirect)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['location'], '/')
        with self.assertRaises(Project.DoesNotExist):
            proj = Project.objects.get(name='foobar')
        self.assertTrue(self.user.profile.banned)
class TestImportDemoView(MockBuildTestCase):
    """Test project import demo view."""
    fixtures = ['test_data', 'eric']
    def setUp(self):
        self.client.login(username='eric', password='test')
    def test_import_demo_pass(self):
        """First demo import redirects to the new project with a success message."""
        resp = self.client.get('/dashboard/import/manual/demo/')
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['Location'], '/projects/eric-demo/')
        resp_redir = self.client.get(resp['Location'])
        self.assertEqual(resp_redir.status_code, 200)
        messages = list(resp_redir.context['messages'])
        self.assertEqual(messages[0].level, message_const.SUCCESS)
    def test_import_demo_already_imported(self):
        """Import demo project multiple times, expect failure 2nd post."""
        self.test_import_demo_pass()
        project = Project.objects.get(slug='eric-demo')
        resp = self.client.get('/dashboard/import/manual/demo/')
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['Location'], '/projects/eric-demo/')
        resp_redir = self.client.get(resp['Location'])
        self.assertEqual(resp_redir.status_code, 200)
        messages = list(resp_redir.context['messages'])
        self.assertEqual(messages[0].level, message_const.SUCCESS)
        # Still the same project instance: no duplicate was created.
        self.assertEqual(
            project,
            Project.objects.get(slug='eric-demo'),
        )
    def test_import_demo_another_user_imported(self):
        """Import demo project after another user, expect success."""
        self.test_import_demo_pass()
        project = Project.objects.get(slug='eric-demo')
        self.client.logout()
        self.client.login(username='test', password='test')
        resp = self.client.get('/dashboard/import/manual/demo/')
        self.assertEqual(resp.status_code, 302)
        # Demo slug is namespaced per user, so no collision with eric's.
        self.assertEqual(resp['Location'], '/projects/test-demo/')
        resp_redir = self.client.get(resp['Location'])
        self.assertEqual(resp_redir.status_code, 200)
        messages = list(resp_redir.context['messages'])
        self.assertEqual(messages[0].level, message_const.SUCCESS)
    def test_import_demo_imported_renamed(self):
        """If the demo project is renamed, don't import another."""
        self.test_import_demo_pass()
        project = Project.objects.get(slug='eric-demo')
        project.name = 'eric-demo-foobar'
        project.save()
        resp = self.client.get('/dashboard/import/manual/demo/')
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['Location'], '/projects/eric-demo/')
        resp_redir = self.client.get(resp['Location'])
        self.assertEqual(resp_redir.status_code, 200)
        messages = list(resp_redir.context['messages'])
        self.assertEqual(messages[0].level, message_const.SUCCESS)
        self.assertRegex(
            messages[0].message,
            r'already imported',
        )
        self.assertEqual(
            project,
            Project.objects.get(slug='eric-demo'),
        )
    def test_import_demo_imported_duplicate(self):
        """
        If a project exists with same name, expect a failure importing demo.
        This should be edge case, user would have to import a project (not the
        demo project), named user-demo, and then manually enter the demo import
        URL, as the onboarding isn't shown when projects > 0
        """
        self.test_import_demo_pass()
        project = Project.objects.get(slug='eric-demo')
        # Change the repo so the existing project no longer matches the demo.
        project.repo = 'file:///foobar'
        project.save()
        # Setting the primary and verified email of the test user.
        user = User.objects.get(username='eric')
        user_email = get(EmailAddress, user=user, primary=True, verified=True)
        resp = self.client.get('/dashboard/import/manual/demo/')
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['Location'], '/dashboard/')
        resp_redir = self.client.get(resp['Location'])
        self.assertEqual(resp_redir.status_code, 200)
        messages = list(resp_redir.context['messages'])
        self.assertEqual(messages[0].level, message_const.ERROR)
        self.assertRegex(
            messages[0].message,
            r'There was a problem',
        )
        self.assertEqual(
            project,
            Project.objects.get(slug='eric-demo'),
        )
class TestPublicViews(MockBuildTestCase):

    """Smoke tests for project views that need no authentication."""

    def setUp(self):
        self.pip = get(Project, slug='pip')

    def test_project_download_media(self):
        """Requesting build media redirects to the artifact location."""
        media_url = reverse(
            'project_download_media',
            args=[self.pip.slug, 'pdf', LATEST],
        )
        resp = self.client.get(media_url)
        self.assertEqual(resp.status_code, 302)
class TestPrivateViews(MockBuildTestCase):
    """Tests for dashboard views that require a logged-in project owner."""
    def setUp(self):
        self.user = new(User, username='eric')
        self.user.set_password('test')
        self.user.save()
        self.client.login(username='eric', password='test')
    def test_versions_page(self):
        """The versions page renders, including versions with a slash."""
        pip = get(Project, slug='pip', users=[self.user])
        pip.versions.create(verbose_name='1.0')
        response = self.client.get('/projects/pip/versions/')
        self.assertEqual(response.status_code, 200)
        # Test if the versions page works with a version that contains a slash.
        # That broke in the past, see issue #1176.
        pip.versions.create(verbose_name='1.0/with-slash')
        response = self.client.get('/projects/pip/versions/')
        self.assertEqual(response.status_code, 200)
    def test_delete_project(self):
        """Deleting a project removes it and broadcasts directory cleanup."""
        project = get(Project, slug='pip', users=[self.user])
        response = self.client.get('/dashboard/pip/delete/')
        self.assertEqual(response.status_code, 200)
        with patch('readthedocs.projects.models.broadcast') as broadcast:
            response = self.client.post('/dashboard/pip/delete/')
            self.assertEqual(response.status_code, 302)
            self.assertFalse(Project.objects.filter(slug='pip').exists())
            # Cleanup of the on-disk docs must be fanned out to all app servers.
            broadcast.assert_called_with(
                type='app',
                task=tasks.remove_dirs,
                args=[(project.doc_path,)],
            )
    def test_delete_superproject(self):
        """Deleting a superproject warns about its subprojects."""
        super_proj = get(Project, slug='pip', users=[self.user])
        sub_proj = get(Project, slug='test-sub-project', users=[self.user])
        self.assertFalse(super_proj.subprojects.all().exists())
        super_proj.add_subproject(sub_proj)
        response = self.client.get('/dashboard/pip/delete/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            'This project <a href="/dashboard/pip/subprojects/">has subprojects</a> under it. '
            'Deleting this project will make them to become regular projects. '
            'This will break the URLs of all its subprojects and they will be served normally as other projects.',
            count=1,
            html=True,
        )
    def test_subproject_create(self):
        """Adding a subproject broadcasts a symlink task for the parent."""
        project = get(Project, slug='pip', users=[self.user])
        subproject = get(Project, users=[self.user])
        with patch('readthedocs.projects.views.private.broadcast') as broadcast:
            response = self.client.post(
                '/dashboard/pip/subprojects/create/',
                data={'child': subproject.pk},
            )
            self.assertEqual(response.status_code, 302)
            broadcast.assert_called_with(
                type='app',
                task=tasks.symlink_subproject,
                args=[project.pk],
            )
class TestPrivateMixins(MockBuildTestCase):

    """Tests for the project-relation view mixin."""

    def setUp(self):
        self.project = get(Project, slug='kong')
        self.domain = get(Domain, project=self.project)

    def test_project_relation(self):
        """Class using project relation mixin class."""
        class DomainRelationView(ProjectRelationMixin, ContextMixin):
            model = Domain

            def get_project_queryset(self):
                # Don't test this as a view with a request.user
                return Project.objects.all()

        relation_view = DomainRelationView()
        relation_view.kwargs = {'project_slug': 'kong'}
        self.assertEqual(relation_view.get_project(), self.project)
        self.assertEqual(relation_view.get_queryset().first(), self.domain)
        self.assertEqual(relation_view.get_context_data()['project'], self.project)
class TestBadges(TestCase):
    """Test a static badge asset is served for each build."""
    def setUp(self):
        # Template path for the static badge SVGs (style, status).
        self.BADGE_PATH = 'projects/badges/%s-%s.svg'
        self.project = get(Project, slug='badgey')
        self.version = Version.objects.get(project=self.project)
        self.badge_url = reverse('project_badge', args=[self.project.slug])
    def test_unknown_badge(self):
        """No builds (or no project) yields the 'unknown' badge."""
        res = self.client.get(self.badge_url, {'version': self.version.slug})
        self.assertContains(res, 'unknown')
        # Unknown project
        unknown_project_url = reverse('project_badge', args=['fake-project'])
        res = self.client.get(unknown_project_url, {'version': 'latest'})
        self.assertContains(res, 'unknown')
    def test_passing_badge(self):
        """A successful build yields the 'passing' badge as SVG."""
        get(Build, project=self.project, version=self.version, success=True)
        res = self.client.get(self.badge_url, {'version': self.version.slug})
        self.assertContains(res, 'passing')
        self.assertEqual(res['Content-Type'], 'image/svg+xml')
    def test_failing_badge(self):
        """A failed build yields the 'failing' badge."""
        get(Build, project=self.project, version=self.version, success=False)
        res = self.client.get(self.badge_url, {'version': self.version.slug})
        self.assertContains(res, 'failing')
    def test_plastic_failing_badge(self):
        """The 'plastic' style parameter selects the plastic badge variant."""
        get(Build, project=self.project, version=self.version, success=False)
        res = self.client.get(self.badge_url, {'version': self.version.slug, 'style': 'plastic'})
        self.assertContains(res, 'failing')
        # The plastic badge has slightly more rounding
        self.assertContains(res, 'rx="4"')
    def test_social_passing_badge(self):
        """The 'social' style parameter selects the social badge variant."""
        get(Build, project=self.project, version=self.version, success=True)
        res = self.client.get(self.badge_url, {'version': self.version.slug, 'style': 'social'})
        self.assertContains(res, 'passing')
        # The social badge (but not the other badges) has this element
        self.assertContains(res, 'rlink')
    def test_badge_redirect(self):
        """Underscored slugs redirect to their dashed equivalents."""
        # Test that a project with an underscore redirects
        badge_url = reverse('project_badge', args=['project_slug'])
        resp = self.client.get(badge_url, {'version': 'latest'})
        self.assertEqual(resp.status_code, 302)
        self.assertTrue('project-slug' in resp['location'])
class TestTags(TestCase):

    """Project tag listing behaviour."""

    def test_project_filtering_work_with_tags_with_space_in_name(self):
        """A tag containing spaces is reachable through its slugified URL."""
        project = get(Project, slug='pip')
        project.tags.add('tag with space')
        resp = self.client.get('/projects/tags/tag-with-space/')
        self.assertContains(resp, '"/projects/pip/"')
|
'''While new node types are generally created within the initializePlugin
call of a plugin, there is actually no requirement that this is the case.
Pymel therefore needs to be able to handle nodes created at any time - at least
for pymel.nodetypes.MyNodeType style access - so this plugin allows dynamic
node creation at any time, to test this.'''
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.utils import PY2
import pymel.api.plugins as plugins
import maya.OpenMaya as om
import maya.OpenMayaMPx as mpx
# Registry of node classes created at runtime, so uninitializePlugin can
# deregister them when the plugin is unloaded.
createdNodes = []
class addDynamicNodeCommand(plugins.Command):
    """Maya command that registers a new dependency-node type on demand."""
    _name = 'addDynamicNode'
    @classmethod
    def createSyntax(cls):
        # Single required string argument: the new node type's name.
        syntax = om.MSyntax()
        # the node type name
        syntax.addArg(om.MSyntax.kString)
        return syntax
    def doIt(self, args):
        # Parse the node type name from the command arguments and create it.
        argParser = om.MArgParser(self.syntax(), args)
        nodeTypeName = argParser.commandArgumentString(0)
        print("creating node type: {}".format(nodeTypeName))
        createNode(nodeTypeName)
def createNode(nodeName, plugin=None):
    """Define and register a new DependNode subclass named ``nodeName``.

    If ``plugin`` is not given, the already-loaded 'dynamicNodes' plugin is
    looked up. The created class is recorded in ``createdNodes`` so it can
    be deregistered on plugin unload.
    """
    if PY2 and isinstance(nodeName, unicode):
        # Maya's Python 2 API wants byte strings for type names.
        nodeName = nodeName.encode('ascii')
    class NewNode(plugins.DependNode):
        _name = nodeName
        @classmethod
        def initialize(cls):
            # Give the node a single float attribute so it is non-trivial.
            nAttr = om.MFnNumericAttribute()
            cls.aFloat = nAttr.create("aFloat", "af", om.MFnNumericData.kFloat, 0.0)
            cls.addAttribute(cls.aFloat)
    if plugin is None:
        plugin = mpx.MFnPlugin.findPlugin('dynamicNodes')
    # Rename the class so pymel sees the requested node type name.
    NewNode.__name__ = nodeName
    NewNode.register(plugin)
    createdNodes.append(NewNode)
## initialize the script plug-in
def initializePlugin(mobject):
    """Maya entry point: register the command and one initial node type."""
    addDynamicNodeCommand.register(mobject)
    createNode('initialNode', plugin=mobject)
# uninitialize the script plug-in
def uninitializePlugin(mobject):
    """Maya exit point: deregister all dynamically created node types."""
    for nodeType in createdNodes:
        nodeType.deregister(mobject)
    addDynamicNodeCommand.deregister(mobject)
|
/****************************************************************************
* Copyright 2017 Gorgon Meducer (Email:embedded_zhuoran@hotmail.com) *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
#ifndef __TELEGRAPH_ENGINE_H__
#define __TELEGRAPH_ENGINE_H__
/*============================ INCLUDES ======================================*/
#include ".\app_cfg.h"
#if USE_SERVICE_TELEGRAPH_ENGINE == ENABLED
#include "..\..\memory\block\block.h"
#include "..\..\time\multiple_delay\multiple_delay.h"
/*============================ MACROS ========================================*/
/*============================ MACROFIED FUNCTIONS ===========================*/
/* Convenience wrapper: build a telegraph_engine_cfg_t on the stack from the
 * variadic designated initialisers and pass it to TELEGRAPH_ENGINE.Init(). */
#define TELEGRAPH_ENGINE_CFG(__ADDR, ...)                                   \
            do {                                                            \
                telegraph_engine_cfg_t tCFG = {                             \
                    __VA_ARGS__                                             \
                };                                                          \
                                                                            \
                TELEGRAPH_ENGINE.Init((__ADDR), &tCFG);                     \
            } while(false)
/*============================ TYPES =========================================*/
/* Forward declarations: the two classes reference each other. */
declare_class(telegraph_t)
declare_class(telegraph_engine_t)
//! \name telegraph report status
//! @{
typedef enum {
    TELEGRAPH_ERROR = -1, //!< error detected during checking
    TELEGRAPH_RECEIVED = 0, //!< expected telegraph is received
    TELEGRAPH_TIMEOUT, //!< timeout
    TELEGRAPH_CANCELLED, //!< telegraph is cancelled by user
} telegraph_report_t;
//! @}
//! \name telegraph report event handler prototype (delegate)
//! \param tStatus the reason to report
//! \param ptTelegraph target telegraph
//! \retval true it's safe to free this telegraph
//! \retval false do not free the telegraph
typedef bool telegraph_handler_t (telegraph_report_t tStatus, telegraph_t *ptTelegraph);
//! \name abstruct class telegraph, user telegraph should inherit from this class
//! @{
extern_class(telegraph_t,
    which(implement(__single_list_node_t)),
    telegraph_engine_t *ptEngine;                   //!< engine this telegraph belongs to
    telegraph_handler_t *fnHandler;                 //!< report callback (may allow freeing)
    multiple_delay_item_t *ptDelayItem;             //!< timeout tracking item
    uint32_t wTimeout;                              //!< timeout in delay-service ticks
    block_t *ptOUTData;                             //!< outgoing payload block
    block_t *ptINData;                              //!< received payload block
)
end_extern_class(telegraph_t)
//! @}
//! Result of feeding one buffer to the frame decoder.
typedef enum {
    FRAME_UNKNOWN = -1,
    FRAME_UNMATCH = 0,
    FRAME_RECEIVED = 1,
} frame_parsing_report_t;
//! Frame decoder prototype: tries to match a buffer against a telegraph.
typedef frame_parsing_report_t telegraph_parser_t(
                            block_t **pptBlock, //! memory buffer
                            telegraph_t *ptItem); //! target telegraph
//! Low-level transmit routine supplied by the user (fsm style).
typedef fsm_rt_t telegraph_engine_low_level_write_io_t(telegraph_t *ptItem, void *pObj);
extern_simple_fsm(telegraph_engine_task,
    def_params(
        telegraph_t *ptCurrent;                     //!< telegraph being transmitted
    )
)
//! \name telegraph engine
//! @{
extern_class(telegraph_engine_t,
    which( inherit(fsm(telegraph_engine_task))
           inherit(pool_t)),
    //! Queue of telegraphs waiting for a matching incoming frame.
    struct {
        telegraph_t *ptHead;
        telegraph_t *ptTail;
    } Listener;
    //! Queue of telegraphs waiting to be written out.
    struct {
        telegraph_t *ptHead;
        telegraph_t *ptTail;
    } Transmitter;
    telegraph_parser_t *fnDecoder;                  //!< frame decoder
    multiple_delay_t *ptDelayService;               //!< timeout service
    telegraph_engine_low_level_write_io_t *fnWriteIO; //!< user transmit hook
    void *pIOTag;                                   //!< opaque tag passed to fnWriteIO
)
end_extern_class(telegraph_engine_t)
//! @}
//! Configuration passed to TELEGRAPH_ENGINE.Init().
typedef struct {
    mem_block_t tTelegraphPool;                     //!< backing storage for telegraphs
    uint32_t wTelegraphSize;                        //!< size of one telegraph object
    telegraph_parser_t *fnDecoder;
    multiple_delay_t *ptDelayService;
    telegraph_engine_low_level_write_io_t *fnWriteIO;
    void *pIOTag;
} telegraph_engine_cfg_t;
//! Public interface (vtable) of the telegraph engine service.
def_interface(i_telegraph_engine_t)
    bool (*Init) ( telegraph_engine_t *ptObj,
                                telegraph_engine_cfg_t *ptCFG);
    //! Must be polled; drives the transmit state machine.
    fsm_rt_t (*Task) ( telegraph_engine_t *ptObj);
    struct {
        //! Feed a received buffer to the decoder; returns the unconsumed block.
        block_t * (*Parse) ( block_t *ptBlock, telegraph_engine_t *ptObj);
    } Dependent;
    struct {
        //! Allocate a telegraph from the pool and bind handler/timeout/data.
        telegraph_t *(*New) ( telegraph_engine_t *ptObj,
                                        telegraph_handler_t *fnHandler,
                                        uint32_t wTimeout,
                                        block_t *ptData);
        bool (*TryToSend)( telegraph_t *ptTelegraph,
                                        bool bPureListener);
        bool (*Listen) ( telegraph_t *ptTelegraph);
        struct {
            struct {
                block_t *(*Get)(telegraph_t *ptTelegraph);
                void (*Reset)(telegraph_t *ptTelegraph);
            }Input;
            struct {
                block_t *(*Get)(telegraph_t *ptTelegraph);
                void (*Reset)(telegraph_t *ptTelegraph);
            }Output;
        } Data;
        bool (*IsWriteOnly) (telegraph_t *ptTelegraph);
        bool (*IsReadOnly) (telegraph_t *ptTelegraph);
    } Telegraph;
end_def_interface(i_telegraph_engine_t)
/*============================ GLOBAL VARIABLES ==============================*/
extern const i_telegraph_engine_t TELEGRAPH_ENGINE;
/*============================ PROTOTYPES ====================================*/
#endif
#endif
/* EOF */
|
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
var config_1 = __importDefault(require("../../config"));
var AWS_DYNAMODB_READ_CAPACITY_UNITS = config_1.default.AWS_DYNAMODB_READ_CAPACITY_UNITS, AWS_DYNAMODB_WRITE_CAPACITY_UNITS = config_1.default.AWS_DYNAMODB_WRITE_CAPACITY_UNITS, tableNames = config_1.default.tableNames;
exports.collectionsTable = {
AttributeDefinitions: [
{
AttributeName: 'application',
AttributeType: 'S',
},
{
AttributeName: 'userCollectionVersion',
AttributeType: 'S',
},
],
KeySchema: [
{
AttributeName: 'application',
KeyType: 'HASH',
},
{
AttributeName: 'userCollectionVersion',
KeyType: 'RANGE',
},
],
ProvisionedThroughput: {
ReadCapacityUnits: AWS_DYNAMODB_READ_CAPACITY_UNITS,
WriteCapacityUnits: AWS_DYNAMODB_WRITE_CAPACITY_UNITS,
},
TableName: tableNames.collections,
StreamSpecification: {
StreamEnabled: false,
},
};
|
import os
import re
import sys
import logging
import platform
try:
    # Opt-out switch: when set, the package may be imported outside of a
    # running Nuke session (e.g. for docs builds or unit tests).
    os.environ['NON_PRODUCTION_CONTEXT']
except KeyError:
    # Fixed: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only a missing variable means "production context".
    # Verify that the interpreter actually lives inside a Nuke executable.
    if platform.system() == 'Darwin':
        application = r'Nuke\d+\.\d+v\d+.app'
    elif platform.system() == 'Windows':
        application = r'Nuke\d+\.\d+.exe'
    else:
        raise RuntimeError('OS {0} is not supported'.format(platform.system()))
    match = re.search(application, sys.executable)
    if not match:
        raise RuntimeError('Import nuketemplate from within Nuke')
__version__ = '0.2.0'
__all__ = []
def create_logger():
    """Return the module logger, configured to emit INFO+ to stdout.

    Previously attached handlers are discarded so repeated calls do not
    duplicate output; propagation to ancestor loggers is disabled.
    """
    log = logging.getLogger(__name__)
    # Drop stale handlers from earlier calls before attaching a fresh one.
    log.handlers = []
    stdout_handler = logging.StreamHandler(stream=sys.stdout)
    stdout_handler.setFormatter(
        logging.Formatter(
            fmt='%(asctime)s: %(name)s: '
                '%(levelname)s: %(message)s',
            datefmt='%d/%m/%Y %I:%M:%S',
        )
    )
    log.addHandler(stdout_handler)
    log.setLevel(logging.INFO)
    log.propagate = False
    return log
def import_nuke():
    """Import and return the ``nuke`` module.

    Outside of a Nuke session the import fails; in that case the original
    ImportError is re-raised unless ``NON_PRODUCTION_CONTEXT`` is set in the
    environment, in which case the function falls through and returns None.
    """
    try:
        import nuke
    except ImportError as import_error:
        if 'NON_PRODUCTION_CONTEXT' not in os.environ:
            raise import_error
    else:
        return nuke
# Module-level logger shared by the package.
logger = create_logger()
|
export { default as Image } from './Image'
export { default as Logo } from './Logo'
|
$(document).ready(function(){

    /**
     * Populate one input of the edit form.
     * A null/undefined value clears the field and disables it (the grader
     * has no reading for it); otherwise the field is enabled and filled.
     * Replaces five copy-pasted if/else blocks with one helper.
     */
    function setEditField(nombreCampo, valorCampo) {
        var $campo = $('#formu-editar-grader input[name="' + nombreCampo + '"]');
        if (valorCampo == null) {
            $campo.val("");
            $campo.prop('disabled', true);
        } else {
            $campo.prop('disabled', false);
            $campo.val(valorCampo);
        }
    }

    // Submit handler: persist the edited grader via AJAX.
    $("#formu-editar-grader").submit(function (e) {
        e.preventDefault();
        var datos = new FormData($(this)[0]);
        $.ajax({
            url: 'ajax/ajaxGrader.php',
            type: 'POST',
            data: datos,
            processData: false,
            contentType: false,
            success: function(respuesta) {
                console.log(respuesta);
                // Strict comparison instead of the original loose `==`.
                if (respuesta === "ok") {
                    Swal.fire(
                        'Excelente!',
                        'datos actualizados con exito!',
                        'success'
                    ).then((result) => {
                        if (result.value) {
                            window.location = "grader";
                        }
                    });
                }
            }
        });
    });

    // Click handler: load a grader's data into the edit form.
    $("body #mi_lista").on("click", ".btnEditarGrader", function(){
        var idGrader = $(this).attr("idGrader");
        var datos = new FormData();
        datos.append("id_grader", idGrader);
        datos.append("tipoOperacion", "editarGrader");
        $.ajax({
            url: 'ajax/ajaxGrader.php',
            type: 'POST',
            data: datos,
            processData: false,
            contentType: false,
            success: function(respuesta) {
                var valor = JSON.parse(respuesta);
                // Always-present fields.
                $('#formu-editar-grader input[name="id_grader"]').val(valor.id_unidad_balanza);
                $('#formu-editar-grader input[name="Cliente"]').val(valor.Cliente);
                $('#formu-editar-grader input[name="Grader"]').val(valor.Grader);
                // Optional readings: disabled when the backend sends null.
                setEditField("Calidad", valor.Calidad);
                setEditField("Alimentacion", valor.Alimentacion);
                setEditField("Aceleracion", valor.Aceleracion);
                setEditField("Pesaje", valor.Pesaje);
                setEditField("Descarga", valor.Descarga);
            }
        });
    });

    // Click handler: delete a grader after user confirmation.
    $("body #mi_lista").on("click", ".btnEliminarGrader", function(){
        var idGrader = $(this).attr("idGrader");
        var rutaImagen = $(this).attr("rutaImagen");
        var datos = new FormData();
        datos.append("id_grader", idGrader);
        datos.append("rutaImagen", rutaImagen);
        datos.append("tipoOperacion", "eliminarGrader");
        Swal.fire({
            title: '¿Estás seguro de eliminar?',
            text: "Los cambios no son reversibles!",
            icon: 'warning',
            showCancelButton: true,
            confirmButtonColor: '#3085d6',
            cancelButtonColor: '#d33',
            confirmButtonText: 'Si, Elimina!'
        }).then((result) => {
            if (result.value) {
                $.ajax({
                    url: 'ajax/ajaxGrader.php',
                    type: 'POST',
                    data: datos,
                    processData: false,
                    contentType: false,
                    success: function(respuesta) {
                        if (respuesta === "ok") {
                            Swal.fire(
                                'Eliminado!',
                                'Su archivo a sido eliminado.',
                                'success'
                            ).then((result) => {
                                if (result.value) {
                                    window.location = "grader";
                                }
                            });
                        }
                    }
                });
            }
        });
    });
});
|
# encoding: utf8
from unittest import TestCase
import alex.applications.PublicTransportInfoCS.hdc_policy as hdc_policy
from alex.utils.config import Config, as_project_path
import alex.applications.PublicTransportInfoCS.directions as directions
import alex.utils
from alex.components.dm.dddstate import DeterministicDiscriminativeDialogueState
from alex.components.slu.da import DialogueActConfusionNetwork, DialogueActItem
from alex.components.dm.ontology import Ontology
import mox
class TestPTICSHDCPolicy(TestCase):
    """Tests for the Czech public-transport HDC dialogue policy."""
    def setUp(self):
        super(TestPTICSHDCPolicy, self).setUp()
        self.cfg = self._get_cfg()
        self.ontology = Ontology(self.cfg['DM']['ontology'])
        self.mox = mox.Mox()
    def _get_cfg(self):
        """Build a minimal in-memory Alex config for the policy under test."""
        cfg = Config(config={
            'PublicTransportInfoCS': {
                'max_turns': 120,
            },
            'DM': {
                'directions': {
                    'type': directions.GoogleDirectionsFinder,
                },
                'dialogue_policy': {
                    'PTICSHDCPolicy': {
                        'accept_prob': 0.5,
                        'accept_prob_being_requested': 0.5,
                        'accept_prob_being_confirmed': 0.5,
                        'accept_prob_ludait': 0.5,
                        'accept_prob_noninformed': 0.5,
                        'confirm_prob': 0.5,
                        'select_prob': 0.5,
                        'min_change_prob': 0.5
                    }
                },
                'dialogue_state': {
                    'type': DeterministicDiscriminativeDialogueState,
                },
                'DeterministicDiscriminativeDialogueState': {
                    'type': 'UFAL_DSTC_1.0_approx',
                },
                'ontology': as_project_path('applications/PublicTransportInfoCS/data/ontology.py'),
            },
            'Logging': {
                'system_logger': alex.utils.DummyLogger(),
                'session_logger': alex.utils.DummyLogger()
            },
            'weather': {
                'dictionary': as_project_path('applications/PublicTransportInfoCS/weather_cs.cfg'),
                'suffix': 'CZ',
                'units': 'celsius',
            }
        })
        return cfg
    def _build_policy(self):
        """Instantiate the policy with the shared config and ontology."""
        return hdc_policy.PTICSHDCPolicy(self.cfg, self.ontology)
    def test_get_platform_res_da(self):
        """Asking for a platform is unsupported and must be reported as such."""
        # Renamed local from `hdc_policy`, which shadowed the module import.
        policy = self._build_policy()
        state = DeterministicDiscriminativeDialogueState(self.cfg, self.ontology)
        system_input = DialogueActConfusionNetwork()
        res = policy.get_da(state)
        user_input = DialogueActConfusionNetwork()
        user_input.add(1.0, DialogueActItem(dai='info(task=find_platform)'))
        user_input.add(1.0, DialogueActItem(dai='inform(from_stop=Praha)'))
        user_input.add(1.0, DialogueActItem(dai='inform(to_stop=Brno)'))
        state.update(user_input, system_input)
        res = policy.get_da(state)
        # `self.assert_` is a deprecated alias removed in Python 3.12;
        # assertIn performs the same membership check with a better message.
        self.assertIn('inform(not_supported)', res)
    def test_switching_tasks(self):
        """The tracked task slot follows the user's most recent task request."""
        policy = self._build_policy()
        # Stub external services so no network calls are made.
        self.mox.StubOutWithMock(policy.weather, 'get_weather')
        self.mox.StubOutWithMock(policy, 'get_directions')
        policy.weather.get_weather(city=u'Praha',
                                   daily=False,
                                   lat=u'50.0755381',
                                   lon=u'14.4378005',
                                   time=None).AndReturn(None)
        policy.get_directions(mox.IgnoreArg(),
                              check_conflict=True).AndReturn([DialogueActItem(dai="inform(time=10:00)")])
        self.mox.ReplayAll()
        state = DeterministicDiscriminativeDialogueState(self.cfg, self.ontology)
        system_input = DialogueActConfusionNetwork()
        res = policy.get_da(state)
        # User says she wants weather so the task should be weather.
        user_input = self._build_user_input("inform(task=weather)")
        state.update(user_input, system_input)
        res = policy.get_da(state)
        self.assertEqual(state['lta_task'].mpv(), 'weather')
        # User wants to find a connection so the task should be find_connection.
        user_input = self._build_user_input(u"inform(task=find_connection)",
                                            u"inform(to_stop=Malostranská)",
                                            u"inform(from_stop=Anděl)")
        state.update(user_input, system_input)
        res = policy.get_da(state)
        self.assertEqual(state['lta_task'].mpv(), 'find_connection')
        self.mox.VerifyAll()
    def _build_user_input(self, *args):
        """Wrap each DAI string into a 1.0-confidence confusion network item."""
        user_input = DialogueActConfusionNetwork()
        for arg in args:
            user_input.add(1.0, DialogueActItem(dai=arg))
        return user_input
|
import React from 'react';
import {View, Text, StyleSheet} from 'react-native';
const Scaffold = () => {
return (
<View style={styles.container}>
<Text style={styles.text}> Scaffold Screen </Text>
</View>
);
};
const styles = StyleSheet.create({
container: {
flex: 1,
alignItems: 'center',
justifyContent: 'center',
},
text: {
fontSize: 18.0,
color: '#333',
fontFamily: 'SpaceGrotesk',
},
});
export default Scaffold;
|
from __future__ import division
import numpy as np
import scipy as sp
import cvxpy as cp
import itertools, sys, pdb
class FTOCP(object):
    """ Finite Time Optimal Control Problem (FTOCP)
    Methods:
        - solve: solves the FTOCP given the initial condition x0, terminal constraints (optional) and terminal cost (optional)
        - model: given x_t and u_t computes x_{t+1} = Ax_t + Bu_t
    """

    def __init__(self, N, A, B, Q, R, Hx=None, gx=None, Hu=None, gu=None):
        """Store horizon, dynamics, cost and (optional) polytopic constraints.

        Args:
            N: horizon length.
            A, B: system matrices of x_{k+1} = A x_k + B u_k.
            Q, R: stage cost weights of h(x, u) = x^T Q x + u^T R u.
            Hx, gx: optional linear state constraints Hx x <= gx.
            Hu, gu: optional linear input constraints Hu u <= gu.
        """
        # Define variables
        self.N = N  # Horizon Length

        # System Dynamics (x_{k+1} = A x_k + Bu_k)
        self.A = A
        self.B = B
        self.n = A.shape[1]  # state dimension
        self.d = B.shape[1]  # input dimension

        # Linear state constraints (Hx*x <= gx)
        self.Hx = Hx
        self.gx = gx

        # Linear input constraints (Hu*u <= gu)
        self.Hu = Hu
        self.gu = gu

        # Cost (h(x,u) = x^TQx +u^TRu)
        self.Q = Q
        self.R = R

        # Cost of the most recently solved FTOCP (starts at +inf)
        self.costFTOCP = np.inf

    # def stage_cost_fun(self, x, xf, u):
    #     # Using the cvxpy norm function here
    #     return cp.norm(self.Q**0.5*(x-xf))**2 + cp.norm(self.R**0.5*u)**2
    #
    # def term_cost_fun(self, x, xf):
    #     # Using the cvxpy norm function here
    #     return cp.norm(self.Q**0.5*(x-xf))**2

    def solve(self, x0, xf=None, abs_t=None, expl_con=None, SS=None, Qfun=None, CVX=False, verbose=False):
        """This method solves a FTOCP given:
            - x0: initial condition
            - xf: (optional) goal condition, defaults to the origin
            - abs_t: (required if circular or linear constraints are provided) absolute time step
            - expl_con: (optional) allowed deviations, can be ellipsoidal ('ell') or linear ('lin') constraints
            - SS: (optional) contains a set of states and the terminal constraint is ConvHull(SS)
            - Qfun: (optional) cost associated with the states stored in SS. Terminal cost is BarycentricInterpolation(SS, Qfun)
            - CVX: (optional) if True, solve the convex relaxation (continuous multipliers) instead of the mixed-integer problem

        Returns (x.value, u.value) on success, (None, None) on solver failure.
        """
        if xf is None:
            xf = np.zeros(self.n)
        else:
            xf = np.reshape(xf, self.n)

        # Unpack allowed-deviation ("exploration") constraints, if given.
        if expl_con is not None:
            if 'lin' in expl_con:
                H = expl_con['lin'][0]
                g = expl_con['lin'][1]
            if 'ell' in expl_con:
                ell_con = expl_con['ell']

        # Initialize Variables
        x = cp.Variable((self.n, self.N+1))
        u = cp.Variable((self.d, self.N))

        # If SS is given construct a matrix collecting all states and a vector collection all costs
        if SS is not None:
            # SS_vector = np.squeeze(list(itertools.chain.from_iterable(SS))).T # From a 3D list to a 2D array
            # SS_vector = np.hstack(SS)
            SS_vector = SS[-1]  # Only use last trajectory
            # SS_vector = SS[-1][:,abs_t:min(SS[-1].shape[1],abs_t+int(2*(self.N+1)))]
            # Qfun_vector = np.expand_dims(np.array(list(itertools.chain.from_iterable(Qfun))), 0) # From a 2D list to a 1D array
            Qfun_vector = np.array(Qfun[-1])
            # Qfun_vector = np.array(Qfun[-1][abs_t:abs_t+SS_vector.shape[1]])

            if CVX:
                lambVar = cp.Variable((SS_vector.shape[1], 1), boolean=False) # Initialize vector of variables
            else:
                lambVar = cp.Variable((SS_vector.shape[1], 1), boolean=True) # Initialize vector of variables
            gamVar = cp.Variable((self.N+1), boolean=True)

        # NOTE(review): gamVar (and SS_vector below) are only defined when
        # SS is not None; calling solve() with SS=None and CVX=False would
        # raise NameError in the loop below -- confirm callers never do that.
        if not CVX:
            M = 1000  # Big M multiplier

        constr = [x[:,0] == x0]  # Initial condition
        for i in range(self.N):
            if abs_t is not None:
                # Clamp the absolute time index to the stored trajectory length.
                t = min(abs_t+i, SS_vector.shape[1]-1)
            constr += [x[:,i+1] == self.A*x[:,i] + self.B*u[:,i]]  # Dynamics
            if self.Hx is not None:
                constr += [self.Hx*x[:,i] <= self.gx]  # State constraints
            if self.Hu is not None:
                constr += [self.Hu*u[:,i] <= self.gu]  # Input constraints
            if not CVX:
                # Big M reformulation of minimum time objective: \gamma = 1 when x has not reached x_f, \gamma = 0 when x has reached x_f
                bigM_ub = xf+M*np.ones(self.n)*gamVar[i]
                bigM_lb = xf-M*np.ones(self.n)*gamVar[i]
                constr += [x[:,i] <= bigM_ub, -x[:,i] <= -bigM_lb]
            # Constrain positions to be within mutual agreed deviations at each time step
            if expl_con is not None and 'ell' in expl_con:
                if abs_t is None:
                    raise(ValueError('Absolute time step must be given'))
                constr += [cp.quad_form(x[:2,i]-SS_vector[:2,t], np.eye(2)) <= ell_con[t]**2]
            if expl_con is not None and 'lin' in expl_con:
                if abs_t is None:
                    raise(ValueError('Absolute time step must be given'))
                constr += [H[t]*x[:2,i]+g[t] <= 0]

        # Terminal Constraint if SS not empty
        if SS is not None:
            # Terminal state \in ConvHull(SS) or if \lambda is boolean, then terminal state is one of the points in the SS
            constr += [SS_vector * lambVar[:,0] == x[:,self.N],
                       np.ones((1, SS_vector.shape[1])) * lambVar[:,0] == 1,  # \lambda sum to 1 or only 1 \lambda is equal to 1
                       lambVar >= 0]  # Multipliers are positive definite

        if expl_con is not None and 'ell' in expl_con:
            # Constrain last position to be within mutual agreed deviations
            t = min(abs_t+self.N, SS_vector.shape[1]-1)
            constr += [cp.quad_form(x[:2,self.N]-SS_vector[:2,t], np.eye(2)) <= ell_con[t]**2]
        if expl_con is not None and 'lin' in expl_con:
            t = min(abs_t+self.N, SS_vector.shape[1]-1)
            constr += [H[t]*x[:2,self.N]+g[t] <= 0]

        # Cost Function
        cost = 0
        for i in range(self.N):
            if SS is not None:
                # With a safe set the running cost penalizes input effort only.
                cost += cp.quad_form(u[:,i], self.R)
            else:
                cost += cp.quad_form(x[:,i]-xf, self.Q) + cp.quad_form(u[:,i], self.R)  # Running cost h(x,u) = x^TQx + u^TRu
            if not CVX:
                # Minimum-time term: pay 1 for every step not yet at xf.
                cost += gamVar[i]

        # Terminal cost if SS not empty
        if SS is not None:
            cost += Qfun_vector * lambVar[:,0]  # Terminal cost is given by interpolation using \lambda
        else:
            cost += cp.quad_form(x[:,self.N]-xf, self.Q)
            # if not CVX:
            #     cost += gamVar[self.N]

        # Solve the Finite Time Optimal Control Problem
        problem = cp.Problem(cp.Minimize(cost), constr)
        problem.solve(verbose=verbose)

        if problem.status != cp.OPTIMAL:
            # Report the failure reason, drop into the debugger, and signal
            # the caller with (None, None).
            if problem.status == cp.INFEASIBLE:
                print('Optimization was infeasible for step %i' % abs_t)
            elif problem.status == cp.UNBOUNDED:
                print('Optimization was unbounded for step %i' % abs_t)
            elif problem.status == cp.INFEASIBLE_INACCURATE:
                print('Optimization was infeasible inaccurate for step %i' % abs_t)
            elif problem.status == cp.UNBOUNDED_INACCURATE:
                print('Optimization was unbounded inaccurate for step %i' % abs_t)
            elif problem.status == cp.OPTIMAL_INACCURATE:
                print('Optimization was optimal inaccurate for step %i' % abs_t)
            pdb.set_trace()
            return (None, None)

        # if SS is not None:
        #     if cost.value > self.costFTOCP:
        #         print('The cost is not decreasing at step %i' % abs_t)
        #         print('This iteration: %g' % cost.value)
        #         print('Last iteration: %g' % self.costFTOCP)
        #         pdb.set_trace()
        self.costFTOCP = cost.value

        if x.value is None or u.value is None:
            print('Optimization variables returned None')
            print(problem.status)
            pdb.set_trace()

        return(x.value, u.value)

    def model(self, x, u):
        # Compute state evolution x_{k+1} = A x_k + B u_k
        return self.A.dot(x) + self.B.dot(u)

    def update_model(self, A=None, B=None):
        """Replace the dynamics matrices (only the ones provided)."""
        if A is not None:
            self.A = A
        if B is not None:
            self.B = B

    def update_cost(self, Q=None, R=None):
        """Replace the cost matrices (only the ones provided)."""
        if Q is not None:
            self.Q = Q
        if R is not None:
            self.R = R

    def update_constraints(self, Hx=None, gx=None, Hu=None, gu=None):
        """Replace state/input constraints; each pair is updated atomically."""
        if Hx is not None and gx is not None:
            self.Hx = Hx
            self.gx = gx
        if Hu is not None and gu is not None:
            self.Hu = Hu
            self.gu = gu
|
#pragma once
#include <fcntl.h>
#include <unistd.h>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <csignal>
#include <ctime>
#include <map>
#include <memory>
#include <string>
#include <thread>
#ifndef sighandler_t
typedef void (*sighandler_t)(int sig);
#endif
void set_thread_name(const char* name);
int set_realtime_priority(int level);
int set_core_affinity(int core);
namespace util {

// ***** Time helpers *****

// Broken-down system time (see implementation for clock source).
struct tm get_time();
// Sanity check that sys_time looks like a plausible wall-clock time.
bool time_valid(struct tm sys_time);

// ***** math helpers *****

// map x from [a1, a2] to [b1, b2]; x is clamped into [a1, a2] first.
template <typename T>
T map_val(T x, T a1, T a2, T b1, T b2) {
  x = std::clamp(x, a1, a2);
  T ra = a2 - a1;
  T rb = b2 - b1;
  return (x - a1) * rb / ra + b1;
}

// ***** string helpers *****

// True when s begins with prefix.
inline bool starts_with(const std::string& s, const std::string& prefix) {
  return s.compare(0, prefix.size(), prefix) == 0;
}

// printf-style formatting into a std::string: the first snprintf measures
// the required size, the second writes the formatted text.
// NOTE(review): args must be printf-compatible; passing a std::string
// directly (instead of .c_str()) is undefined behavior -- confirm callers.
template <typename... Args>
std::string string_format(const std::string& format, Args... args) {
  size_t size = snprintf(nullptr, 0, format.c_str(), args...) + 1;
  std::unique_ptr<char[]> buf(new char[size]);
  snprintf(buf.get(), size, format.c_str(), args...);
  return std::string(buf.get(), buf.get() + size - 1);
}

std::string getenv_default(const char* env_var, const char* suffix, const char* default_val);
std::string tohex(const uint8_t* buf, size_t buf_size);
std::string hexdump(const std::string& in);
std::string base_name(std::string const& path);
std::string dir_name(std::string const& path);
bool is_valid_dongle_id(std::string const& dongle_id);

// **** file helpers *****
std::string read_file(const std::string& fn);
int read_files_in_dir(const std::string& path, std::map<std::string, std::string>* contents);
int write_file(const char* path, const void* data, size_t size, int flags = O_WRONLY, mode_t mode = 0777);
std::string readlink(const std::string& path);
bool file_exists(const std::string& fn);

// Block the calling thread for the given number of milliseconds.
inline void sleep_for(const int milliseconds) {
  std::this_thread::sleep_for(std::chrono::milliseconds(milliseconds));
}

}  // namespace util
// Turns SIGINT/SIGTERM (and SIGPWR on non-Apple platforms) into a process-
// wide boolean flag that the main loop can poll: `while (!exit_handler)`.
class ExitHandler {
 public:
  ExitHandler() {
    std::signal(SIGINT, (sighandler_t)set_do_exit);
    std::signal(SIGTERM, (sighandler_t)set_do_exit);

#ifndef __APPLE__
    std::signal(SIGPWR, (sighandler_t)set_do_exit);
#endif
  };

  // true when the exit was triggered by SIGPWR (power failure)
  inline static std::atomic<bool> power_failure = false;
  // number of the last signal received (0 = none)
  inline static std::atomic<int> signal = 0;

  // true once any exit signal has been received
  inline operator bool() { return do_exit; }

  // allow `exit_handler = true/false` to set/clear the flag manually;
  // also clears the recorded signal number
  inline ExitHandler& operator=(bool v) {
    signal = 0;
    do_exit = v;
    return *this;
  }

 private:
  // async-signal handler: record which signal arrived and raise the flag
  static void set_do_exit(int sig) {
#ifndef __APPLE__
    power_failure = (sig == SIGPWR);
#endif
    signal = sig;
    do_exit = true;
  }
  inline static std::atomic<bool> do_exit = false;
};
// RAII owner of a file descriptor: closes it on destruction, transfers
// ownership on move-assignment (the moved-from object is left holding -1,
// the "empty" value, so the fd is closed exactly once).
struct unique_fd {
  unique_fd(int fd = -1) : fd_(fd) {}

  // Move assignment.  Bug fix: the previous implementation overwrote fd_
  // without closing it, leaking the descriptor we already owned; it also
  // lacked a self-move guard.
  unique_fd& operator=(unique_fd&& uf) {
    if (this != &uf) {
      if (fd_ != -1) close(fd_);
      fd_ = uf.fd_;
      uf.fd_ = -1;
    }
    return *this;
  }

  ~unique_fd() {
    if (fd_ != -1) close(fd_);
  }

  // Implicit conversion so the wrapper can be passed straight to POSIX APIs.
  operator int() const { return fd_; }
  int fd_;
};
// Discrete first-order low-pass filter (exponential smoothing).
class FirstOrderFilter {
 public:
  // x0: initial output value, ts: time constant, dt: sample period.
  FirstOrderFilter(float x0, float ts, float dt) {
    k_ = (dt / ts) / (1.0 + dt / ts);
    x_ = x0;
  }

  // Blend the new sample into the state and return the filtered value.
  inline float update(float x) {
    x_ = (1. - k_) * x_ + k_ * x;
    return x_;
  }

  // Force the filter state to x (e.g. after a discontinuity).
  inline void reset(float x) { x_ = x; }

 private:
  float x_;  // current filtered value
  float k_;  // smoothing gain in (0, 1)
};
|
"""Miscellaneous methods not covered in the documentation."""
class Misc:
    """Wrapper methods for miscellaneous MAPDL commands."""

    def verify(self, case='', level='', **kwargs):
        """Enter the verification run mode.

        .. note::
            This command is only valid at the ``/BEGIN`` level, obtained
            with ``mapdl.finish()``.

        Parameters
        ----------
        case : str, optional
            Optional title of the verification manual file.  Also accepts
            ``'OFF'`` to disable the verification run mode.

        level : int, optional
            Verification level ranging from 1 to 6 defaulting to 4.

        **kwargs : dict, optional
            Extra keyword arguments forwarded to ``run``.

        Returns
        -------
        str
            Response of the underlying ``/VERIFY`` command as returned
            by ``run``.

        Examples
        --------
        Enter the verification routine with the default option.

        >>> mapdl.finish()
        >>> mapdl.verify('VM1')
        '*** VERIFICATION RUN - CASE VM1 *** OPTION= 4'
        """
        return self.run(f'/VERIFY,{case},{level}', **kwargs)
|
'use strict'
// Setup
import gravity from './../index-es6.js'
// Animated DOM elements: the sun wanders around the page, the earth is
// pulled toward it by the imported gravity() helper.
var sun = document.getElementById('sun')
var earth = document.getElementById('earth')
// Hidden full-viewport probe element used to measure the window size.
var testCss = 'position:fixed;top:0;left:0;bottom:0;right:0;visibility:hidden;'
var testElId = 'youbemysun-test-el'
var testEl = document.getElementById(testElId)
// Step size (px per tick) and the current direction of travel per axis.
var steps = 1
var xDir = steps
var yDir = steps
// Helpers
// Measures the usable window size by reading the dimensions of a hidden,
// viewport-filling probe element, creating it lazily on first use.
function windowSize () {
  var probe = testEl || document.getElementById(testElId)
  if (!probe) {
    probe = document.createElement('div')
    probe.id = testElId
    probe.style.cssText = testCss
    document.body.appendChild(probe)
  }
  testEl = probe
  return {
    width: probe.offsetWidth,
    height: probe.offsetHeight
  }
}
// Main
// Asks the gravity model where the earth belongs relative to the sun and
// applies that position through inline styles.
function positionEarth () {
  var nextPos = gravity(sun, earth, steps)
  earth.style.left = nextPos.x + 'px'
  earth.style.top = nextPos.y + 'px'
}
// Moves the sun one step, bouncing off the viewport edges, then updates
// the earth's position around it.
function reposition () {
  var coords = sun.getBoundingClientRect()
  var winSize = windowSize()
  var pageWidth = winSize.width
  var pageHeight = winSize.height
  var oldLeft = parseInt(sun.style.left, 10)
  var oldTop = parseInt(sun.style.top, 10)
  // Fall back to the measured rect only when the inline style is not set
  // yet (parseInt('') === NaN).  Bug fix: the old test used truthiness
  // (`!oldLeft || !oldTop`), so a legitimate position of 0px on either
  // axis wrongly reset BOTH coordinates from the bounding rect.
  if (Number.isNaN(oldLeft)) oldLeft = coords.left
  if (Number.isNaN(oldTop)) oldTop = coords.top
  // Reverse direction when the next step would leave the viewport.
  if (xDir > 0 && (oldLeft + coords.width + xDir) > pageWidth) xDir = -steps
  else if ((oldLeft - xDir) < 0) xDir = steps
  if (yDir > 0 && (oldTop + coords.height + yDir) > pageHeight) yDir = -steps
  else if ((oldTop - yDir) < 0) yDir = steps
  sun.style.left = (oldLeft + xDir) + 'px'
  sun.style.top = (oldTop + yDir) + 'px'
  positionEarth()
}
// Initial placement, then animate every 10ms (~100 ticks/s).
positionEarth()
setInterval(reposition, 10)
|
# Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import gevent
import gevent.monkey
gevent.monkey.patch_all(thread=False, select=False)
import os.path
import logging
import signal
import multiprocessing
import argparse
import os
import math
import psutil
import minemeld.chassis
import minemeld.mgmtbus
import minemeld.comm
import minemeld.run.config
from minemeld import __version__
LOG = logging.getLogger(__name__)
def _run_chassis(fabricconfig, mgmtbusconfig, fts):
    """Child-process entry point: build, configure and run one chassis.

    :param fabricconfig: fabric section of the config ('class' and 'config').
    :param mgmtbusconfig: management bus configuration.
    :param fts: dict of node name -> node config assigned to this chassis.
    """
    try:
        # lower priority to make master and web
        # more "responsive"
        os.nice(5)

        c = minemeld.chassis.Chassis(
            fabricconfig['class'],
            fabricconfig['config'],
            mgmtbusconfig
        )
        c.configure(fts)

        # SIGUSR1 is the soft-stop signal sent by the parent's _cleanup()
        gevent.signal(signal.SIGUSR1, c.stop)

        # poll until every node is initialized, bailing out early if the
        # chassis is powered off in the meantime
        while not c.fts_init():
            if c.poweroff.wait(timeout=0.1) is not None:
                break
            gevent.sleep(1)

        LOG.info('Nodes initialized')

        try:
            # block until the chassis is asked to power off
            c.poweroff.wait()
            LOG.info('power off')
        except KeyboardInterrupt:
            # SIGINT is ignored in children; reaching this is unexpected
            LOG.error("We should not be here !")
            c.stop()
    except:
        LOG.exception('Exception in chassis main procedure')
        raise
def _check_disk_space(num_nodes):
    """Return the free disk space in bytes, or None when it falls below
    the configured per-node budget (MM_DISK_SPACE_PER_NODE, in KB)."""
    per_node_kb = int(os.environ.get(
        'MM_DISK_SPACE_PER_NODE',
        10*1024  # default: 10MB per node
    ))
    needed_disk = per_node_kb * num_nodes * 1024
    free_disk = psutil.disk_usage('.').free

    LOG.debug('Disk space - needed: {} available: {}'.format(needed_disk, free_disk))

    if free_disk > needed_disk:
        return free_disk

    LOG.critical(
        ('Not enough space left on the device, available: {} needed: {}'
         ' - please delete traces, logs and old engine versions and restart').format(
            free_disk, needed_disk
        )
    )
    return None
def _parse_args():
    """Build the mm-run command line parser and parse sys.argv."""
    argp = argparse.ArgumentParser(
        description="Low-latency threat indicators processor"
    )
    argp.add_argument(
        '--version',
        action='version',
        version=__version__
    )
    # 0 chassis means "derive from the CPU count" (resolved in main()).
    argp.add_argument(
        '--multiprocessing',
        default=0,
        type=int,
        action='store',
        metavar='NP',
        help='enable multiprocessing. NP is the number of chassis, '
             '0 to use two chassis per machine core (default)'
    )
    argp.add_argument(
        '--nodes-per-chassis',
        default=15.0,
        type=float,
        action='store',
        metavar='NPC',
        help='number of nodes per chassis (default 15)'
    )
    argp.add_argument(
        '--verbose',
        action='store_true',
        help='verbose'
    )
    argp.add_argument(
        'config',
        action='store',
        metavar='CONFIG',
        help='path of the config file or of the config directory'
    )
    return argp.parse_args()
def _setup_environment(config):
# make config dir available to nodes
cdir = config
if not os.path.isdir(cdir):
cdir = os.path.dirname(config)
os.environ['MM_CONFIG_DIR'] = cdir
if not 'REQUESTS_CA_BUNDLE' in os.environ and 'MM_CA_BUNDLE' in os.environ:
os.environ['REQUESTS_CA_BUNDLE'] = os.environ['MM_CA_BUNDLE']
def main():
    """mm-run entry point: load the config, spawn the chassis worker
    processes, drive them through the management bus, and wait until a
    signal or a chassis failure asks for shutdown.

    Returns 2 on configuration/space errors; otherwise falls through
    after cleanup.
    """
    mbusmaster = None
    processes_lock = None
    processes = None
    disk_space_monitor_glet = None

    def _cleanup():
        # Checkpoint the graph, then soft-stop (SIGUSR1) every live
        # chassis process and wait for all of them to terminate.
        if mbusmaster is not None:
            mbusmaster.checkpoint_graph()

        if processes_lock is None:
            return

        with processes_lock:
            if processes is None:
                return

            for p in processes:
                if not p.is_alive():
                    continue

                try:
                    os.kill(p.pid, signal.SIGUSR1)
                except OSError:
                    # process died between the check and the kill
                    continue

            while sum([int(t.is_alive()) for t in processes]) != 0:
                gevent.sleep(1)

    def _sigint_handler():
        LOG.info('SIGINT received')
        _cleanup()
        signal_received.set()

    def _sigterm_handler():
        LOG.info('SIGTERM received')
        _cleanup()
        signal_received.set()

    def _disk_space_monitor(num_nodes):
        # Re-check free disk space every minute; trigger shutdown when low.
        while True:
            if _check_disk_space(num_nodes=num_nodes) is None:
                _cleanup()
                signal_received.set()
                break

            gevent.sleep(60)

    args = _parse_args()

    # logging
    loglevel = logging.INFO
    if args.verbose:
        loglevel = logging.DEBUG

    logging.basicConfig(
        level=loglevel,
        format="%(asctime)s (%(process)d)%(module)s.%(funcName)s"
               " %(levelname)s: %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S"
    )
    LOG.info("Starting mm-run.py version %s", __version__)
    LOG.info("mm-run.py arguments: %s", args)

    _setup_environment(args.config)

    # load and validate config
    config = minemeld.run.config.load_config(args.config)
    LOG.info("mm-run.py config: %s", config)

    # refuse to start when disk space is already below budget
    if _check_disk_space(num_nodes=len(config.nodes)) is None:
        LOG.critical('Not enough disk space available, exit')
        return 2

    # number of chassis: 0 means derive from the number of CPU cores
    np = args.multiprocessing
    if np == 0:
        np = multiprocessing.cpu_count()
    LOG.info('multiprocessing: #cores: %d', multiprocessing.cpu_count())
    LOG.info("multiprocessing: max #chassis: %d", np)

    npc = args.nodes_per_chassis
    if npc <= 0:
        LOG.critical('nodes-per-chassis should be a positive integer')
        return 2

    # never spawn more chassis than needed for the configured nodes
    np = min(
        int(math.ceil(len(config.nodes)/npc)),
        np
    )
    LOG.info("Number of chassis: %d", np)

    # round-robin assignment of nodes to chassis
    ftlists = [{} for j in range(np)]
    j = 0
    for ft in config.nodes:
        pn = j % len(ftlists)
        ftlists[pn][ft] = config.nodes[ft]
        j += 1

    # cleanup
    if config.mgmtbus['transport']['class'] != config.fabric['class']:
        raise ValueError('mgmtbus class and fabric class should match')
    minemeld.comm.cleanup(config.fabric['class'], config.fabric['config'])

    # ignore SIGINT/SIGTERM while forking so the children inherit SIG_IGN;
    # gevent handlers are installed in the parent right after the fork
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)

    processes = []
    for g in ftlists:
        if len(g) == 0:
            continue

        p = multiprocessing.Process(
            target=_run_chassis,
            args=(
                config.fabric,
                config.mgmtbus,
                g
            )
        )
        processes.append(p)
        p.start()

    processes_lock = gevent.lock.BoundedSemaphore()
    signal_received = gevent.event.Event()

    gevent.signal(signal.SIGINT, _sigint_handler)
    gevent.signal(signal.SIGTERM, _sigterm_handler)

    try:
        mbusmaster = minemeld.mgmtbus.master_factory(
            config=config.mgmtbus['master'],
            comm_class=config.mgmtbus['transport']['class'],
            comm_config=config.mgmtbus['transport']['config'],
            nodes=config.nodes.keys(),
            num_chassis=len(processes)
        )
        mbusmaster.start()
        mbusmaster.wait_for_chassis(timeout=10)
        # here nodes are all CONNECTED, fabric and mgmtbus up, with mgmtbus
        # dispatching and fabric not dispatching
        mbusmaster.start_status_monitor()
        mbusmaster.init_graph(config)
        # here nodes are all INIT
        mbusmaster.start_chassis()
        # here nodes should all be starting
    except Exception:
        LOG.exception('Exception initializing graph')
        _cleanup()
        raise

    disk_space_monitor_glet = gevent.spawn(_disk_space_monitor, len(config.nodes))

    # main wait loop: exit on signal or when any chassis process dies
    try:
        while not signal_received.wait(timeout=1.0):
            with processes_lock:
                r = [int(t.is_alive()) for t in processes]
                if sum(r) != len(processes):
                    LOG.info("One of the chassis has stopped, exit")
                    break
    except KeyboardInterrupt:
        LOG.info("Ctrl-C received, exiting")
    except:
        LOG.exception("Exception in main loop")

    if disk_space_monitor_glet is not None:
        disk_space_monitor_glet.kill()
|
/* select - Module containing unix select(2) call.
Under Unix, the file descriptors are small integers.
Under Win32, select only exists for sockets, and sockets may
have any value except INVALID_SOCKET.
*/
#include "Python.h"
#include <structmember.h>
#ifdef HAVE_SYS_DEVPOLL_H
#include <sys/resource.h>
#include <sys/devpoll.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#endif
#ifdef __APPLE__
/* Perform runtime testing for a broken poll on OSX to make it easier
* to use the same binary on multiple releases of the OS.
*/
#undef HAVE_BROKEN_POLL
#endif
/* Windows #defines FD_SETSIZE to 64 if FD_SETSIZE isn't already defined.
64 is too small (too many people have bumped into that limit).
Here we boost it.
Users who want even more than the boosted limit should #define
FD_SETSIZE higher before this; e.g., via compiler /D switch.
*/
#if defined(MS_WINDOWS) && !defined(FD_SETSIZE)
#define FD_SETSIZE 512
#endif
#if defined(HAVE_POLL_H)
#include <poll.h>
#elif defined(HAVE_SYS_POLL_H)
#include <sys/poll.h>
#endif
#ifdef __sgi
/* This is missing from unistd.h */
extern void bzero(void *, int);
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef MS_WINDOWS
# define WIN32_LEAN_AND_MEAN
# include <winsock.h>
#else
# define SOCKET int
#endif
/* list of Python objects and their file descriptor; arrays of these are
   terminated by an entry with sentinel == -1 */
typedef struct {
    PyObject *obj;                           /* owned reference */
    SOCKET fd;
    int sentinel;                            /* -1 == sentinel */
} pylist;
/* Drop the owned references held in fd2obj[] and reset the array to an
   empty, sentinel-terminated state. */
static void
reap_obj(pylist fd2obj[FD_SETSIZE + 1])
{
    int i;
    for (i = 0; i < FD_SETSIZE + 1 && fd2obj[i].sentinel >= 0; i++) {
        Py_CLEAR(fd2obj[i].obj);
    }
    fd2obj[0].sentinel = -1;
}
/* Convert a Python sequence of fileno()-able objects into an fd_set and
   record (object, fd) pairs in the sentinel-terminated fd2obj[].
   returns -1 and sets the Python exception if an error occurred, otherwise
   returns a number >= 0 (highest fd + 1 on POSIX; unused on Windows)
*/
static int
seq2set(PyObject *seq, fd_set *set, pylist fd2obj[FD_SETSIZE + 1])
{
    int max = -1;
    int index = 0;
    Py_ssize_t i;
    PyObject* fast_seq = NULL;
    PyObject* o = NULL;

    fd2obj[0].obj = (PyObject*)0;           /* set list to zero size */
    FD_ZERO(set);

    fast_seq = PySequence_Fast(seq, "arguments 1-3 must be sequences");
    if (!fast_seq)
        return -1;

    for (i = 0; i < PySequence_Fast_GET_SIZE(fast_seq); i++) {
        SOCKET v;

        /* any intervening fileno() calls could decr this refcnt */
        if (!(o = PySequence_Fast_GET_ITEM(fast_seq, i)))
            goto finally;

        Py_INCREF(o);
        v = PyObject_AsFileDescriptor( o );
        if (v == -1) goto finally;

#if defined(_MSC_VER)
        max = 0;                             /* not used for Win32 */
#else  /* !_MSC_VER */
        if (!_PyIsSelectable_fd(v)) {
            PyErr_SetString(PyExc_ValueError,
                            "filedescriptor out of range in select()");
            goto finally;
        }
        if (v > max)
            max = v;
#endif /* _MSC_VER */
        FD_SET(v, set);

        /* add object and its file descriptor to the list */
        if (index >= FD_SETSIZE) {
            PyErr_SetString(PyExc_ValueError,
                            "too many file descriptors in select()");
            goto finally;
        }
        fd2obj[index].obj = o;               /* transfer the INCREF'd ref */
        fd2obj[index].fd = v;
        fd2obj[index].sentinel = 0;
        fd2obj[++index].sentinel = -1;
    }
    Py_DECREF(fast_seq);
    return max+1;

  finally:
    Py_XDECREF(o);
    Py_DECREF(fast_seq);
    return -1;
}
/* Build a list of the objects from fd2obj[] whose fd is ready in *set,
   transferring ownership of those objects into the new list.
   returns NULL and sets the Python exception if an error occurred */
static PyObject *
set2list(fd_set *set, pylist fd2obj[FD_SETSIZE + 1])
{
    int i, j, count=0;
    PyObject *list, *o;
    SOCKET fd;

    /* first pass: count the ready fds so the list can be preallocated */
    for (j = 0; fd2obj[j].sentinel >= 0; j++) {
        if (FD_ISSET(fd2obj[j].fd, set))
            count++;
    }
    list = PyList_New(count);
    if (!list)
        return NULL;

    i = 0;
    for (j = 0; fd2obj[j].sentinel >= 0; j++) {
        fd = fd2obj[j].fd;
        if (FD_ISSET(fd, set)) {
            o = fd2obj[j].obj;
            fd2obj[j].obj = NULL;
            /* transfer ownership */
            if (PyList_SetItem(list, i, o) < 0)
                goto finally;
            i++;
        }
    }
    return list;
  finally:
    Py_DECREF(list);
    return NULL;
}
#undef SELECT_USES_HEAP
#if FD_SETSIZE > 1024
#define SELECT_USES_HEAP
#endif /* FD_SETSIZE > 1024 */
/* select(rlist, wlist, xlist[, timeout]): convert the three sequences to
   fd_sets, call select(2) with the GIL released, and convert the ready
   sets back into Python lists. */
static PyObject *
select_select(PyObject *self, PyObject *args)
{
#ifdef SELECT_USES_HEAP
    pylist *rfd2obj, *wfd2obj, *efd2obj;
#else  /* !SELECT_USES_HEAP */
    /* XXX: All this should probably be implemented as follows:
     * - find the highest descriptor we're interested in
     * - add one
     * - that's the size
     * See: Stevens, APitUE, $12.5.1
     */
    pylist rfd2obj[FD_SETSIZE + 1];
    pylist wfd2obj[FD_SETSIZE + 1];
    pylist efd2obj[FD_SETSIZE + 1];
#endif /* SELECT_USES_HEAP */
    PyObject *ifdlist, *ofdlist, *efdlist;
    PyObject *ret = NULL;
    PyObject *tout = Py_None;
    fd_set ifdset, ofdset, efdset;
    struct timeval tv, *tvp;
    int imax, omax, emax, max;
    int n;

    /* convert arguments */
    if (!PyArg_UnpackTuple(args, "select", 3, 4,
                           &ifdlist, &ofdlist, &efdlist, &tout))
        return NULL;

    if (tout == Py_None)
        tvp = (struct timeval *)0;           /* NULL timeout: block forever */
    else if (!PyNumber_Check(tout)) {
        PyErr_SetString(PyExc_TypeError,
                        "timeout must be a float or None");
        return NULL;
    }
    else {
        /* On OpenBSD 5.4, timeval.tv_sec is a long.
         * Example: long is 64-bit, whereas time_t is 32-bit. */
        time_t sec;
        /* On OS X 64-bit, timeval.tv_usec is an int (and thus still 4
           bytes as required), but no longer defined by a long. */
        long usec;
        if (_PyTime_ObjectToTimeval(tout, &sec, &usec,
                                    _PyTime_ROUND_UP) == -1)
            return NULL;

#ifdef MS_WINDOWS
        /* On Windows, timeval.tv_sec is a long (32 bit),
         * whereas time_t can be 64-bit. */
        assert(sizeof(tv.tv_sec) == sizeof(long));
#if SIZEOF_TIME_T > SIZEOF_LONG
        if (sec > LONG_MAX) {
            PyErr_SetString(PyExc_OverflowError,
                            "timeout is too large");
            return NULL;
        }
#endif
        tv.tv_sec = (long)sec;
#else
        assert(sizeof(tv.tv_sec) >= sizeof(sec));
        tv.tv_sec = sec;
#endif
        tv.tv_usec = usec;
        if (tv.tv_sec < 0) {
            PyErr_SetString(PyExc_ValueError, "timeout must be non-negative");
            return NULL;
        }
        tvp = &tv;
    }

#ifdef SELECT_USES_HEAP
    /* Allocate memory for the lists */
    rfd2obj = PyMem_NEW(pylist, FD_SETSIZE + 1);
    wfd2obj = PyMem_NEW(pylist, FD_SETSIZE + 1);
    efd2obj = PyMem_NEW(pylist, FD_SETSIZE + 1);
    if (rfd2obj == NULL || wfd2obj == NULL || efd2obj == NULL) {
        if (rfd2obj) PyMem_DEL(rfd2obj);
        if (wfd2obj) PyMem_DEL(wfd2obj);
        if (efd2obj) PyMem_DEL(efd2obj);
        return PyErr_NoMemory();
    }
#endif /* SELECT_USES_HEAP */

    /* Convert sequences to fd_sets, and get maximum fd number
     * propagates the Python exception set in seq2set()
     */
    rfd2obj[0].sentinel = -1;
    wfd2obj[0].sentinel = -1;
    efd2obj[0].sentinel = -1;
    if ((imax=seq2set(ifdlist, &ifdset, rfd2obj)) < 0)
        goto finally;
    if ((omax=seq2set(ofdlist, &ofdset, wfd2obj)) < 0)
        goto finally;
    if ((emax=seq2set(efdlist, &efdset, efd2obj)) < 0)
        goto finally;
    max = imax;
    if (omax > max) max = omax;
    if (emax > max) max = emax;

    /* release the GIL while blocking in select(2) */
    Py_BEGIN_ALLOW_THREADS
    n = select(max, &ifdset, &ofdset, &efdset, tvp);
    Py_END_ALLOW_THREADS

#ifdef MS_WINDOWS
    if (n == SOCKET_ERROR) {
        PyErr_SetExcFromWindowsErr(PyExc_OSError, WSAGetLastError());
    }
#else
    if (n < 0) {
        PyErr_SetFromErrno(PyExc_OSError);
    }
#endif
    else {
        /* any of these three calls can raise an exception.  it's more
           convenient to test for this after all three calls... but
           is that acceptable?
        */
        ifdlist = set2list(&ifdset, rfd2obj);
        ofdlist = set2list(&ofdset, wfd2obj);
        efdlist = set2list(&efdset, efd2obj);
        if (PyErr_Occurred())
            ret = NULL;
        else
            ret = PyTuple_Pack(3, ifdlist, ofdlist, efdlist);

        Py_XDECREF(ifdlist);
        Py_XDECREF(ofdlist);
        Py_XDECREF(efdlist);
    }

  finally:
    reap_obj(rfd2obj);
    reap_obj(wfd2obj);
    reap_obj(efd2obj);
#ifdef SELECT_USES_HEAP
    PyMem_DEL(rfd2obj);
    PyMem_DEL(wfd2obj);
    PyMem_DEL(efd2obj);
#endif /* SELECT_USES_HEAP */
    return ret;
}
#if defined(HAVE_POLL) && !defined(HAVE_BROKEN_POLL)
/*
 * poll() support
 */

typedef struct {
    PyObject_HEAD
    PyObject *dict;             /* fd -> event-mask mapping */
    int ufd_uptodate;           /* 1 when ufds mirrors dict */
    int ufd_len;                /* number of entries in ufds */
    struct pollfd *ufds;
    int poll_running;           /* guards against concurrent poll() calls */
} pollObject;

static PyTypeObject poll_Type;
/* Update the malloc'ed array of pollfds to match the dictionary
   contained within a pollObject.  Return 1 on success, 0 on an error.
*/
static int
update_ufd_array(pollObject *self)
{
    Py_ssize_t i, pos;
    PyObject *key, *value;
    struct pollfd *old_ufds = self->ufds;

    self->ufd_len = PyDict_Size(self->dict);
    PyMem_RESIZE(self->ufds, struct pollfd, self->ufd_len);
    if (self->ufds == NULL) {
        /* keep the previous array so the object stays consistent */
        self->ufds = old_ufds;
        PyErr_NoMemory();
        return 0;
    }

    i = pos = 0;
    while (PyDict_Next(self->dict, &pos, &key, &value)) {
        assert(i < self->ufd_len);
        /* Never overflow */
        self->ufds[i].fd = (int)PyLong_AsLong(key);
        self->ufds[i].events = (short)(unsigned short)PyLong_AsLong(value);
        i++;
    }
    assert(i == self->ufd_len);
    self->ufd_uptodate = 1;
    return 1;
}
/* PyArg_ParseTuple "O&" converter: Python int -> C unsigned short,
   raising OverflowError when the value does not fit.  Returns 1 on
   success, 0 on error (per the converter protocol). */
static int
ushort_converter(PyObject *obj, void *ptr)
{
    unsigned long uval;

    uval = PyLong_AsUnsignedLong(obj);
    if (uval == (unsigned long)-1 && PyErr_Occurred())
        return 0;
    if (uval > USHRT_MAX) {
        PyErr_SetString(PyExc_OverflowError,
                        "Python int too large for C unsigned short");
        return 0;
    }

    *(unsigned short *)ptr = Py_SAFE_DOWNCAST(uval, unsigned long, unsigned short);
    return 1;
}
PyDoc_STRVAR(poll_register_doc,
"register(fd [, eventmask] ) -> None\n\n\
Register a file descriptor with the polling object.\n\
fd -- either an integer, or an object with a fileno() method returning an\n\
      int.\n\
events -- an optional bitmask describing the type of events to check for");

/* poll.register(fd[, eventmask]): store fd -> eventmask in self->dict;
   the pollfd array is rebuilt lazily on the next poll(). */
static PyObject *
poll_register(pollObject *self, PyObject *args)
{
    PyObject *o, *key, *value;
    int fd;
    unsigned short events = POLLIN | POLLPRI | POLLOUT;  /* default mask */
    int err;

    if (!PyArg_ParseTuple(args, "O|O&:register", &o, ushort_converter, &events))
        return NULL;

    fd = PyObject_AsFileDescriptor(o);
    if (fd == -1) return NULL;

    /* Add entry to the internal dictionary: the key is the
       file descriptor, and the value is the event mask. */
    key = PyLong_FromLong(fd);
    if (key == NULL)
        return NULL;
    value = PyLong_FromLong(events);
    if (value == NULL) {
        Py_DECREF(key);
        return NULL;
    }
    err = PyDict_SetItem(self->dict, key, value);
    Py_DECREF(key);
    Py_DECREF(value);
    if (err < 0)
        return NULL;

    /* invalidate the cached pollfd array */
    self->ufd_uptodate = 0;

    Py_INCREF(Py_None);
    return Py_None;
}
PyDoc_STRVAR(poll_modify_doc,
"modify(fd, eventmask) -> None\n\n\
Modify an already registered file descriptor.\n\
fd -- either an integer, or an object with a fileno() method returning an\n\
      int.\n\
events -- an optional bitmask describing the type of events to check for");

/* poll.modify(fd, eventmask): like register(), but the fd must already
   be registered -- otherwise OSError(ENOENT) is raised. */
static PyObject *
poll_modify(pollObject *self, PyObject *args)
{
    PyObject *o, *key, *value;
    int fd;
    unsigned short events;
    int err;

    if (!PyArg_ParseTuple(args, "OO&:modify", &o, ushort_converter, &events))
        return NULL;

    fd = PyObject_AsFileDescriptor(o);
    if (fd == -1) return NULL;

    /* Modify registered fd */
    key = PyLong_FromLong(fd);
    if (key == NULL)
        return NULL;
    if (PyDict_GetItem(self->dict, key) == NULL) {
        /* not registered: report ENOENT as an OSError */
        errno = ENOENT;
        PyErr_SetFromErrno(PyExc_OSError);
        Py_DECREF(key);
        return NULL;
    }
    value = PyLong_FromLong(events);
    if (value == NULL) {
        Py_DECREF(key);
        return NULL;
    }
    err = PyDict_SetItem(self->dict, key, value);
    Py_DECREF(key);
    Py_DECREF(value);
    if (err < 0)
        return NULL;

    /* invalidate the cached pollfd array */
    self->ufd_uptodate = 0;

    Py_INCREF(Py_None);
    return Py_None;
}
PyDoc_STRVAR(poll_unregister_doc,
"unregister(fd) -> None\n\n\
Remove a file descriptor being tracked by the polling object.");

/* poll.unregister(fd): remove the fd from the dictionary; KeyError
   propagates when the fd was never registered. */
static PyObject *
poll_unregister(pollObject *self, PyObject *o)
{
    PyObject *key;
    int fd;

    fd = PyObject_AsFileDescriptor( o );
    if (fd == -1)
        return NULL;

    /* Check whether the fd is already in the array */
    key = PyLong_FromLong(fd);
    if (key == NULL)
        return NULL;

    if (PyDict_DelItem(self->dict, key) == -1) {
        Py_DECREF(key);
        /* This will simply raise the KeyError set by PyDict_DelItem
           if the file descriptor isn't registered. */
        return NULL;
    }

    Py_DECREF(key);
    /* invalidate the cached pollfd array */
    self->ufd_uptodate = 0;

    Py_INCREF(Py_None);
    return Py_None;
}
PyDoc_STRVAR(poll_poll_doc,
"poll( [timeout] ) -> list of (fd, event) 2-tuples\n\n\
Polls the set of registered file descriptors, returning a list containing \n\
any descriptors that have events or errors to report.");

/* poll.poll([timeout]): run poll(2) over the registered fds (GIL
   released) and return [(fd, revents), ...] for the fired descriptors. */
static PyObject *
poll_poll(pollObject *self, PyObject *args)
{
    PyObject *result_list = NULL, *tout = NULL;
    int timeout = 0, poll_result, i, j;
    PyObject *value = NULL, *num = NULL;

    if (!PyArg_UnpackTuple(args, "poll", 0, 1, &tout)) {
        return NULL;
    }

    /* Check values for timeout */
    if (tout == NULL || tout == Py_None)
        timeout = -1;                        /* negative: wait forever */
    else if (!PyNumber_Check(tout)) {
        PyErr_SetString(PyExc_TypeError,
                        "timeout must be an integer or None");
        return NULL;
    }
    else {
        tout = PyNumber_Long(tout);
        if (!tout)
            return NULL;
        timeout = _PyLong_AsInt(tout);
        Py_DECREF(tout);
        if (timeout == -1 && PyErr_Occurred())
            return NULL;
    }

    /* Avoid concurrent poll() invocation, issue 8865 */
    if (self->poll_running) {
        PyErr_SetString(PyExc_RuntimeError,
                        "concurrent poll() invocation");
        return NULL;
    }

    /* Ensure the ufd array is up to date */
    if (!self->ufd_uptodate)
        if (update_ufd_array(self) == 0)
            return NULL;

    self->poll_running = 1;

    /* call poll() */
    Py_BEGIN_ALLOW_THREADS
    poll_result = poll(self->ufds, self->ufd_len, timeout);
    Py_END_ALLOW_THREADS

    self->poll_running = 0;

    if (poll_result < 0) {
        PyErr_SetFromErrno(PyExc_OSError);
        return NULL;
    }

    /* build the result list */
    result_list = PyList_New(poll_result);
    if (!result_list)
        return NULL;
    else {
        for (i = 0, j = 0; j < poll_result; j++) {
            /* skip to the next fired descriptor */
            while (!self->ufds[i].revents) {
                i++;
            }
            /* if we hit a NULL return, set value to NULL
               and break out of loop; code at end will
               clean up result_list */
            value = PyTuple_New(2);
            if (value == NULL)
                goto error;
            num = PyLong_FromLong(self->ufds[i].fd);
            if (num == NULL) {
                Py_DECREF(value);
                goto error;
            }
            PyTuple_SET_ITEM(value, 0, num);

            /* The &0xffff is a workaround for AIX.  'revents'
               is a 16-bit short, and IBM assigned POLLNVAL
               to be 0x8000, so the conversion to int results
               in a negative number. See SF bug #923315. */
            num = PyLong_FromLong(self->ufds[i].revents & 0xffff);
            if (num == NULL) {
                Py_DECREF(value);
                goto error;
            }
            PyTuple_SET_ITEM(value, 1, num);
            if ((PyList_SetItem(result_list, j, value)) == -1) {
                Py_DECREF(value);
                goto error;
            }
            i++;
        }
    }
    return result_list;

  error:
    Py_DECREF(result_list);
    return NULL;
}
/* Method table for select.poll objects (register/modify/unregister/poll). */
static PyMethodDef poll_methods[] = {
    {"register", (PyCFunction)poll_register,
     METH_VARARGS, poll_register_doc},
    {"modify", (PyCFunction)poll_modify,
     METH_VARARGS, poll_modify_doc},
    {"unregister", (PyCFunction)poll_unregister,
     METH_O, poll_unregister_doc},
    {"poll", (PyCFunction)poll_poll,
     METH_VARARGS, poll_poll_doc},
    {NULL, NULL} /* sentinel */
};
static pollObject *
newPollObject(void)
{
pollObject *self;
self = PyObject_New(pollObject, &poll_Type);
if (self == NULL)
return NULL;
/* ufd_uptodate is a Boolean, denoting whether the
array pointed to by ufds matches the contents of the dictionary. */
self->ufd_uptodate = 0;
self->ufds = NULL;
self->poll_running = 0;
self->dict = PyDict_New();
if (self->dict == NULL) {
Py_DECREF(self);
return NULL;
}
return self;
}
/* Deallocator for select.poll objects: release the fd dictionary, the
   pollfd buffer, and finally the object itself. */
static void
poll_dealloc(pollObject *self)
{
    Py_XDECREF(self->dict);
    if (self->ufds != NULL) {
        PyMem_DEL(self->ufds);
    }
    PyObject_Del(self);
}
/* Type object for select.poll; instances are created only via select.poll(),
   so no tp_new/tp_init slots are provided. */
static PyTypeObject poll_Type = {
    /* The ob_type field must be initialized in the module init function
     * to be portable to Windows without using C++. */
    PyVarObject_HEAD_INIT(NULL, 0)
    "select.poll", /*tp_name*/
    sizeof(pollObject), /*tp_basicsize*/
    0, /*tp_itemsize*/
    /* methods */
    (destructor)poll_dealloc, /*tp_dealloc*/
    0, /*tp_print*/
    0, /*tp_getattr*/
    0, /*tp_setattr*/
    0, /*tp_reserved*/
    0, /*tp_repr*/
    0, /*tp_as_number*/
    0, /*tp_as_sequence*/
    0, /*tp_as_mapping*/
    0, /*tp_hash*/
    0, /*tp_call*/
    0, /*tp_str*/
    0, /*tp_getattro*/
    0, /*tp_setattro*/
    0, /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT, /*tp_flags*/
    0, /*tp_doc*/
    0, /*tp_traverse*/
    0, /*tp_clear*/
    0, /*tp_richcompare*/
    0, /*tp_weaklistoffset*/
    0, /*tp_iter*/
    0, /*tp_iternext*/
    poll_methods, /*tp_methods*/
};
#ifdef HAVE_SYS_DEVPOLL_H
/* select.devpoll support (Solaris-style /dev/poll).  Registrations are
   buffered in fds[] and pushed to the device by devpoll_flush(). */
typedef struct {
    PyObject_HEAD
    int fd_devpoll;        /* fd of the open /dev/poll device; -1 once closed */
    int max_n_fds;         /* capacity of fds[] (RLIMIT_NOFILE at creation) */
    int n_fds;             /* number of buffered, not-yet-flushed entries */
    struct pollfd *fds;    /* buffered registration/removal entries */
} devpollObject;

static PyTypeObject devpoll_Type;
/* Raise ValueError for an operation on a closed devpoll object.
   Always returns NULL so callers can `return devpoll_err_closed();`. */
static PyObject *
devpoll_err_closed(void)
{
    PyErr_SetString(PyExc_ValueError, "I/O operation on closed devpoll object");
    return NULL;
}
/* Write the buffered pollfd entries to the /dev/poll device.
   Returns 0 on success, -1 with a Python exception set on failure.
   NOTE: n_fds is reset to 0 *before* the write, so on failure the buffered
   entries are dropped rather than retried. */
static int devpoll_flush(devpollObject *self)
{
    int size, n;

    if (!self->n_fds) return 0;

    size = sizeof(struct pollfd)*self->n_fds;
    self->n_fds = 0;

    Py_BEGIN_ALLOW_THREADS
    n = write(self->fd_devpoll, self->fds, size);
    Py_END_ALLOW_THREADS

    if (n == -1 ) {
        PyErr_SetFromErrno(PyExc_IOError);
        return -1;
    }
    if (n < size) {
        /*
        ** Data written to /dev/poll is a binary data structure. It is not
        ** clear what to do if a partial write occurred. For now, raise
        ** an exception and see if we actually found this problem in
        ** the wild.
        ** See http://bugs.python.org/issue6397.
        */
        PyErr_Format(PyExc_IOError, "failed to write all pollfds. "
                "Please, report at http://bugs.python.org/. "
                "Data to report: Size tried: %d, actual size written: %d.",
                size, n);
        return -1;
    }
    return 0;
}
/* Common implementation for devpoll register() and modify().
   Entries are buffered in self->fds and flushed when the buffer fills.
   When `remove` is non-zero, a POLLREMOVE entry for the fd is queued first
   so that the following entry replaces any existing registration instead
   of being OR-ed into it. */
static PyObject *
internal_devpoll_register(devpollObject *self, PyObject *args, int remove)
{
    PyObject *o;
    int fd;
    unsigned short events = POLLIN | POLLPRI | POLLOUT;  /* default mask */

    if (self->fd_devpoll < 0)
        return devpoll_err_closed();

    if (!PyArg_ParseTuple(args, "O|O&:register", &o, ushort_converter, &events))
        return NULL;

    fd = PyObject_AsFileDescriptor(o);
    if (fd == -1) return NULL;

    if (remove) {
        self->fds[self->n_fds].fd = fd;
        self->fds[self->n_fds].events = POLLREMOVE;

        /* Flush whenever the buffer becomes full. */
        if (++self->n_fds == self->max_n_fds) {
            if (devpoll_flush(self))
                return NULL;
        }
    }

    self->fds[self->n_fds].fd = fd;
    self->fds[self->n_fds].events = (signed short)events;

    if (++self->n_fds == self->max_n_fds) {
        if (devpoll_flush(self))
            return NULL;
    }

    Py_RETURN_NONE;
}
PyDoc_STRVAR(devpoll_register_doc,
"register(fd [, eventmask] ) -> None\n\n\
Register a file descriptor with the polling object.\n\
fd -- either an integer, or an object with a fileno() method returning an\n\
int.\n\
events -- an optional bitmask describing the type of events to check for");

/* devpoll.register(fd[, eventmask]): thin wrapper, remove=0 means the new
   mask is OR-ed with any existing registration by the device. */
static PyObject *
devpoll_register(devpollObject *self, PyObject *args)
{
    return internal_devpoll_register(self, args, 0);
}
PyDoc_STRVAR(devpoll_modify_doc,
"modify(fd[, eventmask]) -> None\n\n\
Modify a possible already registered file descriptor.\n\
fd -- either an integer, or an object with a fileno() method returning an\n\
int.\n\
events -- an optional bitmask describing the type of events to check for");

/* devpoll.modify(fd[, eventmask]): remove=1 queues a POLLREMOVE first so
   the new mask replaces the old registration. */
static PyObject *
devpoll_modify(devpollObject *self, PyObject *args)
{
    return internal_devpoll_register(self, args, 1);
}
PyDoc_STRVAR(devpoll_unregister_doc,
"unregister(fd) -> None\n\n\
Remove a file descriptor being tracked by the polling object.");

/* devpoll.unregister(fd): queue a POLLREMOVE entry for the fd; it is
   flushed to the device together with other buffered entries. */
static PyObject *
devpoll_unregister(devpollObject *self, PyObject *o)
{
    int fd;

    if (self->fd_devpoll < 0)
        return devpoll_err_closed();

    fd = PyObject_AsFileDescriptor( o );
    if (fd == -1)
        return NULL;

    self->fds[self->n_fds].fd = fd;
    self->fds[self->n_fds].events = POLLREMOVE;

    if (++self->n_fds == self->max_n_fds) {
        if (devpoll_flush(self))
            return NULL;
    }

    Py_RETURN_NONE;
}
PyDoc_STRVAR(devpoll_poll_doc,
"poll( [timeout] ) -> list of (fd, event) 2-tuples\n\n\
Polls the set of registered file descriptors, returning a list containing \n\
any descriptors that have events or errors to report.");

/* devpoll.poll([timeout]).  timeout is in milliseconds; None or omitted
   blocks indefinitely.  Buffered registrations are flushed first. */
static PyObject *
devpoll_poll(devpollObject *self, PyObject *args)
{
    struct dvpoll dvp;
    PyObject *result_list = NULL, *tout = NULL;
    int poll_result, i;
    long timeout;
    PyObject *value, *num1, *num2;

    if (self->fd_devpoll < 0)
        return devpoll_err_closed();

    if (!PyArg_UnpackTuple(args, "poll", 0, 1, &tout)) {
        return NULL;
    }

    /* Check values for timeout */
    if (tout == NULL || tout == Py_None)
        timeout = -1;   /* DP_POLL: wait indefinitely */
    else if (!PyNumber_Check(tout)) {
        PyErr_SetString(PyExc_TypeError,
                        "timeout must be an integer or None");
        return NULL;
    }
    else {
        tout = PyNumber_Long(tout);
        if (!tout)
            return NULL;
        timeout = PyLong_AsLong(tout);
        Py_DECREF(tout);
        if (timeout == -1 && PyErr_Occurred())
            return NULL;
    }

    /* dp_timeout is an int; reject values that do not fit. */
    if ((timeout < -1) || (timeout > INT_MAX)) {
        PyErr_SetString(PyExc_OverflowError,
                        "timeout is out of range");
        return NULL;
    }

    /* Push any buffered (un)registrations to the device first. */
    if (devpoll_flush(self))
        return NULL;

    dvp.dp_fds = self->fds;
    dvp.dp_nfds = self->max_n_fds;
    dvp.dp_timeout = timeout;

    /* call devpoll() */
    Py_BEGIN_ALLOW_THREADS
    poll_result = ioctl(self->fd_devpoll, DP_POLL, &dvp);
    Py_END_ALLOW_THREADS

    if (poll_result < 0) {
        PyErr_SetFromErrno(PyExc_IOError);
        return NULL;
    }

    /* build the result list: one (fd, revents) tuple per ready fd.  The
       device writes the ready entries back into self->fds[0..poll_result). */
    result_list = PyList_New(poll_result);
    if (!result_list)
        return NULL;
    else {
        for (i = 0; i < poll_result; i++) {
            num1 = PyLong_FromLong(self->fds[i].fd);
            num2 = PyLong_FromLong(self->fds[i].revents);
            if ((num1 == NULL) || (num2 == NULL)) {
                Py_XDECREF(num1);
                Py_XDECREF(num2);
                goto error;
            }
            value = PyTuple_Pack(2, num1, num2);
            Py_DECREF(num1);
            Py_DECREF(num2);
            if (value == NULL)
                goto error;
            if ((PyList_SetItem(result_list, i, value)) == -1) {
                Py_DECREF(value);
                goto error;
            }
        }
    }

    return result_list;

  error:
    Py_DECREF(result_list);
    return NULL;
}
/* Close the /dev/poll fd if it is open and mark the object closed.
   Returns the errno from close() on failure, 0 on success or if the
   object was already closed.  The fd field is cleared before the GIL is
   released so concurrent calls see the object as closed. */
static int
devpoll_internal_close(devpollObject *self)
{
    int result = 0;
    const int fd = self->fd_devpoll;

    if (fd >= 0) {
        self->fd_devpoll = -1;
        Py_BEGIN_ALLOW_THREADS
        if (close(fd) < 0) {
            result = errno;
        }
        Py_END_ALLOW_THREADS
    }
    return result;
}
/* devpoll.close(): close the underlying /dev/poll file descriptor.
   Raises OSError if close() fails. */
static PyObject*
devpoll_close(devpollObject *self)
{
    errno = devpoll_internal_close(self);
    /* devpoll_internal_close() returns 0 or a *positive* errno value, so
       the previous test `errno < 0` could never be true and close()
       failures were silently ignored. */
    if (errno != 0) {
        PyErr_SetFromErrno(PyExc_OSError);
        return NULL;
    }
    Py_RETURN_NONE;
}

PyDoc_STRVAR(devpoll_close_doc,
"close() -> None\n\
\n\
Close the devpoll file descriptor. Further operations on the devpoll\n\
object will raise an exception.");
/* Getter for the `closed` attribute: True once the devpoll fd is closed. */
static PyObject*
devpoll_get_closed(devpollObject *self)
{
    if (self->fd_devpoll >= 0)
        Py_RETURN_FALSE;
    Py_RETURN_TRUE;
}
/* devpoll.fileno(): return the /dev/poll fd, or raise ValueError if the
   object has been closed. */
static PyObject*
devpoll_fileno(devpollObject *self)
{
    if (self->fd_devpoll < 0)
        return devpoll_err_closed();
    return PyLong_FromLong(self->fd_devpoll);
}

PyDoc_STRVAR(devpoll_fileno_doc,
"fileno() -> int\n\
\n\
Return the file descriptor.");
/* Method table for select.devpoll objects. */
static PyMethodDef devpoll_methods[] = {
    {"register", (PyCFunction)devpoll_register,
     METH_VARARGS, devpoll_register_doc},
    {"modify", (PyCFunction)devpoll_modify,
     METH_VARARGS, devpoll_modify_doc},
    {"unregister", (PyCFunction)devpoll_unregister,
     METH_O, devpoll_unregister_doc},
    {"poll", (PyCFunction)devpoll_poll,
     METH_VARARGS, devpoll_poll_doc},
    {"close", (PyCFunction)devpoll_close, METH_NOARGS,
     devpoll_close_doc},
    {"fileno", (PyCFunction)devpoll_fileno, METH_NOARGS,
     devpoll_fileno_doc},
    {NULL, NULL} /* sentinel */
};
/* Attribute descriptors for select.devpoll objects (read-only `closed`). */
static PyGetSetDef devpoll_getsetlist[] = {
    {"closed", (getter)devpoll_get_closed, NULL,
     "True if the devpoll object is closed"},
    {0},
};
/* Allocate a new select.devpoll object: query RLIMIT_NOFILE to size the
   pollfd buffer, open /dev/poll, and initialise all fields.
   Returns NULL with an exception set on any failure. */
static devpollObject *
newDevPollObject(void)
{
    devpollObject *self;
    int fd_devpoll, limit_result;
    struct pollfd *fds;
    struct rlimit limit;

    Py_BEGIN_ALLOW_THREADS
    /*
    ** If we try to process more that getrlimit()
    ** fds, the kernel will give an error, so
    ** we set the limit here. It is a dynamic
    ** value, because we can change rlimit() anytime.
    */
    limit_result = getrlimit(RLIMIT_NOFILE, &limit);
    if (limit_result != -1)
        fd_devpoll = _Py_open("/dev/poll", O_RDWR);
    Py_END_ALLOW_THREADS

    /* Errors are reported only after re-acquiring the GIL. */
    if (limit_result == -1) {
        PyErr_SetFromErrno(PyExc_OSError);
        return NULL;
    }
    if (fd_devpoll == -1) {
        PyErr_SetFromErrnoWithFilename(PyExc_IOError, "/dev/poll");
        return NULL;
    }

    fds = PyMem_NEW(struct pollfd, limit.rlim_cur);
    if (fds == NULL) {
        close(fd_devpoll);
        PyErr_NoMemory();
        return NULL;
    }

    self = PyObject_New(devpollObject, &devpoll_Type);
    if (self == NULL) {
        close(fd_devpoll);
        PyMem_DEL(fds);
        return NULL;
    }
    self->fd_devpoll = fd_devpoll;
    self->max_n_fds = limit.rlim_cur;
    self->n_fds = 0;
    self->fds = fds;

    return self;
}
/* Deallocator: close the /dev/poll fd (errors ignored), free the pollfd
   buffer, then the object itself. */
static void
devpoll_dealloc(devpollObject *self)
{
    (void)devpoll_internal_close(self);
    PyMem_DEL(self->fds);
    PyObject_Del(self);
}
/* Type object for select.devpoll; instances are created only via
   select.devpoll(), so no tp_new/tp_init slots are provided. */
static PyTypeObject devpoll_Type = {
    /* The ob_type field must be initialized in the module init function
     * to be portable to Windows without using C++. */
    PyVarObject_HEAD_INIT(NULL, 0)
    "select.devpoll", /*tp_name*/
    sizeof(devpollObject), /*tp_basicsize*/
    0, /*tp_itemsize*/
    /* methods */
    (destructor)devpoll_dealloc, /*tp_dealloc*/
    0, /*tp_print*/
    0, /*tp_getattr*/
    0, /*tp_setattr*/
    0, /*tp_reserved*/
    0, /*tp_repr*/
    0, /*tp_as_number*/
    0, /*tp_as_sequence*/
    0, /*tp_as_mapping*/
    0, /*tp_hash*/
    0, /*tp_call*/
    0, /*tp_str*/
    0, /*tp_getattro*/
    0, /*tp_setattro*/
    0, /*tp_as_buffer*/
    Py_TPFLAGS_DEFAULT, /*tp_flags*/
    0, /*tp_doc*/
    0, /*tp_traverse*/
    0, /*tp_clear*/
    0, /*tp_richcompare*/
    0, /*tp_weaklistoffset*/
    0, /*tp_iter*/
    0, /*tp_iternext*/
    devpoll_methods, /*tp_methods*/
    0, /* tp_members */
    devpoll_getsetlist, /* tp_getset */
};
#endif  /* HAVE_SYS_DEVPOLL_H */
PyDoc_STRVAR(poll_doc,
"Returns a polling object, which supports registering and\n\
unregistering file descriptors, and then polling them for I/O events.");

/* Module-level select.poll() factory. */
static PyObject *
select_poll(PyObject *self, PyObject *unused)
{
    return (PyObject *)newPollObject();
}
#ifdef HAVE_SYS_DEVPOLL_H
PyDoc_STRVAR(devpoll_doc,
"Returns a polling object, which supports registering and\n\
unregistering file descriptors, and then polling them for I/O events.");

/* Module-level select.devpoll() factory. */
static PyObject *
select_devpoll(PyObject *self, PyObject *unused)
{
    return (PyObject *)newDevPollObject();
}
#endif
#ifdef __APPLE__
/*
 * On some systems poll() sets errno on invalid file descriptors. We test
 * for this at runtime because this bug may be fixed or introduced between
 * OS releases.
 */
static int select_have_broken_poll(void)
{
    int poll_test;
    int filedes[2];

    struct pollfd poll_struct = { 0, POLLIN|POLLPRI|POLLOUT, 0 };

    /* Create a file descriptor to make invalid */
    if (pipe(filedes) < 0) {
        /* cannot run the probe; conservatively report poll() as broken */
        return 1;
    }
    poll_struct.fd = filedes[0];
    close(filedes[0]);
    close(filedes[1]);

    /* Poll the now-invalid descriptor: a working poll() is expected to
       report it (with POLLNVAL) rather than fail or report nothing. */
    poll_test = poll(&poll_struct, 1, 0);
    if (poll_test < 0) {
        return 1;
    } else if (poll_test == 0 && poll_struct.revents != POLLNVAL) {
        return 1;
    }
    return 0;
}
#endif /* __APPLE__ */

#endif /* HAVE_POLL */
#ifdef HAVE_EPOLL
/* **************************************************************************
* epoll interface for Linux 2.6
*
* Written by Christian Heimes
* Inspired by Twisted's _epoll.pyx and select.poll()
*/
#ifdef HAVE_SYS_EPOLL_H
#include <sys/epoll.h>
#endif
/* select.epoll object: a thin wrapper around one epoll control fd. */
typedef struct {
    PyObject_HEAD
    SOCKET epfd;                        /* epoll control file descriptor */
} pyEpoll_Object;

static PyTypeObject pyEpoll_Type;
#define pyepoll_CHECK(op) (PyObject_TypeCheck((op), &pyEpoll_Type))
/* Raise ValueError for an operation on a closed epoll object.
   Always returns NULL so callers can `return pyepoll_err_closed();`. */
static PyObject *
pyepoll_err_closed(void)
{
    PyErr_SetString(PyExc_ValueError, "I/O operation on closed epoll object");
    return NULL;
}
/* Close the epoll control fd if it is open and mark the object closed.
   Returns the errno from close() on failure, 0 on success or if the
   object was already closed.  epfd is cleared before releasing the GIL
   so concurrent callers observe the closed state. */
static int
pyepoll_internal_close(pyEpoll_Object *self)
{
    int result = 0;
    const int fd = self->epfd;

    if (fd >= 0) {
        self->epfd = -1;
        Py_BEGIN_ALLOW_THREADS
        if (close(fd) < 0) {
            result = errno;
        }
        Py_END_ALLOW_THREADS
    }
    return result;
}
/* Allocate a pyEpoll_Object.  fd == -1 creates a new epoll fd (using
   sizehint/flags); any other fd is wrapped as-is (fromfd path).
   Returns a new reference, or NULL with an exception set. */
static PyObject *
newPyEpoll_Object(PyTypeObject *type, int sizehint, int flags, SOCKET fd)
{
    pyEpoll_Object *self;

    assert(type != NULL && type->tp_alloc != NULL);
    self = (pyEpoll_Object *) type->tp_alloc(type, 0);
    if (self == NULL)
        return NULL;

    if (fd == -1) {
        Py_BEGIN_ALLOW_THREADS
#ifdef HAVE_EPOLL_CREATE1
        /* epoll_create1 can set close-on-exec atomically. */
        flags |= EPOLL_CLOEXEC;
        if (flags)
            self->epfd = epoll_create1(flags);
        else
#endif
        self->epfd = epoll_create(sizehint);
        Py_END_ALLOW_THREADS
    }
    else {
        self->epfd = fd;
    }
    if (self->epfd < 0) {
        Py_DECREF(self);
        PyErr_SetFromErrno(PyExc_OSError);
        return NULL;
    }

#ifndef HAVE_EPOLL_CREATE1
    /* Without epoll_create1, CLOEXEC must be set after the fact. */
    if (fd == -1 && _Py_set_inheritable(self->epfd, 0, NULL) < 0) {
        Py_DECREF(self);
        return NULL;
    }
#endif

    return (PyObject *)self;
}
/* tp_new for select.epoll(sizehint=-1, flags=0).
   Per pyepoll_doc, sizehint may be a positive integer or -1 for the
   default size; previously an explicit -1 incorrectly raised ValueError
   even though omitting the argument used FD_SETSIZE - 1. */
static PyObject *
pyepoll_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    int flags = 0, sizehint = FD_SETSIZE - 1;
    static char *kwlist[] = {"sizehint", "flags", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|ii:epoll", kwlist,
                                     &sizehint, &flags))
        return NULL;
    if (sizehint == -1) {
        /* Documented sentinel: use the default size. */
        sizehint = FD_SETSIZE - 1;
    }
    else if (sizehint < 0) {
        PyErr_SetString(PyExc_ValueError, "negative sizehint");
        return NULL;
    }

    return newPyEpoll_Object(type, sizehint, flags, -1);
}
/* Deallocator: close the epoll fd (errors ignored) and free the object. */
static void
pyepoll_dealloc(pyEpoll_Object *self)
{
    (void)pyepoll_internal_close(self);
    Py_TYPE(self)->tp_free(self);
}
/* epoll.close(): close the epoll control file descriptor.
   Raises OSError if close() fails. */
static PyObject*
pyepoll_close(pyEpoll_Object *self)
{
    errno = pyepoll_internal_close(self);
    /* pyepoll_internal_close() returns 0 or a *positive* errno value, so
       the previous test `errno < 0` could never be true and close()
       failures were silently ignored. */
    if (errno != 0) {
        PyErr_SetFromErrno(PyExc_OSError);
        return NULL;
    }
    Py_RETURN_NONE;
}

PyDoc_STRVAR(pyepoll_close_doc,
"close() -> None\n\
\n\
Close the epoll control file descriptor. Further operations on the epoll\n\
object will raise an exception.");
/* Getter for the `closed` attribute: True once the epoll fd is closed. */
static PyObject*
pyepoll_get_closed(pyEpoll_Object *self)
{
    if (self->epfd >= 0)
        Py_RETURN_FALSE;
    Py_RETURN_TRUE;
}
/* epoll.fileno(): return the epoll control fd, or raise ValueError if the
   object has been closed. */
static PyObject*
pyepoll_fileno(pyEpoll_Object *self)
{
    if (self->epfd < 0)
        return pyepoll_err_closed();
    return PyLong_FromLong(self->epfd);
}

PyDoc_STRVAR(pyepoll_fileno_doc,
"fileno() -> int\n\
\n\
Return the epoll control file descriptor.");
/* Class method epoll.fromfd(fd): wrap an existing epoll control fd. */
static PyObject*
pyepoll_fromfd(PyObject *cls, PyObject *args)
{
    SOCKET fd;

    if (!PyArg_ParseTuple(args, "i:fromfd", &fd))
        return NULL;

    return newPyEpoll_Object((PyTypeObject*)cls, FD_SETSIZE - 1, 0, fd);
}

PyDoc_STRVAR(pyepoll_fromfd_doc,
"fromfd(fd) -> epoll\n\
\n\
Create an epoll object from a given control fd.");
/* Common implementation for register()/modify()/unregister(): run one
   epoll_ctl() operation on epfd for the fd extracted from pfd.
   Returns None on success, NULL with an exception set on failure. */
static PyObject *
pyepoll_internal_ctl(int epfd, int op, PyObject *pfd, unsigned int events)
{
    struct epoll_event ev;
    int result;
    int fd;

    if (epfd < 0)
        return pyepoll_err_closed();

    fd = PyObject_AsFileDescriptor(pfd);
    if (fd == -1) {
        return NULL;
    }

    switch (op) {
    case EPOLL_CTL_ADD:
    case EPOLL_CTL_MOD:
        ev.events = events;
        ev.data.fd = fd;
        Py_BEGIN_ALLOW_THREADS
        result = epoll_ctl(epfd, op, fd, &ev);
        Py_END_ALLOW_THREADS
        break;
    case EPOLL_CTL_DEL:
        /* In kernel versions before 2.6.9, the EPOLL_CTL_DEL
         * operation required a non-NULL pointer in event, even
         * though this argument is ignored. */
        Py_BEGIN_ALLOW_THREADS
        result = epoll_ctl(epfd, op, fd, &ev);
        if (errno == EBADF) {
            /* fd already closed */
            result = 0;
            errno = 0;
        }
        Py_END_ALLOW_THREADS
        break;
    default:
        result = -1;
        errno = EINVAL;
    }

    if (result < 0) {
        PyErr_SetFromErrno(PyExc_OSError);
        return NULL;
    }
    Py_RETURN_NONE;
}
/* epoll.register(fd[, eventmask]): add fd with the given event mask
   (default EPOLLIN | EPOLLOUT | EPOLLPRI). */
static PyObject *
pyepoll_register(pyEpoll_Object *self, PyObject *args, PyObject *kwds)
{
    PyObject *pfd;
    unsigned int events = EPOLLIN | EPOLLOUT | EPOLLPRI;
    static char *kwlist[] = {"fd", "eventmask", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|I:register", kwlist,
                                     &pfd, &events)) {
        return NULL;
    }

    return pyepoll_internal_ctl(self->epfd, EPOLL_CTL_ADD, pfd, events);
}

PyDoc_STRVAR(pyepoll_register_doc,
"register(fd[, eventmask]) -> None\n\
\n\
Registers a new fd or raises an OSError if the fd is already registered.\n\
fd is the target file descriptor of the operation.\n\
events is a bit set composed of the various EPOLL constants; the default\n\
is EPOLL_IN | EPOLL_OUT | EPOLL_PRI.\n\
\n\
The epoll interface supports all file descriptors that support poll.");
/* epoll.modify(fd, eventmask): change the event mask of a registered fd. */
static PyObject *
pyepoll_modify(pyEpoll_Object *self, PyObject *args, PyObject *kwds)
{
    PyObject *pfd;
    unsigned int events;
    static char *kwlist[] = {"fd", "eventmask", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OI:modify", kwlist,
                                     &pfd, &events)) {
        return NULL;
    }

    return pyepoll_internal_ctl(self->epfd, EPOLL_CTL_MOD, pfd, events);
}

PyDoc_STRVAR(pyepoll_modify_doc,
"modify(fd, eventmask) -> None\n\
\n\
fd is the target file descriptor of the operation\n\
events is a bit set composed of the various EPOLL constants");
/* epoll.unregister(fd): remove fd from the epoll set (EPOLL_CTL_DEL). */
static PyObject *
pyepoll_unregister(pyEpoll_Object *self, PyObject *args, PyObject *kwds)
{
    PyObject *pfd;
    static char *kwlist[] = {"fd", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:unregister", kwlist,
                                     &pfd)) {
        return NULL;
    }

    return pyepoll_internal_ctl(self->epfd, EPOLL_CTL_DEL, pfd, 0);
}

PyDoc_STRVAR(pyepoll_unregister_doc,
"unregister(fd) -> None\n\
\n\
fd is the target file descriptor of the operation.");
/* epoll.poll(timeout=-1, maxevents=-1): wait for events and return a list
   of (fd, events) tuples.  timeout is in seconds (float); any negative
   value blocks indefinitely. */
static PyObject *
pyepoll_poll(pyEpoll_Object *self, PyObject *args, PyObject *kwds)
{
    double dtimeout = -1.;
    int timeout;
    int maxevents = -1;
    int nfds, i;
    PyObject *elist = NULL, *etuple = NULL;
    struct epoll_event *evs = NULL;
    static char *kwlist[] = {"timeout", "maxevents", NULL};

    if (self->epfd < 0)
        return pyepoll_err_closed();

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|di:poll", kwlist,
                                     &dtimeout, &maxevents)) {
        return NULL;
    }

    if (dtimeout < 0) {
        timeout = -1;           /* epoll_wait: block indefinitely */
    }
    else if (dtimeout * 1000.0 > INT_MAX) {
        /* epoll_wait takes an int number of milliseconds. */
        PyErr_SetString(PyExc_OverflowError,
                        "timeout is too large");
        return NULL;
    }
    else {
        /* epoll_wait() has a resolution of 1 millisecond, round away from zero
           to wait *at least* dtimeout seconds. */
        timeout = (int)ceil(dtimeout * 1000.0);
    }

    if (maxevents == -1) {
        maxevents = FD_SETSIZE-1;
    }
    else if (maxevents < 1) {
        PyErr_Format(PyExc_ValueError,
                     "maxevents must be greater than 0, got %d",
                     maxevents);
        return NULL;
    }

    evs = PyMem_New(struct epoll_event, maxevents);
    if (evs == NULL) {
        PyErr_NoMemory();
        return NULL;
    }

    Py_BEGIN_ALLOW_THREADS
    nfds = epoll_wait(self->epfd, evs, maxevents, timeout);
    Py_END_ALLOW_THREADS
    if (nfds < 0) {
        PyErr_SetFromErrno(PyExc_OSError);
        goto error;
    }

    elist = PyList_New(nfds);
    if (elist == NULL) {
        goto error;
    }

    for (i = 0; i < nfds; i++) {
        etuple = Py_BuildValue("iI", evs[i].data.fd, evs[i].events);
        if (etuple == NULL) {
            Py_CLEAR(elist);
            goto error;
        }
        PyList_SET_ITEM(elist, i, etuple);
    }

    /* Fall through on success: only the event buffer needs freeing. */
    error:
    PyMem_Free(evs);
    return elist;
}
/* __enter__: return self (incref'd) so epoll objects work as context
   managers; raises if the object is already closed. */
static PyObject *
pyepoll_enter(pyEpoll_Object *self, PyObject *args)
{
    if (self->epfd < 0)
        return pyepoll_err_closed();

    Py_INCREF(self);
    return (PyObject *)self;
}
/* __exit__: delegate to self.close(); exception arguments are ignored. */
static PyObject *
pyepoll_exit(PyObject *self, PyObject *args)
{
    _Py_IDENTIFIER(close);

    return _PyObject_CallMethodId(self, &PyId_close, NULL);
}
/* Method table for select.epoll objects. */
static PyMethodDef pyepoll_methods[] = {
    {"fromfd", (PyCFunction)pyepoll_fromfd,
     METH_VARARGS | METH_CLASS, pyepoll_fromfd_doc},
    {"close", (PyCFunction)pyepoll_close, METH_NOARGS,
     pyepoll_close_doc},
    {"fileno", (PyCFunction)pyepoll_fileno, METH_NOARGS,
     pyepoll_fileno_doc},
    {"modify", (PyCFunction)pyepoll_modify,
     METH_VARARGS | METH_KEYWORDS, pyepoll_modify_doc},
    {"register", (PyCFunction)pyepoll_register,
     METH_VARARGS | METH_KEYWORDS, pyepoll_register_doc},
    {"unregister", (PyCFunction)pyepoll_unregister,
     METH_VARARGS | METH_KEYWORDS, pyepoll_unregister_doc},
    {"poll", (PyCFunction)pyepoll_poll,
     METH_VARARGS | METH_KEYWORDS, pyepoll_poll_doc},
    {"__enter__", (PyCFunction)pyepoll_enter, METH_NOARGS,
     NULL},
    {"__exit__", (PyCFunction)pyepoll_exit, METH_VARARGS,
     NULL},
    {NULL, NULL},
};

/* Attribute descriptors for select.epoll objects (read-only `closed`). */
static PyGetSetDef pyepoll_getsetlist[] = {
    {"closed", (getter)pyepoll_get_closed, NULL,
     "True if the epoll handler is closed"},
    {0},
};

PyDoc_STRVAR(pyepoll_doc,
"select.epoll(sizehint=-1, flags=0)\n\
\n\
Returns an epolling object\n\
\n\
sizehint must be a positive integer or -1 for the default size. The\n\
sizehint is used to optimize internal data structures. It doesn't limit\n\
the maximum number of monitored events.");
/* Type object for select.epoll; unlike poll/devpoll it is instantiated
   directly (tp_new is pyepoll_new). */
static PyTypeObject pyEpoll_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "select.epoll",                                     /* tp_name */
    sizeof(pyEpoll_Object),                             /* tp_basicsize */
    0,                                                  /* tp_itemsize */
    (destructor)pyepoll_dealloc,                        /* tp_dealloc */
    0,                                                  /* tp_print */
    0,                                                  /* tp_getattr */
    0,                                                  /* tp_setattr */
    0,                                                  /* tp_reserved */
    0,                                                  /* tp_repr */
    0,                                                  /* tp_as_number */
    0,                                                  /* tp_as_sequence */
    0,                                                  /* tp_as_mapping */
    0,                                                  /* tp_hash */
    0,                                                  /* tp_call */
    0,                                                  /* tp_str */
    PyObject_GenericGetAttr,                            /* tp_getattro */
    0,                                                  /* tp_setattro */
    0,                                                  /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                                 /* tp_flags */
    pyepoll_doc,                                        /* tp_doc */
    0,                                                  /* tp_traverse */
    0,                                                  /* tp_clear */
    0,                                                  /* tp_richcompare */
    0,                                                  /* tp_weaklistoffset */
    0,                                                  /* tp_iter */
    0,                                                  /* tp_iternext */
    pyepoll_methods,                                    /* tp_methods */
    0,                                                  /* tp_members */
    pyepoll_getsetlist,                                 /* tp_getset */
    0,                                                  /* tp_base */
    0,                                                  /* tp_dict */
    0,                                                  /* tp_descr_get */
    0,                                                  /* tp_descr_set */
    0,                                                  /* tp_dictoffset */
    0,                                                  /* tp_init */
    0,                                                  /* tp_alloc */
    pyepoll_new,                                        /* tp_new */
    0,                                                  /* tp_free */
};
#endif /* HAVE_EPOLL */
#ifdef HAVE_KQUEUE
/* **************************************************************************
* kqueue interface for BSD
*
* Copyright (c) 2000 Doug White, 2006 James Knight, 2007 Christian Heimes
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifdef HAVE_SYS_EVENT_H
#include <sys/event.h>
#endif
PyDoc_STRVAR(kqueue_event_doc,
"kevent(ident, filter=KQ_FILTER_READ, flags=KQ_EV_ADD, fflags=0, data=0, udata=0)\n\
\n\
This object is the equivalent of the struct kevent for the C API.\n\
\n\
See the kqueue manpage for more detailed information about the meaning\n\
of the arguments.\n\
\n\
One minor note: while you might hope that udata could store a\n\
reference to a python object, it cannot, because it is impossible to\n\
keep a proper reference count of the object once it's passed into the\n\
kernel. Therefore, I have restricted it to only storing an integer.  I\n\
recommend ignoring it and simply using the 'ident' field to key off\n\
of. You could also set up a dictionary on the python side to store a\n\
udata->object mapping.");

/* select.kevent: wraps one C `struct kevent` value. */
typedef struct {
    PyObject_HEAD
    struct kevent e;           /* the wrapped kernel event structure */
} kqueue_event_Object;

static PyTypeObject kqueue_event_Type;

#define kqueue_event_Check(op) (PyObject_TypeCheck((op), &kqueue_event_Type))

/* select.kqueue: wraps one kqueue control fd. */
typedef struct {
    PyObject_HEAD
    SOCKET kqfd;               /* kqueue control fd */
} kqueue_queue_Object;

static PyTypeObject kqueue_queue_Type;

#define kqueue_queue_Check(op) (PyObject_TypeCheck((op), &kqueue_queue_Type))
/* Map uintptr_t onto the matching PyMemberDef type code and
   PyArg_ParseTuple format unit for this platform. */
#if (SIZEOF_UINTPTR_T != SIZEOF_VOID_P)
#   error uintptr_t does not match void *!
#elif (SIZEOF_UINTPTR_T == SIZEOF_LONG_LONG)
#   define T_UINTPTRT         T_ULONGLONG
#   define T_INTPTRT          T_LONGLONG
#   define PyLong_AsUintptr_t PyLong_AsUnsignedLongLong
#   define UINTPTRT_FMT_UNIT  "K"
#   define INTPTRT_FMT_UNIT   "L"
#elif (SIZEOF_UINTPTR_T == SIZEOF_LONG)
#   define T_UINTPTRT         T_ULONG
#   define T_INTPTRT          T_LONG
#   define PyLong_AsUintptr_t PyLong_AsUnsignedLong
#   define UINTPTRT_FMT_UNIT  "k"
#   define INTPTRT_FMT_UNIT   "l"
#elif (SIZEOF_UINTPTR_T == SIZEOF_INT)
#   define T_UINTPTRT         T_UINT
#   define T_INTPTRT          T_INT
#   define PyLong_AsUintptr_t PyLong_AsUnsignedLong
#   define UINTPTRT_FMT_UNIT  "I"
#   define INTPTRT_FMT_UNIT   "i"
#else
#   error uintptr_t does not match int, long, or long long!
#endif

/*
 * kevent is not standard and its members vary across BSDs.
 */
#if !defined(__OpenBSD__)
#   define IDENT_TYPE        T_UINTPTRT
#   define IDENT_CAST        Py_intptr_t
#   define DATA_TYPE         T_INTPTRT
#   define DATA_FMT_UNIT     INTPTRT_FMT_UNIT
#   define IDENT_AsType      PyLong_AsUintptr_t
#else
#   define IDENT_TYPE        T_UINT
#   define IDENT_CAST        int
#   define DATA_TYPE         T_INT
#   define DATA_FMT_UNIT     "i"
#   define IDENT_AsType      PyLong_AsUnsignedLong
#endif

/* Unfortunately, we can't store python objects in udata, because
 * kevents in the kernel can be removed without warning, which would
 * forever lose the refcount on the object stored with it.
 */

/* Member table exposing the struct kevent fields as Python attributes. */
#define KQ_OFF(x) offsetof(kqueue_event_Object, x)
static struct PyMemberDef kqueue_event_members[] = {
    {"ident",           IDENT_TYPE,     KQ_OFF(e.ident)},
    {"filter",          T_SHORT,        KQ_OFF(e.filter)},
    {"flags",           T_USHORT,       KQ_OFF(e.flags)},
    {"fflags",          T_UINT,         KQ_OFF(e.fflags)},
    {"data",            DATA_TYPE,      KQ_OFF(e.data)},
    {"udata",           T_UINTPTRT,     KQ_OFF(e.udata)},
    {NULL} /* Sentinel */
};
#undef KQ_OFF
/* repr() for select.kevent objects; renders all struct kevent fields. */
static PyObject *
kqueue_event_repr(kqueue_event_Object *s)
{
    char buf[1024];
    PyOS_snprintf(
        buf, sizeof(buf),
        "<select.kevent ident=%zu filter=%d flags=0x%x fflags=0x%x "
        "data=0x%zd udata=%p>",
        (size_t)(s->e.ident), s->e.filter, s->e.flags,
        s->e.fflags, (Py_ssize_t)(s->e.data), s->e.udata);
    return PyUnicode_FromString(buf);
}
/* __init__ for select.kevent(ident, filter=KQ_FILTER_READ, flags=KQ_EV_ADD,
   fflags=0, data=0, udata=0).  ident may be an integer or any object with
   a fileno() method.  Returns 0 on success, -1 with an exception set. */
static int
kqueue_event_init(kqueue_event_Object *self, PyObject *args, PyObject *kwds)
{
    PyObject *pfd;
    static char *kwlist[] = {"ident", "filter", "flags", "fflags",
                             "data", "udata", NULL};
    static char *fmt = "O|hHI" DATA_FMT_UNIT UINTPTRT_FMT_UNIT ":kevent";

    EV_SET(&(self->e), 0, EVFILT_READ, EV_ADD, 0, 0, 0); /* defaults */

    if (!PyArg_ParseTupleAndKeywords(args, kwds, fmt, kwlist,
        &pfd, &(self->e.filter), &(self->e.flags),
        &(self->e.fflags), &(self->e.data), &(self->e.udata))) {
        return -1;
    }

    /* Integers that fit the ident type are taken as-is via IDENT_AsType;
       everything else (fileno() objects, out-of-range ints) goes through
       PyObject_AsFileDescriptor. */
    if (PyLong_Check(pfd)
#if IDENT_TYPE == T_UINT
        && PyLong_AsUnsignedLong(pfd) <= UINT_MAX
#endif
    ) {
        self->e.ident = IDENT_AsType(pfd);
    }
    else {
        self->e.ident = PyObject_AsFileDescriptor(pfd);
    }
    /* Both conversion paths report failure via the exception state. */
    if (PyErr_Occurred()) {
        return -1;
    }
    return 0;
}
/* Rich comparison for select.kevent objects: compare field by field in
   declaration order (ident, filter, flags, fflags, data, udata).
   The previous implementation computed each three-way result by
   subtraction, which truncates/overflows for differences wider than the
   result type (and performed pointer subtraction on udata), producing
   wrong orderings and even wrong equality for large values.  Use explicit
   less-than/greater-than comparisons instead. */
static PyObject *
kqueue_event_richcompare(kqueue_event_Object *s, kqueue_event_Object *o,
                         int op)
{
    int result;

    if (!kqueue_event_Check(o)) {
        if (op == Py_EQ || op == Py_NE) {
            PyObject *res = op == Py_EQ ? Py_False : Py_True;
            Py_INCREF(res);
            return res;
        }
        PyErr_Format(PyExc_TypeError,
                     "can't compare %.200s to %.200s",
                     Py_TYPE(s)->tp_name, Py_TYPE(o)->tp_name);
        return NULL;
    }

    /* Overflow-safe three-way comparison. */
#define CMP(a, b) (((a) < (b)) ? -1 : (((a) > (b)) ? 1 : 0))
    result = CMP(s->e.ident, o->e.ident);
    if (result == 0)
        result = CMP(s->e.filter, o->e.filter);
    if (result == 0)
        result = CMP(s->e.flags, o->e.flags);
    if (result == 0)
        result = CMP(s->e.fflags, o->e.fflags);
    if (result == 0)
        result = CMP(s->e.data, o->e.data);
    if (result == 0)
        result = CMP((Py_uintptr_t)s->e.udata, (Py_uintptr_t)o->e.udata);
#undef CMP

    switch (op) {
    case Py_EQ:
        result = (result == 0);
        break;
    case Py_NE:
        result = (result != 0);
        break;
    case Py_LE:
        result = (result <= 0);
        break;
    case Py_GE:
        result = (result >= 0);
        break;
    case Py_LT:
        result = (result < 0);
        break;
    case Py_GT:
        result = (result > 0);
        break;
    }
    return PyBool_FromLong((long)result);
}
/* Type object for select.kevent; exposes the struct fields through
   kqueue_event_members and supports ordering via the richcompare slot. */
static PyTypeObject kqueue_event_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "select.kevent",                                    /* tp_name */
    sizeof(kqueue_event_Object),                        /* tp_basicsize */
    0,                                                  /* tp_itemsize */
    0,                                                  /* tp_dealloc */
    0,                                                  /* tp_print */
    0,                                                  /* tp_getattr */
    0,                                                  /* tp_setattr */
    0,                                                  /* tp_reserved */
    (reprfunc)kqueue_event_repr,                        /* tp_repr */
    0,                                                  /* tp_as_number */
    0,                                                  /* tp_as_sequence */
    0,                                                  /* tp_as_mapping */
    0,                                                  /* tp_hash */
    0,                                                  /* tp_call */
    0,                                                  /* tp_str */
    0,                                                  /* tp_getattro */
    0,                                                  /* tp_setattro */
    0,                                                  /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                                 /* tp_flags */
    kqueue_event_doc,                                   /* tp_doc */
    0,                                                  /* tp_traverse */
    0,                                                  /* tp_clear */
    (richcmpfunc)kqueue_event_richcompare,              /* tp_richcompare */
    0,                                                  /* tp_weaklistoffset */
    0,                                                  /* tp_iter */
    0,                                                  /* tp_iternext */
    0,                                                  /* tp_methods */
    kqueue_event_members,                               /* tp_members */
    0,                                                  /* tp_getset */
    0,                                                  /* tp_base */
    0,                                                  /* tp_dict */
    0,                                                  /* tp_descr_get */
    0,                                                  /* tp_descr_set */
    0,                                                  /* tp_dictoffset */
    (initproc)kqueue_event_init,                        /* tp_init */
    0,                                                  /* tp_alloc */
    0,                                                  /* tp_new */
    0,                                                  /* tp_free */
};
/* Raise ValueError for an operation on a closed kqueue object.
   Always returns NULL so callers can `return kqueue_queue_err_closed();`. */
static PyObject *
kqueue_queue_err_closed(void)
{
    PyErr_SetString(PyExc_ValueError, "I/O operation on closed kqueue object");
    return NULL;
}
/* Close the kqueue control fd if it is open and mark the object closed.
   Returns the errno from close() on failure, 0 on success or if the
   object was already closed.  kqfd is cleared before releasing the GIL
   so concurrent callers observe the closed state. */
static int
kqueue_queue_internal_close(kqueue_queue_Object *self)
{
    int result = 0;
    const int fd = self->kqfd;

    if (fd >= 0) {
        self->kqfd = -1;
        Py_BEGIN_ALLOW_THREADS
        if (close(fd) < 0) {
            result = errno;
        }
        Py_END_ALLOW_THREADS
    }
    return result;
}
/* Allocate a kqueue_queue_Object.  fd == -1 creates a new kqueue fd
   (made non-inheritable); any other fd is wrapped as-is (fromfd path).
   Returns a new reference, or NULL with an exception set. */
static PyObject *
newKqueue_Object(PyTypeObject *type, SOCKET fd)
{
    kqueue_queue_Object *self;
    assert(type != NULL && type->tp_alloc != NULL);
    self = (kqueue_queue_Object *) type->tp_alloc(type, 0);
    if (self == NULL) {
        return NULL;
    }

    if (fd == -1) {
        Py_BEGIN_ALLOW_THREADS
        self->kqfd = kqueue();
        Py_END_ALLOW_THREADS
    }
    else {
        self->kqfd = fd;
    }
    if (self->kqfd < 0) {
        Py_DECREF(self);
        PyErr_SetFromErrno(PyExc_OSError);
        return NULL;
    }

    if (fd == -1) {
        /* kqueue() cannot set close-on-exec itself; do it afterwards. */
        if (_Py_set_inheritable(self->kqfd, 0, NULL) < 0) {
            Py_DECREF(self);
            return NULL;
        }
    }
    return (PyObject *)self;
}
/* tp_new for select.kqueue(): takes no arguments. */
static PyObject *
kqueue_queue_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    if ((args != NULL && PyObject_Size(args)) ||
        (kwds != NULL && PyObject_Size(kwds))) {
        PyErr_SetString(PyExc_ValueError,
                        "select.kqueue doesn't accept arguments");
        return NULL;
    }

    return newKqueue_Object(type, -1);
}
/* Deallocator: close the kqueue fd (errors ignored) and free the object. */
static void
kqueue_queue_dealloc(kqueue_queue_Object *self)
{
    kqueue_queue_internal_close(self);
    Py_TYPE(self)->tp_free(self);
}
/* kqueue.close(): close the kqueue control file descriptor.
   Raises OSError if close() fails. */
static PyObject*
kqueue_queue_close(kqueue_queue_Object *self)
{
    errno = kqueue_queue_internal_close(self);
    /* kqueue_queue_internal_close() returns 0 or a *positive* errno value,
       so the previous test `errno < 0` could never be true and close()
       failures were silently ignored. */
    if (errno != 0) {
        PyErr_SetFromErrno(PyExc_OSError);
        return NULL;
    }
    Py_RETURN_NONE;
}

PyDoc_STRVAR(kqueue_queue_close_doc,
"close() -> None\n\
\n\
Close the kqueue control file descriptor. Further operations on the kqueue\n\
object will raise an exception.");
/* Getter for the `closed` attribute: True once the kqueue fd is closed. */
static PyObject*
kqueue_queue_get_closed(kqueue_queue_Object *self)
{
    if (self->kqfd >= 0)
        Py_RETURN_FALSE;
    Py_RETURN_TRUE;
}
/* kqueue.fileno(): return the kqueue control fd, or raise ValueError if
   the object has been closed. */
static PyObject*
kqueue_queue_fileno(kqueue_queue_Object *self)
{
    if (self->kqfd < 0)
        return kqueue_queue_err_closed();
    return PyLong_FromLong(self->kqfd);
}

PyDoc_STRVAR(kqueue_queue_fileno_doc,
"fileno() -> int\n\
\n\
Return the kqueue control file descriptor.");
/* Class method kqueue.fromfd(fd): wrap an existing kqueue control fd. */
static PyObject*
kqueue_queue_fromfd(PyObject *cls, PyObject *args)
{
    SOCKET fd;

    if (!PyArg_ParseTuple(args, "i:fromfd", &fd))
        return NULL;

    return newKqueue_Object((PyTypeObject*)cls, fd);
}

PyDoc_STRVAR(kqueue_queue_fromfd_doc,
"fromfd(fd) -> kqueue\n\
\n\
Create a kqueue object from a given control fd.");
/* kqueue.control(changelist, max_events[, timeout]) -> list of kevent.
 *
 * Marshals the Python arguments into one kevent(2) system call:
 *  - changelist: iterable of select.kevent objects (or None) applied to
 *    the kernel's watch list;
 *  - max_events (nevents): capacity of the output event buffer;
 *  - timeout: None (or omitted) to block forever, else seconds.
 * Returns a new list of kevent objects for the events that fired. */
static PyObject *
kqueue_queue_control(kqueue_queue_Object *self, PyObject *args)
{
    int nevents = 0;
    int gotevents = 0;
    int nchanges = 0;
    int i = 0;
    PyObject *otimeout = NULL;
    PyObject *ch = NULL;
    PyObject *it = NULL, *ei = NULL;
    PyObject *result = NULL;
    struct kevent *evl = NULL;
    struct kevent *chl = NULL;
    struct timespec timeout;
    struct timespec *ptimeoutspec;

    if (self->kqfd < 0)
        return kqueue_queue_err_closed();

    if (!PyArg_ParseTuple(args, "Oi|O:control", &ch, &nevents, &otimeout))
        return NULL;

    if (nevents < 0) {
        PyErr_Format(PyExc_ValueError,
            "Length of eventlist must be 0 or positive, got %d",
            nevents);
        return NULL;
    }

    /* Timeout: None/absent -> NULL timespec, i.e. block indefinitely. */
    if (otimeout == Py_None || otimeout == NULL) {
        ptimeoutspec = NULL;
    }
    else if (PyNumber_Check(otimeout)) {
        if (_PyTime_ObjectToTimespec(otimeout, &timeout.tv_sec,
                                     &timeout.tv_nsec, _PyTime_ROUND_UP) == -1)
            return NULL;

        /* NOTE(review): only tv_sec is range-checked here; a negative
         * sub-second timeout with tv_sec == 0 is not rejected — confirm
         * _PyTime_ObjectToTimespec normalizes that case. */
        if (timeout.tv_sec < 0) {
            PyErr_SetString(PyExc_ValueError,
                            "timeout must be positive or None");
            return NULL;
        }
        ptimeoutspec = &timeout;
    }
    else {
        PyErr_Format(PyExc_TypeError,
            "timeout argument must be an number "
            "or None, got %.200s",
            Py_TYPE(otimeout)->tp_name);
        return NULL;
    }

    /* Copy the changelist into a C array of struct kevent. */
    if (ch != NULL && ch != Py_None) {
        it = PyObject_GetIter(ch);
        if (it == NULL) {
            PyErr_SetString(PyExc_TypeError,
                            "changelist is not iterable");
            return NULL;
        }
        nchanges = PyObject_Size(ch);
        if (nchanges < 0) {
            goto error;
        }

        /* NOTE(review): chl is sized from PyObject_Size(ch); an object
         * whose __len__ disagrees with the number of items its iterator
         * yields could overrun this buffer — verify upstream guarantees
         * or clamp i against nchanges. */
        chl = PyMem_New(struct kevent, nchanges);
        if (chl == NULL) {
            PyErr_NoMemory();
            goto error;
        }
        i = 0;
        /* NOTE(review): PyIter_Next returns NULL both on exhaustion and
         * on error; iterator errors are not distinguished here. */
        while ((ei = PyIter_Next(it)) != NULL) {
            if (!kqueue_event_Check(ei)) {
                Py_DECREF(ei);
                PyErr_SetString(PyExc_TypeError,
                                "changelist must be an iterable of "
                                "select.kevent objects");
                goto error;
            } else {
                chl[i++] = ((kqueue_event_Object *)ei)->e;
            }
            Py_DECREF(ei);
        }
    }
    Py_CLEAR(it);

    /* event list */
    if (nevents) {
        evl = PyMem_New(struct kevent, nevents);
        if (evl == NULL) {
            PyErr_NoMemory();
            goto error;
        }
    }

    /* The actual syscall; release the GIL since it may block. */
    Py_BEGIN_ALLOW_THREADS
    gotevents = kevent(self->kqfd, chl, nchanges,
                       evl, nevents, ptimeoutspec);
    Py_END_ALLOW_THREADS

    if (gotevents == -1) {
        PyErr_SetFromErrno(PyExc_OSError);
        goto error;
    }

    /* Wrap each returned struct kevent in a select.kevent object. */
    result = PyList_New(gotevents);
    if (result == NULL) {
        goto error;
    }

    for (i = 0; i < gotevents; i++) {
        kqueue_event_Object *ch;

        ch = PyObject_New(kqueue_event_Object, &kqueue_event_Type);
        if (ch == NULL) {
            goto error;
        }
        ch->e = evl[i];
        PyList_SET_ITEM(result, i, (PyObject *)ch);
    }
    PyMem_Free(chl);
    PyMem_Free(evl);
    return result;

    error:
    PyMem_Free(chl);
    PyMem_Free(evl);
    Py_XDECREF(result);
    Py_XDECREF(it);
    return NULL;
}
/* Docstring for kqueue.control(). */
PyDoc_STRVAR(kqueue_queue_control_doc,
"control(changelist, max_events[, timeout=None]) -> eventlist\n\
\n\
Calls the kernel kevent function.\n\
- changelist must be a list of kevent objects describing the changes\n\
to be made to the kernel's watch list or None.\n\
- max_events lets you specify the maximum number of events that the\n\
kernel will return.\n\
- timeout is the maximum time to wait in seconds, or else None,\n\
to wait forever. timeout accepts floats for smaller timeouts, too.");

/* Method table for select.kqueue objects.  fromfd is a classmethod. */
static PyMethodDef kqueue_queue_methods[] = {
    {"fromfd", (PyCFunction)kqueue_queue_fromfd,
     METH_VARARGS | METH_CLASS, kqueue_queue_fromfd_doc},
    {"close", (PyCFunction)kqueue_queue_close, METH_NOARGS,
     kqueue_queue_close_doc},
    {"fileno", (PyCFunction)kqueue_queue_fileno, METH_NOARGS,
     kqueue_queue_fileno_doc},
    {"control", (PyCFunction)kqueue_queue_control,
     METH_VARARGS , kqueue_queue_control_doc},
    {NULL, NULL},   /* sentinel */
};

/* Read-only "closed" property. */
static PyGetSetDef kqueue_queue_getsetlist[] = {
    {"closed", (getter)kqueue_queue_get_closed, NULL,
     "True if the kqueue handler is closed"},
    {0},   /* sentinel */
};

/* Type docstring for select.kqueue. */
PyDoc_STRVAR(kqueue_queue_doc,
"Kqueue syscall wrapper.\n\
\n\
For example, to start watching a socket for input:\n\
>>> kq = kqueue()\n\
>>> sock = socket()\n\
>>> sock.connect((host, port))\n\
>>> kq.control([kevent(sock, KQ_FILTER_WRITE, KQ_EV_ADD)], 0)\n\
\n\
To wait one second for it to become writeable:\n\
>>> kq.control(None, 1, 1000)\n\
\n\
To stop listening:\n\
>>> kq.control([kevent(sock, KQ_FILTER_WRITE, KQ_EV_DELETE)], 0)");

/* Static type object for select.kqueue.  ob_type is patched and
 * PyType_Ready() is called from PyInit_select(). */
static PyTypeObject kqueue_queue_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    "select.kqueue",                        /* tp_name */
    sizeof(kqueue_queue_Object),            /* tp_basicsize */
    0,                                      /* tp_itemsize */
    (destructor)kqueue_queue_dealloc,       /* tp_dealloc */
    0,                                      /* tp_print */
    0,                                      /* tp_getattr */
    0,                                      /* tp_setattr */
    0,                                      /* tp_reserved */
    0,                                      /* tp_repr */
    0,                                      /* tp_as_number */
    0,                                      /* tp_as_sequence */
    0,                                      /* tp_as_mapping */
    0,                                      /* tp_hash */
    0,                                      /* tp_call */
    0,                                      /* tp_str */
    0,                                      /* tp_getattro */
    0,                                      /* tp_setattro */
    0,                                      /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT,                     /* tp_flags */
    kqueue_queue_doc,                       /* tp_doc */
    0,                                      /* tp_traverse */
    0,                                      /* tp_clear */
    0,                                      /* tp_richcompare */
    0,                                      /* tp_weaklistoffset */
    0,                                      /* tp_iter */
    0,                                      /* tp_iternext */
    kqueue_queue_methods,                   /* tp_methods */
    0,                                      /* tp_members */
    kqueue_queue_getsetlist,                /* tp_getset */
    0,                                      /* tp_base */
    0,                                      /* tp_dict */
    0,                                      /* tp_descr_get */
    0,                                      /* tp_descr_set */
    0,                                      /* tp_dictoffset */
    0,                                      /* tp_init */
    0,                                      /* tp_alloc */
    kqueue_queue_new,                       /* tp_new */
    0,                                      /* tp_free */
};
#endif /* HAVE_KQUEUE */
/* ************************************************************************ */
/* Docstring for the module-level select() function. */
PyDoc_STRVAR(select_doc,
"select(rlist, wlist, xlist[, timeout]) -> (rlist, wlist, xlist)\n\
\n\
Wait until one or more file descriptors are ready for some kind of I/O.\n\
The first three arguments are sequences of file descriptors to be waited for:\n\
rlist -- wait until ready for reading\n\
wlist -- wait until ready for writing\n\
xlist -- wait for an ``exceptional condition''\n\
If only one kind of condition is required, pass [] for the other lists.\n\
A file descriptor is either a socket or file object, or a small integer\n\
gotten from a fileno() method call on one of those.\n\
\n\
The optional 4th argument specifies a timeout in seconds; it may be\n\
a floating point number to specify fractions of seconds. If it is absent\n\
or None, the call will never time out.\n\
\n\
The return value is a tuple of three lists corresponding to the first three\n\
arguments; each contains the subset of the corresponding file descriptors\n\
that are ready.\n\
\n\
*** IMPORTANT NOTICE ***\n\
On Windows, only sockets are supported; on Unix, all file\n\
descriptors can be used.");

/* Module-level functions; poll/devpoll only exist where the platform
 * provides them. */
static PyMethodDef select_methods[] = {
    {"select", select_select, METH_VARARGS, select_doc},
#if defined(HAVE_POLL) && !defined(HAVE_BROKEN_POLL)
    {"poll", select_poll, METH_NOARGS, poll_doc},
#endif /* HAVE_POLL */
#ifdef HAVE_SYS_DEVPOLL_H
    {"devpoll", select_devpoll, METH_NOARGS, devpoll_doc},
#endif
    {0, 0},     /* sentinel */
};

/* Module docstring. */
PyDoc_STRVAR(module_doc,
"This module supports asynchronous I/O on multiple file descriptors.\n\
\n\
*** IMPORTANT NOTICE ***\n\
On Windows, only sockets are supported; on Unix, all file descriptors.");

/* Module definition (m_size == -1: no per-interpreter state). */
static struct PyModuleDef selectmodule = {
    PyModuleDef_HEAD_INIT,
    "select",       /* m_name */
    module_doc,     /* m_doc */
    -1,             /* m_size */
    select_methods, /* m_methods */
    NULL,           /* m_slots */
    NULL,           /* m_traverse */
    NULL,           /* m_clear */
    NULL            /* m_free */
};
/* Module initialization: create the module, then register constants and
 * types for whichever polling mechanisms this platform provides
 * (poll, /dev/poll, epoll, kqueue).
 * NOTE(review): PyModule_AddObject/AddIntConstant return values are not
 * checked throughout — failures here would be silently ignored. */
PyMODINIT_FUNC
PyInit_select(void)
{
    PyObject *m;
    m = PyModule_Create(&selectmodule);
    if (m == NULL)
        return NULL;

    /* select.error is an alias of OSError. */
    Py_INCREF(PyExc_OSError);
    PyModule_AddObject(m, "error", PyExc_OSError);

#ifdef PIPE_BUF
#ifdef HAVE_BROKEN_PIPE_BUF
#undef PIPE_BUF
#define PIPE_BUF 512
#endif
    PyModule_AddIntMacro(m, PIPE_BUF);
#endif

#if defined(HAVE_POLL) && !defined(HAVE_BROKEN_POLL)
#ifdef __APPLE__
    /* macOS poll() is broken on some versions: remove the function at
     * runtime rather than exposing a faulty implementation. */
    if (select_have_broken_poll()) {
        if (PyObject_DelAttrString(m, "poll") == -1) {
            PyErr_Clear();
        }
    } else {
#else
    {
#endif
        if (PyType_Ready(&poll_Type) < 0)
            return NULL;
        PyModule_AddIntMacro(m, POLLIN);
        PyModule_AddIntMacro(m, POLLPRI);
        PyModule_AddIntMacro(m, POLLOUT);
        PyModule_AddIntMacro(m, POLLERR);
        PyModule_AddIntMacro(m, POLLHUP);
        PyModule_AddIntMacro(m, POLLNVAL);

#ifdef POLLRDNORM
        PyModule_AddIntMacro(m, POLLRDNORM);
#endif
#ifdef POLLRDBAND
        PyModule_AddIntMacro(m, POLLRDBAND);
#endif
#ifdef POLLWRNORM
        PyModule_AddIntMacro(m, POLLWRNORM);
#endif
#ifdef POLLWRBAND
        PyModule_AddIntMacro(m, POLLWRBAND);
#endif
#ifdef POLLMSG
        PyModule_AddIntMacro(m, POLLMSG);
#endif
    }
#endif /* HAVE_POLL */

#ifdef HAVE_SYS_DEVPOLL_H
    if (PyType_Ready(&devpoll_Type) < 0)
        return NULL;
#endif

#ifdef HAVE_EPOLL
    /* Static type: set the metatype explicitly before PyType_Ready. */
    Py_TYPE(&pyEpoll_Type) = &PyType_Type;
    if (PyType_Ready(&pyEpoll_Type) < 0)
        return NULL;

    Py_INCREF(&pyEpoll_Type);
    PyModule_AddObject(m, "epoll", (PyObject *) &pyEpoll_Type);

    PyModule_AddIntMacro(m, EPOLLIN);
    PyModule_AddIntMacro(m, EPOLLOUT);
    PyModule_AddIntMacro(m, EPOLLPRI);
    PyModule_AddIntMacro(m, EPOLLERR);
    PyModule_AddIntMacro(m, EPOLLHUP);
    PyModule_AddIntMacro(m, EPOLLET);
#ifdef EPOLLONESHOT
    /* Kernel 2.6.2+ */
    PyModule_AddIntMacro(m, EPOLLONESHOT);
#endif
    /* PyModule_AddIntConstant(m, "EPOLL_RDHUP", EPOLLRDHUP); */
#ifdef EPOLLRDNORM
    PyModule_AddIntMacro(m, EPOLLRDNORM);
#endif
#ifdef EPOLLRDBAND
    PyModule_AddIntMacro(m, EPOLLRDBAND);
#endif
#ifdef EPOLLWRNORM
    PyModule_AddIntMacro(m, EPOLLWRNORM);
#endif
#ifdef EPOLLWRBAND
    PyModule_AddIntMacro(m, EPOLLWRBAND);
#endif
#ifdef EPOLLMSG
    PyModule_AddIntMacro(m, EPOLLMSG);
#endif

#ifdef EPOLL_CLOEXEC
    PyModule_AddIntMacro(m, EPOLL_CLOEXEC);
#endif
#endif /* HAVE_EPOLL */

#ifdef HAVE_KQUEUE
    /* kevent uses the generic allocator; kqueue has its own tp_new. */
    kqueue_event_Type.tp_new = PyType_GenericNew;
    Py_TYPE(&kqueue_event_Type) = &PyType_Type;
    if(PyType_Ready(&kqueue_event_Type) < 0)
        return NULL;

    Py_INCREF(&kqueue_event_Type);
    PyModule_AddObject(m, "kevent", (PyObject *)&kqueue_event_Type);

    Py_TYPE(&kqueue_queue_Type) = &PyType_Type;
    if(PyType_Ready(&kqueue_queue_Type) < 0)
        return NULL;
    Py_INCREF(&kqueue_queue_Type);
    PyModule_AddObject(m, "kqueue", (PyObject *)&kqueue_queue_Type);

    /* event filters */
    PyModule_AddIntConstant(m, "KQ_FILTER_READ", EVFILT_READ);
    PyModule_AddIntConstant(m, "KQ_FILTER_WRITE", EVFILT_WRITE);
    PyModule_AddIntConstant(m, "KQ_FILTER_AIO", EVFILT_AIO);
    PyModule_AddIntConstant(m, "KQ_FILTER_VNODE", EVFILT_VNODE);
    PyModule_AddIntConstant(m, "KQ_FILTER_PROC", EVFILT_PROC);
#ifdef EVFILT_NETDEV
    PyModule_AddIntConstant(m, "KQ_FILTER_NETDEV", EVFILT_NETDEV);
#endif
    PyModule_AddIntConstant(m, "KQ_FILTER_SIGNAL", EVFILT_SIGNAL);
    PyModule_AddIntConstant(m, "KQ_FILTER_TIMER", EVFILT_TIMER);

    /* event flags */
    PyModule_AddIntConstant(m, "KQ_EV_ADD", EV_ADD);
    PyModule_AddIntConstant(m, "KQ_EV_DELETE", EV_DELETE);
    PyModule_AddIntConstant(m, "KQ_EV_ENABLE", EV_ENABLE);
    PyModule_AddIntConstant(m, "KQ_EV_DISABLE", EV_DISABLE);
    PyModule_AddIntConstant(m, "KQ_EV_ONESHOT", EV_ONESHOT);
    PyModule_AddIntConstant(m, "KQ_EV_CLEAR", EV_CLEAR);

    PyModule_AddIntConstant(m, "KQ_EV_SYSFLAGS", EV_SYSFLAGS);
    PyModule_AddIntConstant(m, "KQ_EV_FLAG1", EV_FLAG1);

    PyModule_AddIntConstant(m, "KQ_EV_EOF", EV_EOF);
    PyModule_AddIntConstant(m, "KQ_EV_ERROR", EV_ERROR);

    /* READ WRITE filter flag */
    PyModule_AddIntConstant(m, "KQ_NOTE_LOWAT", NOTE_LOWAT);

    /* VNODE filter flags */
    PyModule_AddIntConstant(m, "KQ_NOTE_DELETE", NOTE_DELETE);
    PyModule_AddIntConstant(m, "KQ_NOTE_WRITE", NOTE_WRITE);
    PyModule_AddIntConstant(m, "KQ_NOTE_EXTEND", NOTE_EXTEND);
    PyModule_AddIntConstant(m, "KQ_NOTE_ATTRIB", NOTE_ATTRIB);
    PyModule_AddIntConstant(m, "KQ_NOTE_LINK", NOTE_LINK);
    PyModule_AddIntConstant(m, "KQ_NOTE_RENAME", NOTE_RENAME);
    PyModule_AddIntConstant(m, "KQ_NOTE_REVOKE", NOTE_REVOKE);

    /* PROC filter flags */
    PyModule_AddIntConstant(m, "KQ_NOTE_EXIT", NOTE_EXIT);
    PyModule_AddIntConstant(m, "KQ_NOTE_FORK", NOTE_FORK);
    PyModule_AddIntConstant(m, "KQ_NOTE_EXEC", NOTE_EXEC);
    PyModule_AddIntConstant(m, "KQ_NOTE_PCTRLMASK", NOTE_PCTRLMASK);
    PyModule_AddIntConstant(m, "KQ_NOTE_PDATAMASK", NOTE_PDATAMASK);

    PyModule_AddIntConstant(m, "KQ_NOTE_TRACK", NOTE_TRACK);
    PyModule_AddIntConstant(m, "KQ_NOTE_CHILD", NOTE_CHILD);
    PyModule_AddIntConstant(m, "KQ_NOTE_TRACKERR", NOTE_TRACKERR);

    /* NETDEV filter flags */
#ifdef EVFILT_NETDEV
    PyModule_AddIntConstant(m, "KQ_NOTE_LINKUP", NOTE_LINKUP);
    PyModule_AddIntConstant(m, "KQ_NOTE_LINKDOWN", NOTE_LINKDOWN);
    PyModule_AddIntConstant(m, "KQ_NOTE_LINKINV", NOTE_LINKINV);
#endif
#endif /* HAVE_KQUEUE */
    return m;
}
|
from graph import Graph
def test_maximum_flow():
    """Max-flow through a small layered network.

        [2] ----- [5]
       /   +     / |  +
    [1]    [4]    |   [7]
       +  /   +   | /
        [3] ----- [6]
    """
    links = [
        (1, 2, 18),
        (1, 3, 10),
        (2, 4, 7),
        (2, 5, 6),
        (3, 4, 2),
        (3, 6, 8),
        (4, 5, 10),
        (4, 6, 10),
        (5, 6, 16),
        (5, 7, 9),
        (6, 7, 18)
    ]
    graph = Graph(from_list=links)
    total_flow, _ = graph.maximum_flow(1, 7)
    assert total_flow == 23, total_flow
def test_maximum_flow01():
    """A single edge carries exactly its capacity."""
    graph = Graph(from_list=[(1, 2, 1)])
    total_flow, _ = graph.maximum_flow(start=1, end=2)
    assert total_flow == 1, total_flow
def test_maximum_flow02():
    """Flow along a chain is limited by its single bottleneck edge."""
    capacities = [
        (1, 2, 10),
        (2, 3, 1),  # bottleneck.
        (3, 4, 10)
    ]
    graph = Graph(from_list=capacities)
    total_flow, _ = graph.maximum_flow(start=1, end=4)
    assert total_flow == 1, total_flow
def test_maximum_flow03():
    """Two disjoint paths, each with its own unit bottleneck."""
    capacities = [
        (1, 2, 10),
        (1, 3, 10),
        (2, 4, 1),  # bottleneck 1
        (3, 5, 1),  # bottleneck 2
        (4, 6, 10),
        (5, 6, 10)
    ]
    graph = Graph(from_list=capacities)
    total_flow, _ = graph.maximum_flow(start=1, end=6)
    assert total_flow == 2, total_flow
def test_maximum_flow04():
    """Crossing paths: four unit bottlenecks add up to a flow of 4."""
    capacities = [
        (1, 2, 10),
        (1, 3, 10),
        (2, 4, 1),  # bottleneck 1
        (2, 5, 1),  # bottleneck 2
        (3, 5, 1),  # bottleneck 3
        (3, 4, 1),  # bottleneck 4
        (4, 6, 10),
        (5, 6, 10)
    ]
    graph = Graph(from_list=capacities)
    total_flow, _ = graph.maximum_flow(start=1, end=6)
    assert total_flow == 4, total_flow
def test_maximum_flow05():
    """Two augmenting paths into node 3: direct, and via node 2."""
    capacities = [
        (1, 2, 10),
        (1, 3, 1),
        (2, 3, 1)
    ]
    graph = Graph(from_list=capacities)
    total_flow, _ = graph.maximum_flow(start=1, end=3)
    assert total_flow == 2, total_flow
def test_maximum_flow06():
    """Every edge is saturated, so the flow graph equals the input."""
    edges = [
        (1, 2, 1),
        (1, 3, 1),
        (2, 4, 1),
        (3, 4, 1),
        (4, 5, 2),
        (5, 6, 1),
        (5, 7, 1),
        (6, 8, 1),
        (7, 8, 1)
    ]
    graph = Graph(from_list=edges)
    total_flow, flow_graph = graph.maximum_flow(start=1, end=8)
    assert total_flow == 2, total_flow
    assert set(flow_graph.edges()) == set(edges)
|
/**
* Layout component that queries for data
* with Gatsby's useStaticQuery component
*
* See: https://www.gatsbyjs.com/docs/use-static-query/
*/
import * as React from "react"
import PropTypes from "prop-types"
import { useStaticQuery, graphql } from "gatsby"
import Header from "./Header"
import "./layout.css"
const Layout = ({ children }) => {
const data = useStaticQuery(graphql`
query SiteTitleQuery {
site {
siteMetadata {
title
}
}
}
`)
return (
<>
<Header siteTitle={data.site.siteMetadata?.title || `Title`} />
<div
style={{
margin: `0 auto`,
maxWidth: 960,
}}
>
<main>{children}</main>
</div>
</>
)
}
Layout.propTypes = {
children: PropTypes.node.isRequired,
}
export default Layout
|
module.exports = async () => {
/*******************************************
*** Set up
******************************************/
require("dotenv").config();
const Web3 = require("web3");
const HDWalletProvider = require("@truffle/hdwallet-provider");
// Contract abstraction
const truffleContract = require("truffle-contract");
const contract = truffleContract(
require("../build/contracts/BridgeBank.json")
);
/*******************************************
*** Constants
******************************************/
const NETWORK_ROPSTEN =
process.argv[4] === "--network" && process.argv[5] === "ropsten";
/*******************************************
*** Web3 provider
*** Set contract provider based on --network flag
******************************************/
let provider;
if (NETWORK_ROPSTEN) {
provider = new HDWalletProvider(
process.env.MNEMONIC,
"https://ropsten.infura.io/v3/".concat(process.env.INFURA_PROJECT_ID)
);
} else {
provider = new Web3.providers.HttpProvider(process.env.LOCAL_PROVIDER);
}
const web3 = new Web3(provider);
contract.setProvider(web3.currentProvider);
/*******************************************
*** Contract interaction
******************************************/
const address = await contract.deployed().then(function(instance) {
return instance.address;
});
return console.log("BridgeBank deployed contract address: ", address);
};
|
import React from 'react';
import { useNavigation } from '@react-navigation/native';
import styled from 'styled-components';
import { useDispatch, useSelector } from 'react-redux';
import * as Progress from 'react-native-progress';
import { pickFile } from './uploadMedia';
import { theme } from '../../../../constants/StyledComponentsTheme';
import { RoundCloseButton } from '../../../../components';
import noProfile from '../../../../assets/noProfile.png';
// Dotted frame around one picture slot in the profile-photos grid.
const UserImageContainer = styled.View`
    flex: 1;
    padding: 15px;
    background-color: white;
    border-width: 0.5px;
    border-style: dotted;
    border-color: ${props => props.theme.$lightGray};
`;

// Tappable surface covering the whole slot; opens the image picker.
const Button = styled.TouchableHighlight`
    flex: 1;
    border-radius: ${props => props.theme.$smallBorderRadius}px;
`;

// Centers the image (and the progress overlay) inside the button.
const ButtonContainer = styled.View`
    flex: 1;
    justify-content: center;
    align-items: center;
`;

// The user's picture, rounded to match the button.
const UserImage = styled.Image`
    height: 100%;
    width: 100%;
    border-radius: ${props => props.theme.$smallBorderRadius}px;
`;

// White circular badge that floats over the image while uploading.
const ProgressBarContainer = styled.View`
    position: absolute;
    height: auto;
    width: auto;
    justify-content: center;
    align-items: center;
    background-color: white;
    border-radius: 50px;
`;
export default function PictureItem({ PictureItem }) {
const dispatch = useDispatch();
const navigation = useNavigation();
const { userImages } = useSelector(state => state.user.userData);
const imageSource = PictureItem.imageUrl ? { uri: PictureItem.imageUrl } : noProfile;
const customButtonStyle = {
position: 'absolute',
height: 40,
width: 40,
right: 0,
top: 0,
backgroundColor: 'white'
};
const handleDeletePicture = () => {
navigation.push('GenericYesNoModal', {
title: 'Excluir imagem?',
subtitle: 'Esta ação não pode ser desfeita!',
acceptText: 'Excluir',
denyText: 'Cancelar',
selectedMethod: 'genericYesNoModalDeleteUserImage',
selectedUserImageId: PictureItem.id
});
}
const DeleteImageButton = () => {
return PictureItem.imageUrl ? PictureItem.uploaded &&
<RoundCloseButton
customIconStyle={{ fontSize: 23, color: theme.$red }}
customButtonStyle={customButtonStyle}
onPress={handleDeletePicture}
/> : null
}
const UploadProgressBar = () => {
return PictureItem.progress > 0 ?
<ProgressBarContainer>
<Progress.Circle
progress={PictureItem.progress / 100}
color={theme.$primaryColor}
textStyle={{ fontSize: 12 }}
showsText
/>
</ProgressBarContainer> : null
}
const pickImages = () => pickFile(userImages.length, dispatch);
return <UserImageContainer>
<Button underlayColor={theme.$darkGray} onPress={pickImages}>
<ButtonContainer>
<UserImage source={imageSource} />
<UploadProgressBar />
</ButtonContainer>
</Button>
<DeleteImageButton />
</UserImageContainer>
}
|
#!/usr/bin/env python3
"""
A tool for extracting useful information from youtube video's, like comments, or subtitles.
Author: Willem Hengeveld <itsme@xs4all.nl>
"""
import urllib.request
import urllib.parse
import http.cookiejar
import re
import json
import sys
import html
import datetime
from collections import defaultdict
from xml.parsers.expat import ParserCreate
import http.client
def load_socks_proxy(proxyarg):
    """
    Install a process-wide SOCKS proxy when `proxyarg` looks like
    'socks4://host:port' or 'socks5://host:port'.

    Non-socks (or unparsable) specs are ignored here; http(s) proxies
    are handled separately by decode_proxy(). Returns None always.
    """
    m = re.match(r'(?:(\w+)://)?(\S+):(\d+)', proxyarg)
    if not m:
        return
    method, host, port = m.groups()
    port = int(port)
    if not method or not method.startswith('socks'):
        return
    # Third-party 'PySocks' module, imported lazily so the dependency is
    # only required when a socks proxy is actually requested.
    import socks
    socks.setdefaultproxy(socks.SOCKS4 if method.startswith('socks4') else socks.SOCKS5, host, port)

    def create_connection(address, timeout=None, source_address=None):
        # NOTE(review): timeout/source_address are accepted but ignored —
        # confirm no caller relies on them being honored.
        sock = socks.socksocket()
        sock.connect(address)
        return sock

    import socket
    # Monkey-patch the stdlib socket module so that all subsequent
    # network traffic (urllib included) goes through the SOCKS proxy.
    socket.create_connection = create_connection
    socket.socket = socks.socksocket
def decode_proxy(proxyarg):
    """
    Parse a '[scheme://]host:port' proxy spec and, for http(s) proxies
    (or specs without a scheme), return the mapping expected by
    urllib.request.ProxyHandler. Returns None for anything else,
    e.g. socks proxies, which load_socks_proxy() handles instead.
    """
    m = re.match(r'(?:(\w+)://)?(\S+):(\d+)', proxyarg)
    if m is None:
        return None
    method, host, port = m.groups()
    port = int(port)
    if method and not method.startswith('http'):
        return None
    return {'http': proxyarg, 'https': proxyarg}
def cvdate(txt):
    """
    Convert a string with a date in y-m-d format to a datetime.date.

    Returns None (after printing a warning) for malformed input: wrong
    number of fields, non-numeric fields, or out-of-range components.
    The original only guarded the field count, so "2021-xx-01" raised
    ValueError from int() instead of warning.
    """
    ymd = txt.split("-")
    if len(ymd) != 3:
        print("WARNING: invalid date format: %s" % txt)
        return None
    try:
        y, m, d = (int(_) for _ in ymd)
        return datetime.date(y, m, d)
    except ValueError:
        # non-numeric field, or a component datetime.date rejects
        print("WARNING: invalid date format: %s" % txt)
        return None
def cvseconds(txt):
    """
    Convert a string holding a whole number of seconds into a
    datetime.timedelta.
    """
    seconds = int(txt)
    return datetime.timedelta(seconds=seconds)
def getitembymember(a, member):
    """
    Return the first element of `a` that contains the key `member`,
    or None when no element does.
    """
    return next((item for item in a if member in item), None)
def getitem(d, *path):
    """
    Traverse a nested python object; each path element selects the next
    level:
      * a tuple  -> pick the dict in a list containing the given key
                    (via getitembymember)
      * an int   -> index into a list
      * a string -> look up a dict key (missing keys yield None)
    Traversal short-circuits to None as soon as the current node is None.
    """
    node = d
    for key in path:
        if node is None:
            return None
        if type(key) == tuple:
            node = getitembymember(node, *key)
        elif type(key) == int:
            node = node[key]
        else:
            node = node.get(key)
    return node
def extracttext(entry):
    """
    Flatten a youtube 'text' object to a plain string.

    Supports both encodings seen in youtube responses:
    {"simpleText": "..."} and {"runs": [{"text": "..."}, ...]}.

    Returns "" for a None/empty entry or one with neither key — callers
    routinely pass getitem() results that may be None, which previously
    raised AttributeError/TypeError here.
    """
    if not entry:
        return ""
    simple = entry.get("simpleText")
    if simple:
        return simple
    return "".join(r.get('text', "") for r in entry.get("runs") or [])
def getcontinuation(p):
    """
    Pull the (continuation-token, clickTrackingParams) pair out of a
    youtube 'continuations' structure, or None when it is absent.
    """
    data = getitem(p, "continuations", 0, "nextContinuationData")
    if not data:
        return None
    return data["continuation"], data["clickTrackingParams"]
class Youtube:
    """
    Client for youtube's web endpoints: fetches pages, comments,
    (live) chat and search continuations.
    """
    def __init__(self, args):
        """
        `args` is the parsed argparse namespace (uses .proxy, .debug).
        Fetches the youtube front page once to scrape the current
        innertube API key and client name/version from its ytcfg blob.
        """
        self.args = args
        # Pre-set the EU cookie-consent cookie so youtube serves real
        # content instead of the consent interstitial page.
        cj = http.cookiejar.CookieJar()
        cj.set_cookie(http.cookiejar.Cookie(version=0, name="CONSENT", value="YES+cb.20210420-15-p1.en+FX+374", port=None, port_specified=False, domain=".youtube.com", domain_specified=True, domain_initial_dot=True, path="/", path_specified=True, secure=False, expires=None, discard=False, comment=None, comment_url=None, rest={}))
        self.cp = urllib.request.HTTPCookieProcessor(cj)

        handlers = [self.cp]
        if args.proxy:
            proxies = decode_proxy(args.proxy)
            if proxies:
                handlers.append(urllib.request.ProxyHandler(proxies))
        if args.debug:
            # Dump raw HTTPS traffic when --debug is given.
            handlers.append(urllib.request.HTTPSHandler(debuglevel=1))
        self.opener = urllib.request.build_opener(*handlers)

        # Hard-coded fallbacks; overwritten below with values scraped
        # from the live front page.
        self.innertubeapikey = "AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8" # "INNERTUBE_API_KEY": "AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8",
        self.clientname = "1" # "INNERTUBE_CONTEXT_CLIENT_NAME": 1,
        self.clientversion = "2.20210422.04.00" # "INNERTUBE_CONTEXT_CLIENT_VERSION": "2.20210404.08.00",
        self.idtoken = "QUFFLUhqa1oySl9mbm9mODhfdENjQWdDcENvazM2RS1qZ3w=" # "ID_TOKEN": "QUFFLUhqa1oySl9mbm9mODhfdENjQWdDcENvazM2RS1qZ3w=",

        # NOTE: this local `html` shadows the module-level `import html`;
        # harmless inside __init__, but worth renaming eventually.
        html = self.httpreq("https://www.youtube.com/")
        cfg = self.getytcfg(html.decode('utf-8'))
        self.innertubeapikey = cfg.get("INNERTUBE_API_KEY")
        self.clientname = cfg.get("INNERTUBE_CONTEXT_CLIENT_NAME")
        self.clientversion = cfg.get("INNERTUBE_CONTEXT_CLIENT_VERSION")

    def httpreq(self, url, data=None):
        """
        Does GET or POST request to youtube.

        POSTs when `data` is not None; the Content-Type is set to JSON
        when the payload starts with '{' or '['.  A truncated response
        (IncompleteRead) is tolerated and the partial body returned.
        """
        hdrs = {
            "x-youtube-client-name": "1",
            "x-youtube-client-version": self.clientversion,
            #"X-Youtube-Identity-Token": self.idtoken,
            "User-Agent": "Mozilla/5.0 (Mac) Gecko/20100101 Firefox/76.0",
        }
        if type(data)==bytes and data[:1] in (b'{', b'['):
            hdrs["Content-Type"] = "application/json"
        req = urllib.request.Request(url, headers=hdrs)

        kwargs = dict()
        if data is not None:
            kwargs["data"] = data

        response = self.opener.open(req, **kwargs)
        try:
            page = response.read()
        except http.client.IncompleteRead as e:
            page = e.partial
            print("EXCEPTION FOUND: http.client.IncompleteRead")
            pass
        return page

    def getcomments(self, contclick, xsrf, replies=False):
        """
        Returns comments for the specified continuation parameter.

        `contclick` is a (continuation-token, clickTrackingParams) pair;
        `xsrf` is the token dict POSTed as form data.  `replies=True`
        requests a reply thread instead of top-level comments.
        """
        cont, click = contclick
        url = "https://www.youtube.com/comment_service_ajax"
        query = {
            "pbj": 1,
            "ctoken": cont,
            "continuation": cont,  # -- it turns out we don't need this 2nd copy of the token.
            "itct": click,
            "type": "next",
        }
        if replies:
            query["action_get_comment_replies"] = 1
        else:
            query["action_get_comments"] = 1

        postdata = urllib.parse.urlencode(xsrf)
        return self.httpreq(url + "?" + urllib.parse.urlencode(query), postdata.encode('ascii') )

    def getchat(self, cont, live=False):
        """
        Returns chat for the specified continuation parameter.

        Uses the live endpoint when `live` is True, otherwise the
        replay endpoint.
        """
        if live:
            url = "https://www.youtube.com/live_chat"
        else:
            url = "https://www.youtube.com/live_chat_replay"
        query = {
            "pbj": 1,
            "continuation": cont,
        }
        return self.httpreq(url + "?" + urllib.parse.urlencode(query))

    def getchat2(self, cont, offset, live=False):
        """
        Returns chat for the specified continuation parameter.

        Alternative innertube endpoint taking a playback offset (ms).
        """
        if live:
            url = "https://www.youtube.com/youtubei/v1/live_chat_replay/get_live_chat"
        else:
            url = "https://www.youtube.com/youtubei/v1/live_chat_replay/get_live_chat_replay"
        query = {
            "pbj": 1,
            "continuation": cont,
            "playerOffsetMs": offset,
            "hidden": False,
            "commandMetadata": "[object Object]",
        }
        return self.httpreq(url + "?" + urllib.parse.urlencode(query))

    def getlivechat(self, cont):
        """POST the innertube get_live_chat request for a live stream."""
        url = "https://www.youtube.com/youtubei/v1/live_chat/get_live_chat"
        query = { "key": self.innertubeapikey, }
        postdata = {
            "context": { "client": { "clientName": "WEB", "clientVersion": self.clientversion } },
            "continuation": cont
        }
        return self.httpreq(url + "?" + urllib.parse.urlencode(query), json.dumps(postdata).encode('utf-8'))

    def getsearch(self, cont):
        """
        Returns next batch of search results
        """
        url = "https://www.youtube.com/youtubei/v1/search"
        query = {
            "key": self.innertubeapikey
        }
        postdata = {
            "context": { "client": { "clientName": "WEB", "clientVersion": self.clientversion } },
            "continuation": cont,
        }
        postdata = json.dumps(postdata)
        return self.httpreq(url + "?" + urllib.parse.urlencode(query), postdata.encode('ascii'))

    def browse(self, contclick):
        """
        Returns videos for the specified continuation parameter.
        """
        cont, click = contclick
        url = "https://www.youtube.com/browse_ajax"
        query = {
            "ctoken": cont,
            "continuation": cont,
            "itct": click,
        }
        return self.httpreq(url + "?" + urllib.parse.urlencode(query))

    def getpageinfo(self, yturl):
        """
        Returns the youtube configuration object.

        Appends pbj=1 so youtube replies with JSON; the b")]}'" XSSI
        prefix characters are stripped before parsing.
        """
        ytcfgtext = self.httpreq(yturl + ("&" if yturl.find('?')>=0 else "?") + "pbj=1")
        if self.args.debug:
            print("============ youtube config")
            print(ytcfgtext.decode('utf-8'))
            print()
        try:
            return json.loads(ytcfgtext.lstrip(b")]}'"))
        except Exception as e:
            if self.args.verbose:
                print("EXCEPTION in getpageinfo: %s" % e)
            if self.args.debug:
                raise
            return

    def getytcfg(self, ythtml):
        """Merge every ytcfg.set({...}) blob found in the page into one dict."""
        ytcfg = {}
        for m in re.finditer(r'ytcfg\.set\((\{.*?\})\)', ythtml):
            # crude JSON-ification: single->double quotes, drop trailing comma
            jsontxt = m.group(1).replace("'", '"').replace('",}', '"}')
            ytcfg.update(json.loads(jsontxt))
        return ytcfg

    def getconfigfromhtml(self, ythtml):
        """
        Alternative method of extracting the config object.
        By parsing the html page returned by youtube.
        """
        if self.args.debug:
            print("============ youtube page")
            print(ythtml.decode('utf-8'))
            print()
        m = re.search(br'ytplayer.config = (.*?);ytplayer.load', ythtml)
        if not m:
            print("could not find config")
            return
        cfgtext = m.group(1)
        if self.args.debug:
            print("========== config json")
            print(cfgtext.decode('utf-8'))
            print()

        cfg = json.loads(cfgtext)
        playertext = cfg['args']['player_response']
        if self.args.debug:
            print("========== player json")
            print(playertext)
            print()

        return json.loads(playertext)

    def extractsearchconfig(self, html):
        """Extract the ytInitialData JSON blob from a search results page."""
        if self.args.debug:
            print("============ youtube page")
            print(html.decode('utf-8'))
            print()
        # FIXME(review): inside a regex, ["ytInitialData"] is a character
        # class matching a single character, not the literal text; the
        # pattern likely needs escaping (window\["ytInitialData"\]) to
        # match what filterhtml() matches — verify against a real page.
        m = re.search(br'window["ytInitialData"] = (.*);', html)
        if not m:
            print("could not find config")
            return
        cfgtext = m.group(1)
        if self.args.debug:
            print("========== config json")
            print(cfgtext.decode('utf-8'))
            print()

        return json.loads(cfgtext)
def filterhtml(html):
    """
    extract 4 different dictionaries from the html page.
    -- ytInitialPlayerResponse
    -- ytcfg.set()
    -- ytplayer.web_player_context_config
    -- ytInitialData

    Returns a dict with keys: "ytcfg", and optionally "ldjson",
    "playercg", "msg", "initplayer", "initdata" when present in the page.
    """
    result = {}
    # NOTE(review): this first scan is currently a no-op (its print is
    # commented out); kept, presumably, as a placeholder for the
    # two-argument ytcfg.set(key, value) form.
    for m in re.finditer(r'ytcfg\.set\(([^{}]*?),([^{}]*?)\)', html):
        #print("yt1", m.groups())
        pass

    # Merge all single-argument ytcfg.set({...}) blobs into one dict,
    # after a crude single->double quote JSON-ification.
    result["ytcfg"] = {}
    for m in re.finditer(r'ytcfg\.set\((\{.*?\})\)', html):
        #print("yt2", m.group(1))
        jsontxt = m.group(1).replace("'", '"').replace('",}', '"}')
        result["ytcfg"].update(json.loads(jsontxt))
    # TIMING_INFO.cver: "2.20210111.08.00",

    if m := re.search(r'<script type="application/ld\+json"[^>]*>(\{.*?\})</script>', html):
        #print("ld", m.group(1))
        result["ldjson"] = json.loads(m.group(1))
    if m := re.search(r'ytplayer.web_player_context_config = (\{.*?\});', html):
        #print("cfg", m.group(1))
        result["playercg"] = json.loads(m.group(1))
        # device.interfaceVersion: "2.20210111.08.00",
        # "innertubeApiKey": "AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8",
        # "innertubeContextClientVersion": "2.20210111.08.00",
    if m := re.search(r'setMessage\((\{.*?\})\);', html):
        #print("msg", m.group(1))
        result["msg"] = json.loads(m.group(1))
    if m := re.search(r'<script[^>]*>var ytInitialPlayerResponse = (\{.*?\});', html):
        #print("initplayer", m.group(1))
        result["initplayer"] = json.loads(m.group(1))
        # note: this is the same as pbj.[].playerResponse
    if m := re.search(r'<script[^>]*>var ytInitialData = (\{.*?\});</script>', html):
        #print("initdata", m.group(1))
        result["initdata"] = json.loads(m.group(1))
        # note: this is the same as pbj.[].response
    if m := re.search(r'<script[^>]*>window\["ytInitialData"\] = (\{.*?\});</script>', html):
        #print("initdata", m.group(1))
        result["initdata"] = json.loads(m.group(1))
        # note: this is the same as pbj.[].response

    return result
class LivechatReader:
    """
    class reads a livechat or livechat replay.
    """
    def __init__(self, args, yt, cfg, live=False):
        # args: argparse namespace; yt: Youtube client; cfg: page dict
        # from filterhtml(); live: True for an ongoing stream.
        self.args = args
        self.yt = yt
        self.live = live
        self.cont = self.getchatinfo(cfg)

    def getcontinuation(self, p):
        # Chat uses reloadContinuationData, unlike the module-level
        # getcontinuation() which reads nextContinuationData.
        p = getitem(p, "continuations", 0, "reloadContinuationData")
        if not p:
            return
        return p["continuation"]

    def getchatinfo(self, cfg):
        """
        Find the base parameters for querying the video's comments.
        """
        item = getitem(cfg, "initdata", "contents", "twoColumnWatchNextResults", "conversationBar", "liveChatRenderer")
        if not item:
            return
        return self.getcontinuation(item)

    def recursechat(self):
        """
        Replay the recorded chat from the start, printing each message,
        then switch to monitoring the live chat.
        """
        if not self.cont:
            print("no live chat replay found")
            return
        ms = 0
        while True:
            #cmtjson = self.yt.getchat2(self.cont, ms, self.live)
            cmtjson = self.yt.getchat(self.cont, self.live)
            if self.args.debug:
                print("============ chat req")
                print(cmtjson.decode('utf-8'))
                print()

            if cmtjson.startswith(b"<!DOCTYPE"):
                # Endpoint served a full html page: mine it for the
                # embedded JSON blobs.
                js = filterhtml(cmtjson.decode('utf-8'))
                if self.args.debug:
                    print("============ chat req extracted json")
                    print(json.dumps(js))
                    print()
            else:
                js = json.loads(cmtjson)
                # NOTE(review): the JSON branch is also indexed with
                # js["initdata"] below — confirm the pbj response really
                # carries that key, otherwise this raises KeyError.

            cmtlist, newms = self.extractchat(js["initdata"])
            # No progress in the playback offset means the replay is done.
            if newms==ms:
                break

            for author, time, comment in cmtlist:
                print("--->", time, author)
                print(extracttext(comment))
            ms = newms
        print("========== live ===========")
        self.monitorchat(js["initdata"])

    def extractchat(self, js):
        """
        Pull (author, time, message) triples plus the last playback
        offset (ms) out of a liveChatContinuation actions list.
        """
        actions = getitem(js, "continuationContents", "liveChatContinuation", "actions")
        if not actions:
            return [], None
        cmtlist = []
        ms = None
        def addchatitem(item):
            # One liveChatTextMessageRenderer -> (author, time, message).
            msg = getitem(item, "message")
            author = getitem(item, "authorName", "simpleText")
            time = getitem(item, "timestampText", "simpleText")
            if time is None:
                # Live messages carry an absolute microsecond timestamp
                # instead of a relative timestampText.
                timeusec = getitem(item, "timestampUsec")
                if timeusec is not None:
                    dt = datetime.datetime.fromtimestamp(int(timeusec)/1000000)
                    time = dt.strftime("%Y-%m-%d %H:%M:%S")
            cmtlist.append((author, time, msg))

        for act in actions:
            # Replays wrap their items in replayChatItemAction.
            replayactions = getitem(act, "replayChatItemAction", "actions")
            ms = getitem(act, "replayChatItemAction", "videoOffsetTimeMsec")
            if replayactions:
                for ract in replayactions:
                    item = getitem(ract, "addChatItemAction", "item", "liveChatTextMessageRenderer")
                    if item:
                        addchatitem(item)
            item = getitem(act, "addChatItemAction", "item", "liveChatTextMessageRenderer")
            if item:
                addchatitem(item)
        return cmtlist, ms

    def monitorchat(self, js):
        """
        Poll the live chat endpoint once per second, printing new
        messages as they arrive. Never returns.
        """
        while True:
            cont = getitem(js, "continuationContents", "liveChatContinuation", "continuations", 0, "invalidationContinuationData", "continuation")
            respjson = self.yt.getlivechat(cont)
            if self.args.debug:
                print("============ comment req")
                print(respjson.decode('utf-8'))
                print()

            js = json.loads(respjson)
            cmtlist, newms = self.extractchat(js)
            for author, time, comment in cmtlist:
                print("--->", time, author)
                print(extracttext(comment))
            sys.stdout.flush()
            # NOTE(review): the loop variable `time` above shadows the
            # time module, which is why the import is (re-)done here,
            # inside the loop, right before sleeping.
            import time
            time.sleep(1)
class CommentReader:
    """
    class which can recursively print comments
    """
    def __init__(self, args, yt, cfg):
        # args: argparse namespace; yt: Youtube client; cfg: page dict
        # from filterhtml().
        self.args = args
        self.yt = yt
        self.contclick, self.xsrf = self.getcommentinfo(cfg)

    def recursecomments(self, cc=None, level=0):
        """
        Print all comments reachable from continuation `cc` (defaults to
        the video's top-level thread), recursing into reply threads with
        an increased indent level.
        """
        if not cc:
            cc = self.contclick
        while cc:
            cmtjson = self.yt.getcomments(cc, self.xsrf, replies=(level>0))
            if self.args.debug:
                print("============ comment req")
                print(cmtjson.decode('utf-8'))
                print()

            if not cmtjson:
                raise Exception("empty response")
            js = json.loads(cmtjson)
            if type(js)==list:
                # this is for 'replies', which return an array instead of a dict as the top-level response.
                js = getitem(js, ("response",))
            cmtlist, cc = self.extractcomments(js)

            for author, when, comment, likes, replies, subcc in cmtlist:
                if self.args.verbose:
                    print("---" * (level+1) + ">", "%s ; %s ; %s likes ; %s replies" % (author, when, likes, replies))
                else:
                    print("---" * (level+1) + ">", author)
                print(extracttext(comment))
                if subcc:
                    self.recursecomments(subcc, level+1)

    def getcommentinfo(self, cfg):
        """
        Find the base parameters for querying the video's comments.

        Returns ((continuation, clickTrackingParams), xsrf-dict); the
        xsrf dict is empty when the page carried no XSRF token.
        """
        item = getitem(cfg, "initdata", "contents", "twoColumnWatchNextResults", "results", "results", "contents")
        cont = getcontinuation(getitem(item, ("itemSectionRenderer",), "itemSectionRenderer"))

        xsrftoken = getitem(cfg, "ytcfg", "XSRF_TOKEN")
        xsrffield = getitem(cfg, "ytcfg", "XSRF_FIELD_NAME")
        xsrfdict = { xsrffield: xsrftoken } if xsrftoken else {}
        return cont, xsrfdict

    def getcomment(self, p):
        """
        Return info for a single comment.

        Returns (author, when, content, likes, nr-replies, continuation)
        where continuation is for the reply thread, or None.
        """
        # Unwrap the several renderer layers youtube nests comments in.
        if "commentThreadRenderer" in p:
            p = p["commentThreadRenderer"]
        c = p
        r = p
        if "comment" in c:
            c = c["comment"]
        if "commentRenderer" in c:
            c = c["commentRenderer"]
        if "replies" in r:
            r = r["replies"]

        author = getitem(c, "authorText", "simpleText")
        content = getitem(c, "contentText")
        likes = getitem(c, "likeCount")
        nrreplies = getitem(c, "replyCount")
        when = extracttext(getitem(c, "publishedTimeText"))
        replies = getitem(r, "commentRepliesRenderer")
        if replies:
            cont = getcontinuation(replies)
        else:
            cont = None

        return author, when, content, int(likes or 0), int(nrreplies or 0), cont

    def extractcomments(self, js):
        """
        Extract a list of comments from comment dictionary
        """
        p = getitem(js, "response", "continuationContents")
        if not p:
            print("non contents found in continuation")
            return [], None
        # Top-level threads vs. reply threads use different wrappers.
        if "itemSectionContinuation" in p:
            p = p["itemSectionContinuation"]
        elif "commentRepliesContinuation" in p:
            p = p["commentRepliesContinuation"]
        cmtlist = []
        contents = p.get("contents")
        if contents:
            for c in contents:
                cmtlist.append(self.getcomment(c))

        # header.commentsHeaderRenderer -> commentsCount at same level as 'contents'

        return cmtlist, getcontinuation(p)
class SearchReader:
    """
    Print search results (videos and channels) for a query page.
    """
    def __init__(self, args, yt, cfg):
        self.args = args
        self.yt = yt
        self.cfg = cfg
    def getresults(self, js):
        """
        Return (resultlist, continuation-token) from either the initial
        page config or a continuation response.
        """
        ct = getitem(js, "contents", "twoColumnSearchResultsRenderer", "primaryContents", "sectionListRenderer", "contents")
        if not ct:
            # continuation responses use a different top-level layout
            ct = getitem(js, "onResponseReceivedCommands", 0, "appendContinuationItemsAction", "continuationItems")
        resultlist = getitem(ct, ("itemSectionRenderer",), "itemSectionRenderer", "contents")
        cont = getitem(ct, ("continuationItemRenderer",), "continuationItemRenderer", "continuationEndpoint", "continuationCommand", "token")
        return resultlist, cont
    def recursesearch(self):
        """
        Print all result pages, following continuation tokens until the
        result set is exhausted.
        """
        resultlist, cont = self.getresults(getitem(self.cfg, "initdata"))
        while True:
            for item in resultlist:
                if video := item.get("videoRenderer"):
                    vid = getitem(video, "videoId")
                    title = getitem(video, "title")
                    # other available fields:
                    # title -> runs
                    # descriptionSnippet -> runs
                    # publishedTimeText -> simpleText
                    # lengthText -> simpleText
                    # viewCountText -> simpleText
                    # ownerText -> runs
                    print("%s - %s" % (vid, extracttext(title)))
                elif chan := item.get("channelRenderer"):
                    cid = getitem(chan, "channelId")
                    title = getitem(chan, "title", "simpleText")
                    # "videoCountText" -> runs
                    # subscriberCountText -> simpleText
                    # descriptionSnippet -> runs
                    print("%s - %s" % (cid, title))
            # bugfix: stop when there is no further continuation, instead of
            # looping forever / requesting the next page with a None token.
            if not cont:
                break
            jstext = self.yt.getsearch(cont)
            js = json.loads(jstext)
            resultlist, cont = self.getresults(js)
class DetailReader:
    """
    Extract some details for a video from the config.
    """
    def __init__(self, args, yt, cfg):
        self.args = args
        self.yt = yt
        self.cfg = cfg
    def output(self):
        """
        Print a short summary for the current video: id, title, owner,
        viewcount, length, sentiment, publish/upload dates, description.
        """
        vd = getitem(self.cfg, "initplayer", "videoDetails")
        mf = getitem(self.cfg, "initplayer", "microformat", "playerMicroformatRenderer")
        twocol = getitem(self.cfg, "initdata", "contents", "twoColumnWatchNextResults", "results", "results", "contents")
        sentiment = getitem(twocol, ("videoPrimaryInfoRenderer",), "videoPrimaryInfoRenderer", "sentimentBar", "sentimentBarRenderer", "tooltip")
        if not mf:
            print("microformat not found")
            return
        # robustness: these keys are occasionally absent from the microformat record
        vc = int(mf.get("viewCount") or 0)
        ls = cvseconds(mf.get("lengthSeconds"))
        pd = cvdate(mf.get("publishDate"))
        ud = cvdate(mf.get("uploadDate"))
        desc = getitem(mf, "description", "simpleText")
        vid = vd.get("videoId") if vd else None
        title = getitem(mf, "title", "simpleText")
        owner = getitem(mf, "ownerChannelName")
        print("%s - %s" % (vid, title))
        print("By: %s" % (owner))
        print()
        # only mention the upload date separately when it differs from the publish date
        print("viewcount: %d, length: %s, sentiment: %s, published: %s%s" % (vc, ls, sentiment, pd, "" if pd==ud else ", uploaded at: %s" % ud))
        print()
        print("%s" % desc)
        print()
class SubtitleReader:
    """
    class which can print a video's subtitles
    """
    def __init__(self, args, yt, cfg):
        self.args = args
        self.yt = yt
        self.cfg = cfg
    def languagematches(self, language, ct):
        """
        Match a captionTrack record to the language filter.
        The literal filter 'asr' selects the auto-generated
        (speech-recognition) track; otherwise the track's display name
        or language code must match.  Returns a truthy value on match.
        """
        if language == 'asr' and ct.get('kind') == 'asr':
            return True
        if ct["name"]["simpleText"] == language:
            return True
        if ct["languageCode"] == language:
            return True
        # implicit None (falsy) when nothing matched
    def output(self):
        """
        Print subtitles for all caption tracks, optionally filtered by
        --language.  Per-track headers are only printed when more than
        one track is output.
        """
        js = getitem(self.cfg, "initplayer")
        p = getitem(js, "captions", "playerCaptionsTracklistRenderer", "captionTracks")
        if not p:
            print("no subtitles found")
            return
        captiontracks = p
        # filter subtitles based on language
        if self.args.language:
            captiontracks = self.filtertracks(self.args.language, captiontracks)
        for ct in captiontracks:
            if len(captiontracks) > 1:
                print("### %s ###" % ct["name"]["simpleText"])
            self.outputsubtitles(ct["baseUrl"])
            if len(captiontracks) > 1:
                print()
    def filtertracks(self, language, captiontracks):
        """
        Return the tracks matching 'language', grouped per language code;
        when both a manual and an 'asr' track exist for one code, keep
        only the manual one.
        """
        matchedtracks = defaultdict(list)
        for ct in captiontracks:
            if not self.languagematches(language, ct):
                continue
            matchedtracks[ct["languageCode"]].append(ct)
        filteredlist = []
        for lang, tracks in matchedtracks.items():
            if len(tracks) > 1:
                # prefer non automated translation
                tracks = filter(lambda ct:ct.get("kind") != "asr", tracks)
            filteredlist.extend(tracks)
        return filteredlist
    def outputsubtitles(self, cturl):
        """
        Fetch the timedtext XML for one track and print it in the
        requested format (--srt, --verbose, or plain text).
        """
        ttxml = self.yt.httpreq(cturl)
        if self.args.debug:
            print("========== timedtext xml")
            print(ttxml.decode('utf-8'))
            print()
        tt = self.extractxmltext(ttxml)
        if self.args.srt:
            self.output_srt(tt)
        elif self.args.verbose:
            for t0, t1, txt in tt:
                print("%s %s" % (self.formattime(t0), txt))
        else:
            for t0, t1, txt in tt:
                print(txt)
    @staticmethod
    def formattime(t):
        # seconds (float) -> "h:mm:ss.mmm"
        m = int(t/60) ; t -= 60*m
        h = int(m/60) ; m -= 60*h
        return "%d:%02d:%06.3f" % (h, m, t)
    @staticmethod
    def srttime(t):
        # .srt uses a comma as the decimal separator
        return SubtitleReader.formattime(t).replace('.', ',')
    @staticmethod
    def output_srt(tt):
        """
        Emit subtitles in SubRip format: index, time range, text, blank line.
        """
        n = 1
        for t0, t1, txt in tt:
            print(n)
            print("%s --> %s" % (SubtitleReader.srttime(t0), SubtitleReader.srttime(t1)))
            print(txt)
            print()
            # bugfix: the cue index was never incremented, so every cue
            # was numbered '1', producing an invalid .srt file.
            n += 1
    @staticmethod
    def unhtml(htmltext):
        """
        Removes html font tags, and decodes html entities
        """
        return html.unescape(re.sub(r'</?font[^>]*>', '', htmltext))
    def extractxmltext(self, xml):
        """
        Returns a list of tuples: time, endtime, text
        """
        lines = []
        tstart = None
        tend = None
        text = None
        # expat callbacks: a <text start=... dur=...> element opens a cue;
        # character data is accumulated until the element closes.
        def handle_begin_element(elem, attr):
            nonlocal text, tstart, tend
            if elem == 'text':
                text = ""
                tstart = float(attr.get('start'))
                tend = tstart + float(attr.get('dur'))
        def handle_end_element(elem):
            nonlocal text
            if elem == 'text':
                lines.append((tstart, tend, self.unhtml(text)))
                text = None
        def handle_data(data):
            nonlocal text
            if text is not None:
                text += data
        parser = ParserCreate()
        parser.StartElementHandler = handle_begin_element
        parser.EndElementHandler = handle_end_element
        parser.CharacterDataHandler = handle_data
        parser.Parse(xml, 1)
        return lines
class PlaylistReader:
    """
    class which can print a playlist's contents.
    """
    def __init__(self, args, yt, cfg):
        self.args = args
        self.yt = yt
        self.cfg = cfg
    def output(self):
        """
        Print 'videoid - title' lines for all playlist entries, following
        continuation tokens until the list is exhausted.
        """
        # layout notes (json paths as observed in responses):
        # ==== [ 'playlistVideoRenderer', 1, 'contents', 'playlistVideoListRenderer', 0, 'contents', 'itemSectionRenderer', 0, 'contents', 'sectionListRenderer', 'content', 'tabRenderer', 0, 'tabs', 'twoColumnBrowseResultsRenderer', 'contents', 'response', 1]
        # ==== ['gridVideoRenderer', 1, 'items', 'horizontalListRenderer', 'content', 'shelfRenderer', 0,
        #       'contents', 'itemSectionRenderer', 1, 'contents', 'sectionListRenderer', 'content', 'tabRenderer', 0,
        #       'tabs', 'twoColumnBrowseResultsRenderer', 'contents', 'response', 1]
        playlist = getitem(self.cfg, "initdata", "contents", "twoColumnWatchNextResults", "playlist")
        if playlist:
            # playlist side panel of a watch page
            print("Title: %s" % getitem(playlist, "playlist", "title"))
            for entry in getitem(playlist, "playlist", "contents"):
                vid = getitem(entry, "playlistPanelVideoRenderer", "videoId")
                title = getitem(entry, "playlistPanelVideoRenderer", "title", "simpleText")
                length = getitem(entry, "playlistPanelVideoRenderer", "lengthText", "simpleText")
                # bugfix: referenced the bare name 'args' (local to main(),
                # a NameError here) instead of self.args
                if self.args.verbose:
                    print("%s - %s %s" % (vid, length, title))
                else:
                    print("%s - %s" % (vid, title))
            return
        tabs = getitem(self.cfg, "initdata", "contents", "twoColumnBrowseResultsRenderer", "tabs", 0, "tabRenderer", "content")
        ct1 = getitem(tabs, "sectionListRenderer", "contents", 0, "itemSectionRenderer", "contents", 0)
        playlist = getitem(ct1, "playlistVideoListRenderer")
        list_tag = "contents"
        entry_tag = "playlistVideoRenderer"
        if not playlist:
            # bugfix: was 'ctl' (an undefined name) instead of 'ct1'
            playlist = getitem(ct1, "shelfRenderer", "content", 'horizontalListRenderer')
            list_tag = "items"
            entry_tag = "gridVideoRenderer"
        if playlist:
            cont = None
            for entry in playlist[list_tag]:
                vid = getitem(entry, entry_tag, "videoId")
                title = getitem(entry, entry_tag, "title")
                if vid and title:
                    print("%s - %s" % (vid, extracttext(title)))
                c = getitem(entry, "continuationItemRenderer", "continuationEndpoint", "continuationCommand", "token")
                if c:
                    cl = getitem(entry, "continuationItemRenderer", "continuationEndpoint", "clickTrackingParams")
                    cont = c, cl
            if not cont:
                cont = getcontinuation(playlist)
            while cont:
                browsejson = self.yt.browse(cont)
                if self.args.debug:
                    print("============ browse req")
                    print(browsejson.decode('utf-8'))
                    print()
                js = json.loads(browsejson)
                cont = None
                playlist = getitem(js, "initdata", "continuationContents", "gridContinuation")
                if playlist:
                    for entry in getitem(playlist, "items"):
                        vid = getitem(entry, "gridVideoRenderer", "videoId")
                        title = getitem(entry, "gridVideoRenderer", "title")
                        print("%s - %s" % (vid, extracttext(title)))
                playlist = getitem(js, "initdata", "continuationContents", "playlistVideoListContinuation")
                item_tag = "contents"
                if not playlist:
                    playlist = getitem(js, "initdata", "onResponseReceivedActions", 0, "appendContinuationItemsAction", )
                    item_tag = "continuationItems"
                if playlist:
                    for entry in getitem(playlist, item_tag):
                        vid = getitem(entry, "playlistVideoRenderer", "videoId")
                        title = getitem(entry, "playlistVideoRenderer", "title")
                        if vid and title:
                            print("%s - %s" % (vid, extracttext(title)))
                        c = getitem(entry, "continuationItemRenderer", "continuationEndpoint", "continuationCommand", "token")
                        if c:
                            cl = getitem(entry, "continuationItemRenderer", "continuationEndpoint", "clickTrackingParams")
                            cont = c, cl
                if not playlist:
                    break
                if not cont:
                    cont = getcontinuation(playlist)
        return
def parse_youtube_link(url):
    """
    Recognize different types of youtube urls:
     http://, https://
     youtu.be/<videoid>[?list=<listid>]
     (?:www.)?youtube.com...
        /channel/<channelid>
        /c/<channelname>
        /playlist?list=<listid>
        /watch?v=<videoid>  [&t=pos] [&list=<listid>]
        /watch/<videoid>
        /v/<videoid>
        /embed/<videoid>
        /user/<username>
        /watch_videos?video_ids=<videoid>,<videoid>,...
        /results?search_query=...

    Yields (idtype, idvalue) tuples.
    """
    m = re.match(r'^(?:https?://)?(?:www\.)?(?:(?:youtu\.be|youtube\.com)/)?(.*)', url)
    if not m:
        raise Exception("youtube link not matched")
    path = m.group(1)
    if m := re.match(r'^user/([^/?]+)', path):
        yield 'username', m.group(1)
    elif m := re.match(r'^(\w+)/([A-Za-z0-9_-]+)(.*)', path):
        idtype = m.group(1)
        # bugfix: the original used `idtype in ('channel')` etc. -- a parenthesized
        # string, not a tuple -- which is a *substring* test, so e.g. 'c' wrongly
        # matched 'channel'.  Use equality / real tuples instead.
        if idtype in ('v', 'embed', 'watch'):
            idtype = 'video'
        elif idtype == 'c':
            idtype = 'channelname'
        elif idtype not in ('channel', 'playlist'):
            raise Exception("unknown id type")
        idvalue = m.group(2)
        yield idtype, idvalue
        if idtype == 'channel':
            # a channel's 'uploads' playlist id is the channel id with 'UC' -> 'UU'
            yield 'playlist', 'UU' + idvalue[2:]
        idargs = urllib.parse.parse_qs(m.group(3))
        if idvalue := idargs.get('v'):
            if idvalue[0]:
                yield 'video', idvalue[0]
        if idvalue := idargs.get('list'):
            if idvalue[0]:
                yield 'playlist', idvalue[0]
    elif m := re.match(r'^(v|embed|watch|channel|playlist)(?:\?(.*))?$', path):
        idtype = m.group(1)
        if idtype in ('v', 'embed', 'watch'):
            idtype = 'video'
        # 'channel' and 'playlist' already are the canonical type names
        idargs = urllib.parse.parse_qs(m.group(2))
        if idvalue := idargs.get('v'):
            if idvalue[0]:
                yield 'video', idvalue[0]
        if idvalue := idargs.get('list'):
            if idvalue[0]:
                yield 'playlist', idvalue[0]
    elif m := re.match(r'^results\?(.*)$', path):
        idargs = urllib.parse.parse_qs(m.group(1))
        if idvalue := idargs.get('search_query'):
            if idvalue[0]:
                yield 'search', idvalue[0]
    elif m := re.match(r'^[A-Za-z0-9_-]+$', path):
        # a bare id: video ids are exactly 11 characters long
        if len(path)==11:
            yield 'video', path
        else:
            yield 'playlist', path
    else:
        raise Exception("unknown id")
def channelurl_from_userpage(cfg):
    """Resolve a /user/<name> page config to its canonical channel url."""
    lookup = ("initdata", "metadata", "channelMetadataRenderer", "channelUrl")
    return getitem(cfg, *lookup)
# or "initplayer", "microformat", "playerMicroformatRenderer", "externalChannelId"
# or "initplayer", "videoDetails", "channelId"
def check_error(cfg):
    """
    Print the playability error reason, if any.

    Returns True when the page reports an ERROR status, otherwise None.
    """
    playability = getitem(cfg, "initplayer", "playabilityStatus")
    if playability and playability["status"] == "ERROR":
        print(playability["reason"])
        return True
    return None
def main():
    """
    Command line entry point: parse the arguments, then process each of
    the given urls / ids according to the requested action flags.
    """
    import io
    # force utf-8 output regardless of the console's locale
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
    import argparse
    parser = argparse.ArgumentParser(description='Extract Youtube comments')
    parser.add_argument('--debug', '-d', action='store_true', help='print all intermediate steps')
    parser.add_argument('--verbose', '-v', action='store_true', help='prefix each line with the timestamp')
    parser.add_argument('--comments', '-c', action='store_true', help='Print video comments')
    parser.add_argument('--subtitles', '-t', action='store_true', help='Print video subtitles')
    parser.add_argument('--language', type=str, help='Output only subtitles in the specified language')
    parser.add_argument('--playlist', '-l', action='store_true', help='Print playlist items')
    parser.add_argument('--info', '-i', action='store_true', help='Print video info')
    parser.add_argument('--srt', action='store_true', help='Output subtitles in .srt format.')
    parser.add_argument('--query', '-q', action='store_true', help='List videos matching the specified query')
    parser.add_argument('--livechat', action='store_true', help='Follow livechat contents')
    parser.add_argument('--replay', action='store_true', help='Print livechat replay')
    parser.add_argument('--proxy', type=str, help='Specify a proxy to use.')
    parser.add_argument('ytids', nargs='+', type=str, help='One or more Youtube URLs, or IDs, or a query')
    args = parser.parse_args()
    if args.proxy and args.proxy.startswith('socks'):
        load_socks_proxy(args.proxy)
    yt = Youtube(args)
    for url in args.ytids:
        # print a separator when processing several targets
        if len(args.ytids) > 1:
            print("==>", url, "<==")
        if args.query:
            # note: the 'url' variable holds the query.
            # convert it to a query url so the parse link function can decode it.
            url = "https://www.youtube.com/results?" + urllib.parse.urlencode({"search_query": url})
        # analyze url for id's, like videoid, channelid, playlistid or search query.
        for idtype, idvalue in parse_youtube_link(url):
            # reformat the url in a way that i am sure returns the right json data.
            if idtype == 'video':
                url = "https://www.youtube.com/watch?v=%s" % idvalue
            elif idtype == 'playlist':
                url = "https://www.youtube.com/playlist?list=%s" % idvalue
            elif idtype == 'channel':
                url = "https://www.youtube.com/channel/%s" % idvalue
            elif idtype == 'username':
                url = "https://www.youtube.com/user/%s" % idvalue
            elif idtype == 'search':
                url = "https://www.youtube.com/results?" + urllib.parse.urlencode({"search_query": idvalue})
            #cfg = yt.getpageinfo(url)
            #if check_error(cfg):
            #    continue
            # NOTE(review): this local 'html' shadows the module-level
            # 'html' import inside main() -- harmless here, but confusing.
            html = yt.httpreq(url)
            if args.debug:
                print("============ youtube html")
                print(html.decode('utf-8'))
                print()
            # extract the embedded json config blobs from the page
            cfg = filterhtml(html.decode('utf-8'))
            if args.debug:
                print("============ youtube extracted config")
                print(json.dumps(cfg))
                print()
            if idtype=='username':
                url = channelurl_from_userpage(cfg)
                args.ytids.append(url)
                # note: the new url is processed in next loop iteration.
            if args.comments and idtype=='video':
                cmt = CommentReader(args, yt, cfg)
                cmt.recursecomments()
            if args.subtitles and idtype=='video':
                txt = SubtitleReader(args, yt, cfg)
                txt.output()
            if (args.replay or args.livechat) and idtype=='video':
                txt = LivechatReader(args, yt, cfg, live=args.livechat)
                txt.recursechat()
            if args.playlist and idtype=='playlist':
                lst = PlaylistReader(args, yt, cfg)
                lst.output()
            if (args.playlist or args.query) and idtype == 'search':
                q = SearchReader(args, yt, cfg)
                q.recursesearch()
            if args.info and idtype=='video':
                lst = DetailReader(args, yt, cfg)
                lst.output()
# run the command line tool when executed as a script
if __name__ == '__main__':
    main()
|
from torch.utils.data import DataLoader, ConcatDataset, Subset
def get_transform(dataset):
    """
    Return the ``transform`` of the underlying dataset, unwrapping any
    ``DataLoader``, ``Subset`` or ``ConcatDataset`` wrappers around it
    (for ``ConcatDataset`` the first member dataset is used).
    """
    current = dataset
    while True:
        if isinstance(current, DataLoader):
            current = current.dataset
        elif isinstance(current, ConcatDataset):
            current = current.datasets[0]
        elif isinstance(current, Subset):
            current = current.dataset
        else:
            return current.transform
|
# Builtins
import logging
import copy
# External libs
import numpy as np
# Locals
import oggm.cfg as cfg
from oggm import utils
from oggm import entity_task
import oggm.core.massbalance as mbmods
from oggm.core.flowline import FluxBasedModel
# Constants
# Module logger
log = logging.getLogger(__name__)
def _find_inital_glacier(final_model, firstguess_mb, y0, y1,
                         rtol=0.01, atol=10, max_ite=100,
                         init_bias=0., equi_rate=0.0005,
                         ref_area=None):
    """ Iterative search for a plausible starting time glacier

    Tries to find a glacier state at year y0 which, when run forward to
    y1 with the first-guess mass balance, reproduces the reference area.
    Returns (iterations, mass-balance bias, model); raises RuntimeError
    when no convergence within max_ite iterations.
    """
    # Objective
    if ref_area is None:
        ref_area = final_model.area_m2
    log.info('iterative_initial_glacier_search '
             'in year %d. Ref area to catch: %.3f km2. '
             'Tolerance: %.2f %%',
             np.int64(y0), ref_area * 1e-6, rtol * 100)
    # are we trying to grow or to shrink the glacier?
    prev_model = copy.deepcopy(final_model)
    prev_fls = copy.deepcopy(prev_model.fls)
    prev_model.reset_y0(y0)
    prev_model.run_until(y1)
    prev_area = prev_model.area_m2
    # Just in case we already hit the correct starting state
    if np.allclose(prev_area, ref_area, atol=atol, rtol=rtol):
        model = copy.deepcopy(final_model)
        model.reset_y0(y0)
        log.info('iterative_initial_glacier_search: inital '
                 'starting glacier converges '
                 'to itself with a final dif of %.2f %%',
                 utils.rel_err(ref_area, prev_area) * 100)
        return 0, None, model
    # sign_mb decides the direction the mass-balance bias is applied in
    if prev_area < ref_area:
        sign_mb = -1.
        log.info('iterative_initial_glacier_search, ite: %d. '
                 'Glacier would be too '
                 'small of %.2f %%. Continue', 0,
                 utils.rel_err(ref_area, prev_area) * 100)
    else:
        log.info('iterative_initial_glacier_search, ite: %d. '
                 'Glacier would be too '
                 'big of %.2f %%. Continue', 0,
                 utils.rel_err(ref_area, prev_area) * 100)
        sign_mb = 1.
    # Log prefix
    logtxt = 'iterative_initial_glacier_search'
    # Loop until 100 iterations
    c = 0
    bias_step = 0.1
    mb_bias = init_bias - bias_step
    reduce_step = 0.01
    mb = copy.deepcopy(firstguess_mb)
    mb.temp_bias = sign_mb * mb_bias
    grow_model = FluxBasedModel(copy.deepcopy(final_model.fls), mb_model=mb,
                                fs=final_model.fs,
                                glen_a=final_model.glen_a,
                                min_dt=final_model.min_dt,
                                max_dt=final_model.max_dt)
    while True and (c < max_ite):
        c += 1
        # Grow
        mb_bias += bias_step
        mb.temp_bias = sign_mb * mb_bias
        log.info(logtxt + ', ite: %d. New bias: %.2f', c, sign_mb * mb_bias)
        grow_model.reset_flowlines(copy.deepcopy(prev_fls))
        grow_model.reset_y0(0.)
        grow_model.run_until_equilibrium(rate=equi_rate)
        log.info(logtxt + ', ite: %d. Grew to equilibrium for %d years, '
                          'new area: %.3f km2', c, grow_model.yr,
                 grow_model.area_km2)
        # Shrink
        new_fls = copy.deepcopy(grow_model.fls)
        new_model = copy.deepcopy(final_model)
        new_model.reset_flowlines(copy.deepcopy(new_fls))
        new_model.reset_y0(y0)
        new_model.run_until(y1)
        new_area = new_model.area_m2
        # Maybe we done?
        if np.allclose(new_area, ref_area, atol=atol, rtol=rtol):
            new_model.reset_flowlines(new_fls)
            new_model.reset_y0(y0)
            log.info(logtxt + ', ite: %d. Converged with a '
                              'final dif of %.2f %%', c,
                     utils.rel_err(ref_area, new_area)*100)
            return c, mb_bias, new_model
        # See if we did a step to far or if we have to continue growing
        do_cont_1 = (sign_mb < 0.) and (new_area < ref_area)
        do_cont_2 = (sign_mb > 0.) and (new_area > ref_area)
        if do_cont_1 or do_cont_2:
            # Reset the previous state and continue
            prev_fls = new_fls
            log.info(logtxt + ', ite: %d. Dif of %.2f %%. '
                              'Continue', c,
                     utils.rel_err(ref_area, new_area)*100)
            continue
        # Ok. We went too far. Reduce the bias step but keep previous state
        mb_bias -= bias_step
        # NOTE(review): with reduce_step == 0.01 this division *grows* the
        # step by 100x, so the `bias_step < 0.1` break below can never
        # trigger -- possibly `*=` was intended.  TODO confirm upstream.
        bias_step /= reduce_step
        log.info(logtxt + ', ite: %d. Went too far.', c)
        if bias_step < 0.1:
            break
    raise RuntimeError('Did not converge after {} iterations'.format(c))
@entity_task(log, writes=['model_run'])
def iterative_initial_glacier_search(gdir, y0=None, init_bias=0., rtol=0.005,
                                     write_steps=True):
    """Iterative search for the glacier in year y0.

    this is outdated and doesn't really work.

    Runs the found historical model forward to the RGI date and stores
    the result in the 'model_run' file of the glacier directory.
    """
    fs = cfg.PARAMS['fs']
    glen_a = cfg.PARAMS['glen_a']
    if y0 is None:
        y0 = cfg.PARAMS['y0']
    y1 = gdir.rgi_date.year
    mb = mbmods.PastMassBalance(gdir)
    fls = gdir.read_pickle('model_flowlines')
    model = FluxBasedModel(fls, mb_model=mb, y0=0., fs=fs, glen_a=glen_a)
    # sanity check: the present-day model should match the RGI area
    assert np.isclose(model.area_km2, gdir.rgi_area_km2, rtol=0.05)
    mb = mbmods.BackwardsMassBalanceModel(gdir)
    ref_area = gdir.rgi_area_m2
    ite, bias, past_model = _find_inital_glacier(model, mb, y0, y1,
                                                 rtol=rtol,
                                                 init_bias=init_bias,
                                                 ref_area=ref_area)
    # delete=True: overwrite any result from a previous run
    path = gdir.get_filepath('model_run', delete=True)
    if write_steps:
        past_model.run_until_and_store(y1, path=path)
    else:
        past_model.to_netcdf(path)
# NOTE(review): takes 'self' and calls self.assertTrue -- this looks like a
# unittest.TestCase method pasted at module level; confirm enclosing class.
def test_find_t0(self):
    from oggm.tests.funcs import init_hef
    from oggm.core import flowline
    import pandas as pd
    import matplotlib.pyplot as plt
    do_plot = True
    gdir = init_hef(border=80)
    flowline.init_present_time_glacier(gdir)
    glacier = gdir.read_pickle('model_flowlines')
    # reference glacier length record (Leclercq), restricted to >= 1950
    df = pd.read_csv(utils.get_demo_file('hef_lengths.csv'), index_col=0)
    df.columns = ['Leclercq']
    df = df.loc[1950:]
    vol_ref = flowline.FlowlineModel(glacier).volume_km3
    init_bias = 94.  # so that "went too far" comes once on travis
    rtol = 0.005
    flowline.iterative_initial_glacier_search(gdir, y0=df.index[0],
                                              init_bias=init_bias,
                                              rtol=rtol, write_steps=True)
    past_model = flowline.FileModel(gdir.get_filepath('model_run'))
    vol_start = past_model.volume_km3
    bef_fls = copy.deepcopy(past_model.fls)
    # yearly samples of the modelled length, aligned with the reference
    mylen = past_model.length_m_ts()
    df['oggm'] = mylen[12::12].values
    # lengths relative to the last (2003) value
    df = df-df.iloc[-1]
    past_model.run_until(2003)
    vol_end = past_model.volume_km3
    np.testing.assert_allclose(vol_ref, vol_end, rtol=0.05)
    rmsd = utils.rmsd(df.Leclercq, df.oggm)
    self.assertTrue(rmsd < 1000.)
    if do_plot:  # pragma: no cover
        df.plot()
        plt.ylabel('Glacier length (relative to 2003)')
        plt.show()
        plt.figure()
        lab = 'ref (vol={:.2f}km3)'.format(vol_ref)
        plt.plot(glacier[-1].surface_h, 'k', label=lab)
        lab = 'oggm start (vol={:.2f}km3)'.format(vol_start)
        plt.plot(bef_fls[-1].surface_h, 'b', label=lab)
        lab = 'oggm end (vol={:.2f}km3)'.format(vol_end)
        plt.plot(past_model.fls[-1].surface_h, 'r', label=lab)
        plt.plot(glacier[-1].bed_h, 'gray', linewidth=2)
        plt.legend(loc='best')
        plt.show()
|
/**
 * Functions related to the product detail page (PDP).
 *
 * @author Zsolt Molnar <zmolnar@sessiondigital.de>
 * */
var _HDM_Productpage = (function($) {
    'use strict';

    return {
        // one-shot guards so each widget is only initialised once
        mediaSliderInited: false,
        mediaGalleryInited: false,
        tabsInited: false,
        relatedBoxInited: false,

        /* Initialization scripts */
        init: function() {
            _HDM_Productpage.initMediaSlider();
            _HDM_Productpage.initMediaGallery();
            _HDM_Productpage.initTabs();
            _HDM_Productpage.initRelatedBox();
            _HDM_Productpage.moveSuccessorNoticeAboveHeader();
        },

        /**
         * Initialises the media slider on the pdp.
         * The main slider and the thumbnail nav slider are linked via
         * asNavFor; the 'init' handler must be bound BEFORE the nav
         * slick() call so it fires for the initial render.
         * */
        initMediaSlider: function () {
            if (!_HDM_Productpage.mediaSliderInited) {
                $('.media-slider').slick({
                    slidesToShow: 1,
                    slidesToScroll: 1,
                    arrows: false,
                    fade: true,
                    asNavFor: '.media-slider-nav'
                });
                $('.media-slider-nav').on('init', function(evt, slick){
                    $.each($('.media-slider-nav .slick-slide'), function (i, slide) {
                        // wrap the target index around the cloned edge slides
                        var goTo = $(slide).data('slick-index');
                        if (goTo >= slick.slideCount) {
                            goTo = 0;
                        }
                        if (goTo < 0) {
                            goTo = slick.slideCount - 1;
                        }
                        $(slide).click(function() {
                            $('.media-slider-nav').slick('slickGoTo', goTo, true);
                        });
                    });
                });
                $('.media-slider-nav').slick({
                    slidesToShow: 3,
                    slidesToScroll: 1,
                    asNavFor: '.media-slider',
                    arrows: false,
                    dots: true,
                    centerMode: true,
                    centerPadding: 0,
                    focusOnSelect: true
                });
            }
            _HDM_Productpage.mediaSliderInited = true;
        },

        /**
         * Initialises the gallery (colorbox lightbox over the media links).
         * */
        initMediaGallery: function () {
            if (!_HDM_Productpage.mediaGalleryInited) {
                $('.productmedia-gallery').colorbox({
                    rel: 'productMediaGallery',
                    opacity: 0.6,
                    maxWidth: 900
                });
            }
            _HDM_Productpage.mediaGalleryInited = true;
        },

        /**
         * Initialises related/upsell sliders.
         * */
        initRelatedBox: function () {
            if (!_HDM_Productpage.relatedBoxInited) {
                $('.box-related .box-content').slick({
                    speed: 500,
                    slidesToShow: 4,
                    slidesToScroll: 2,
                    arrows: true,
                    infinite: false,
                    dots: false
                });
                $('.box-upsell .box-content').slick({
                    speed: 500,
                    slidesToShow: 4,
                    slidesToScroll: 2,
                    arrows: true,
                    infinite: false,
                    dots: false
                });
            }
            _HDM_Productpage.relatedBoxInited = true;
        },

        /**
         * Initialises info tabs.
         * */
        initTabs: function () {
            if (!_HDM_Productpage.tabsInited) {
                $(".product-page-tabs").easytabs({
                    animate: false,
                    defaultTab: "li.tab-0",
                    panelActiveClass: "selected",
                    tabActiveClass: "selected",
                    tabs: "> ul > li",
                    updateHash: false
                });
            }
            _HDM_Productpage.tabsInited = true;
        },

        /**
         * If there is a successor block on the PDP, this function will move it above the header.
         * */
        moveSuccessorNoticeAboveHeader: function() {
            var $successorNoticeBlock = $('#notice-successor-block');
            if (!$successorNoticeBlock.length) {
                return;
            }
            $successorNoticeBlock.appendTo('.global-site-notices').show();
        }
    }
})(jQuery);
|
import {createAppContainer} from 'react-navigation';
import {createStackNavigator} from 'react-navigation-stack';
import Main from './pages/Main';
import User from './pages/User';
// Route map: the first entry ('Main') is the initial screen of the stack.
const screens = {
  Main,
  User,
};

// Shared navigator chrome: centered purple header with white text,
// and no back-button title on iOS.
const stackConfig = {
  headerLayoutPreset: 'center',
  headerBackTitleVisible: false,
  defaultNavigationOptions: {
    headerStyle: {
      backgroundColor: '#7159c1',
    },
    headerTintColor: '#fff',
  },
};

const Routes = createAppContainer(createStackNavigator(screens, stackConfig));

export default Routes;
|
/*
 * jQuery UI Button 1.8.2
 *
 * Copyright (c) 2010 AUTHORS.txt (http://jqueryui.com/about)
 * Dual licensed under the MIT (MIT-LICENSE.txt)
 * and GPL (GPL-LICENSE.txt) licenses.
 *
 * http://docs.jquery.com/UI/Button
 *
 * Depends:
 *	jquery.ui.core.js
 *	jquery.ui.widget.js
 *
 * NOTE: vendored minified build -- do not edit by hand; upgrade by
 * replacing the whole file with a newer official release.
 */
(function(a){var g,i=function(b){a(":ui-button",b.target.form).each(function(){var c=a(this).data("button");setTimeout(function(){c.refresh()},1)})},h=function(b){var c=b.name,d=b.form,e=a([]);if(c)e=d?a(d).find("[name='"+c+"']"):a("[name='"+c+"']",b.ownerDocument).filter(function(){return!this.form});return e};a.widget("ui.button",{options:{text:true,label:null,icons:{primary:null,secondary:null}},_create:function(){this.element.closest("form").unbind("reset.button").bind("reset.button",i);this._determineButtonType();
this.hasTitle=!!this.buttonElement.attr("title");var b=this,c=this.options,d=this.type==="checkbox"||this.type==="radio",e="ui-state-hover"+(!d?" ui-state-active":"");if(c.label===null)c.label=this.buttonElement.html();if(this.element.is(":disabled"))c.disabled=true;this.buttonElement.addClass("ui-button ui-widget ui-state-default ui-corner-all").attr("role","button").bind("mouseenter.button",function(){if(!c.disabled){a(this).addClass("ui-state-hover");this===g&&a(this).addClass("ui-state-active")}}).bind("mouseleave.button",
function(){c.disabled||a(this).removeClass(e)}).bind("focus.button",function(){a(this).addClass("ui-state-focus")}).bind("blur.button",function(){a(this).removeClass("ui-state-focus")});d&&this.element.bind("change.button",function(){b.refresh()});if(this.type==="checkbox")this.buttonElement.bind("click.button",function(){if(c.disabled)return false;a(this).toggleClass("ui-state-active");b.buttonElement.attr("aria-pressed",b.element[0].checked)});else if(this.type==="radio")this.buttonElement.bind("click.button",
function(){if(c.disabled)return false;a(this).addClass("ui-state-active");b.buttonElement.attr("aria-pressed",true);var f=b.element[0];h(f).not(f).map(function(){return a(this).button("widget")[0]}).removeClass("ui-state-active").attr("aria-pressed",false)});else{this.buttonElement.bind("mousedown.button",function(){if(c.disabled)return false;a(this).addClass("ui-state-active");g=this;a(document).one("mouseup",function(){g=null})}).bind("mouseup.button",function(){if(c.disabled)return false;a(this).removeClass("ui-state-active")}).bind("keydown.button",
function(f){if(c.disabled)return false;if(f.keyCode==a.ui.keyCode.SPACE||f.keyCode==a.ui.keyCode.ENTER)a(this).addClass("ui-state-active")}).bind("keyup.button",function(){a(this).removeClass("ui-state-active")});this.buttonElement.is("a")&&this.buttonElement.keyup(function(f){f.keyCode===a.ui.keyCode.SPACE&&a(this).click()})}this._setOption("disabled",c.disabled)},_determineButtonType:function(){this.type=this.element.is(":checkbox")?"checkbox":this.element.is(":radio")?"radio":this.element.is("input")?
"input":"button";if(this.type==="checkbox"||this.type==="radio"){this.buttonElement=this.element.parents().last().find("[for="+this.element.attr("id")+"]");this.element.addClass("ui-helper-hidden-accessible");var b=this.element.is(":checked");b&&this.buttonElement.addClass("ui-state-active");this.buttonElement.attr("aria-pressed",b)}else this.buttonElement=this.element},widget:function(){return this.buttonElement},destroy:function(){this.element.removeClass("ui-helper-hidden-accessible");this.buttonElement.removeClass("ui-button ui-widget ui-state-default ui-corner-all ui-state-hover ui-state-active ui-button-icons-only ui-button-icon-only ui-button-text-icons ui-button-text-icon ui-button-text-only").removeAttr("role").removeAttr("aria-pressed").html(this.buttonElement.find(".ui-button-text").html());
this.hasTitle||this.buttonElement.removeAttr("title");a.Widget.prototype.destroy.call(this)},_setOption:function(b,c){a.Widget.prototype._setOption.apply(this,arguments);if(b==="disabled")c?this.element.attr("disabled",true):this.element.removeAttr("disabled");this._resetButton()},refresh:function(){var b=this.element.is(":disabled");b!==this.options.disabled&&this._setOption("disabled",b);if(this.type==="radio")h(this.element[0]).each(function(){a(this).is(":checked")?a(this).button("widget").addClass("ui-state-active").attr("aria-pressed",
true):a(this).button("widget").removeClass("ui-state-active").attr("aria-pressed",false)});else if(this.type==="checkbox")this.element.is(":checked")?this.buttonElement.addClass("ui-state-active").attr("aria-pressed",true):this.buttonElement.removeClass("ui-state-active").attr("aria-pressed",false)},_resetButton:function(){if(this.type==="input")this.options.label&&this.element.val(this.options.label);else{var b=this.buttonElement.removeClass("ui-button-icons-only ui-button-icon-only ui-button-text-icons ui-button-text-icon ui-button-text-only"),
c=a("<span></span>").addClass("ui-button-text").html(this.options.label).appendTo(b.empty()).text(),d=this.options.icons,e=d.primary&&d.secondary;if(d.primary||d.secondary){b.addClass("ui-button-text-icon"+(e?"s":""));d.primary&&b.prepend("<span class='ui-button-icon-primary ui-icon "+d.primary+"'></span>");d.secondary&&b.append("<span class='ui-button-icon-secondary ui-icon "+d.secondary+"'></span>");if(!this.options.text){b.addClass(e?"ui-button-icons-only":"ui-button-icon-only").removeClass("ui-button-text-icons ui-button-text-icon");
this.hasTitle||b.attr("title",c)}}else b.addClass("ui-button-text-only")}}});a.widget("ui.buttonset",{_create:function(){this.element.addClass("ui-buttonset");this._init()},_init:function(){this.refresh()},_setOption:function(b,c){b==="disabled"&&this.buttons.button("option",b,c);a.Widget.prototype._setOption.apply(this,arguments)},refresh:function(){this.buttons=this.element.find(":button, :submit, :reset, :checkbox, :radio, a, :data(button)").filter(":ui-button").button("refresh").end().not(":ui-button").button().end().map(function(){return a(this).button("widget")[0]}).removeClass("ui-corner-all ui-corner-left ui-corner-right").filter(":first").addClass("ui-corner-left").end().filter(":last").addClass("ui-corner-right").end().end()},
destroy:function(){this.element.removeClass("ui-buttonset");this.buttons.map(function(){return a(this).button("widget")[0]}).removeClass("ui-corner-left ui-corner-right").end().button("destroy");a.Widget.prototype.destroy.call(this)}})})(jQuery);
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
DWF Python Example: bit-bang an SPI-style transfer using the DigitalOut
instrument (DIO 0 = data, DIO 1 = clock, DIO 2 = chip select).

Modified by: MURAMATSU Atsushi <amura@tomato.sakura.ne.jp>
Revised: 2016-04-21
Original Author: Digilent, Inc.
Original Revision: 8/21/2014
Requires:
    Python 2.7, 3.3 or later
"""
import dwf
import time
# print DWF version
print("DWF Version: " + dwf.FDwfGetVersion())
# open device (first available)
dwf_do = dwf.DwfDigitalOut()
hzSys = dwf_do.internalClockInfo()  # internal clock frequency, Hz
# SPI parameters
CPOL = 0 # clock polarity: idle level of the clock line (0 or 1)
CPHA = 0 # clock phase (0 or 1)
hzFreq = 1e6             # SPI clock frequency, Hz
cBits = 16               # total bits to shift out (rgdData is 2 bytes = 16 bits)
rgdData = [0x12, 0x34]   # payload bytes
# serialization time length: run for cBits periods plus half a period of
# margin, then the instrument stops and lines return to their idle levels
dwf_do.runSet((cBits + 0.5) / hzFreq)
# DIO 2 Select
dwf_do.enableSet(2, True)
# output high while DigitalOut not running (select deasserted)
dwf_do.idleSet(2, dwf_do.IDLE.HIGH)
# output constant low while running (select asserted for the whole transfer)
dwf_do.counterInitSet(2, False, 0)
dwf_do.counterSet(2, 0, 0)
# DIO 1 Clock
dwf_do.enableSet(1, True)
# set prescaler twice of SPI frequency (counter toggles once per half period)
dwf_do.dividerSet(1, int(hzSys / hzFreq / 2))
# 1 tick low, 1 tick high -> 50% duty square wave
dwf_do.counterSet(1, 1, 1)
# start with low or high based on clock polarity
dwf_do.counterInitSet(1, CPOL, 1)
dwf_do.idleSet(1, dwf_do.IDLE.HIGH if CPOL else dwf_do.IDLE.LOW)
# DIO 0 Data, driven from a custom bit pattern
dwf_do.enableSet(0, True)
dwf_do.typeSet(0, dwf_do.TYPE.CUSTOM)
# for high active clock, hold the first bit for 1.5 periods so data is
# stable on the first sampling edge when CPHA=1
dwf_do.dividerInitSet(0, int((1+0.5*CPHA)*hzSys/hzFreq))
# SPI frequency, bit frequency (one data bit per clock period)
dwf_do.dividerSet(0, int(hzSys / hzFreq))
# data sent out LSB first
# NOTE(review): bit order is determined by create_bitdata_stream(rgdData, 8);
# confirm LSB-first against the dwf package documentation.
dwf_do.dataSet(0, dwf.create_bitdata_stream(rgdData, 8))
dwf_do.configure(True)  # arm and start the pattern
print("Generating SPI signal")
time.sleep(1)
dwf_do.reset()
dwf_do.close()
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import cgi
class webserverHandler(BaseHTTPRequestHandler):
    """Handles HTTP requests for HTTPServer.

    GET /hello and GET /hola serve a small HTML form; POST /hello echoes
    the submitted multipart 'message' field back to the client.
    """

    # Shared HTML form fragment, posted back to /hello.
    FORM = ("<form method='POST' enctype='multipart/form-data' action='/hello'>"
            "<h2>What would you like me to say?</h2>"
            "<input name='message' type='text'> <input type='submit' value ='Submit'> </form>")

    def _send_page(self, output):
        """Send a 200 text/html response with `output` as the body."""
        self.send_response(200)  # response code 200 indicates success
        self.send_header('Content-type', 'text/html')
        self.end_headers()  # blank line to indicate end of headers
        # wfile is the output stream back to the client; write takes
        # encoded bytes, not strings
        self.wfile.write(bytes(output, "utf-8"))
        print(output)  # debugging

    def do_GET(self):
        try:
            if self.path.endswith("/hello"):  # path contains URL sent by the client
                output = "<html><body>"
                output += "<h1>Hello!</h1>"
                output += self.FORM
                output += "</body></html>"
                self._send_page(output)
                return
            if self.path.endswith("/hola"):
                output = "<html><body>¡Hola <br/> <a href='/hello'>Back to Hello</a>"
                output += self.FORM
                output += "</body></html>"
                self._send_page(output)
                return
            # BUG FIX: previously unknown paths fell through with no response
            # at all, leaving the client hanging until its timeout.
            self.send_error(404, "File Not Found %s" % self.path)
        except IOError:
            self.send_error(404, "File Not Found %s" % self.path)

    def do_POST(self):
        try:
            self.send_response(301)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            # parses into main value and dictionary of parameters
            ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
            if ctype == 'multipart/form-data':
                # BUG FIX: only multipart bodies carry a 'boundary' parameter;
                # reading it unconditionally raised KeyError for other types.
                pdict['boundary'] = bytes(pdict['boundary'], "utf-8")
                fields = cgi.parse_multipart(self.rfile, pdict)  # rfile - file to be read
                messagecontent = fields.get('message')
                output = ""
                output += "<html><body>"
                output += "<h2> Okay, how about this: </h2>"
                # NOTE(review): assumes parse_multipart yields byte strings for
                # this field (hence .decode); Python-version dependent — verify.
                output += "<h1> %s </h1>" % messagecontent[0].decode("utf-8")
                for item in messagecontent:
                    print(str(item))
                # Form to get the data
                output += self.FORM
                output += "</body></html>"
                self.wfile.write(bytes(output, 'utf-8'))
                print(output)
        except IOError:
            self.send_error(404, "File")
def main():
    """Run the web server on port 8080 until interrupted with Ctrl-C."""
    server = None  # BUG FIX: bind before try so the except clause can't NameError
    try:
        port = 8080
        server = HTTPServer(('', port), webserverHandler)
        print("Web server running on port %s" % port)
        server.serve_forever()
    except KeyboardInterrupt:  # handles ctrl-c input
        print("^C entered, stopping web server")
        if server is not None:
            server.socket.close()


if __name__ == '__main__':
    main()
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ReplicationControllerSpec(object):
    """Swagger model for a ReplicationController spec.

    NOTE: generated by the swagger code generator program; edit with care.

    Class attributes:
        swagger_types: maps attribute name -> declared swagger type.
        attribute_map: maps attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'min_ready_seconds': 'int',
        'replicas': 'int',
        'selector': 'dict(str, str)',
        'template': 'V1PodTemplateSpec'
    }

    attribute_map = {
        'min_ready_seconds': 'minReadySeconds',
        'replicas': 'replicas',
        'selector': 'selector',
        'template': 'template'
    }

    def __init__(self, min_ready_seconds=None, replicas=None, selector=None, template=None):
        """V1ReplicationControllerSpec - a model defined in Swagger."""
        self._min_ready_seconds = None
        self._replicas = None
        self._selector = None
        self._template = None
        self.discriminator = None
        # Only route non-None arguments through the setters, so unspecified
        # fields stay None without tripping any setter logic.
        if min_ready_seconds is not None:
            self.min_ready_seconds = min_ready_seconds
        if replicas is not None:
            self.replicas = replicas
        if selector is not None:
            self.selector = selector
        if template is not None:
            self.template = template

    @property
    def min_ready_seconds(self):
        """Minimum seconds a new pod must be ready (no container crashing)
        before it counts as available. Defaults to 0.

        :rtype: int
        """
        return self._min_ready_seconds

    @min_ready_seconds.setter
    def min_ready_seconds(self, min_ready_seconds):
        """Set min_ready_seconds.

        :type: int
        """
        self._min_ready_seconds = min_ready_seconds

    @property
    def replicas(self):
        """Desired replica count; a pointer to distinguish explicit zero from
        unspecified. Defaults to 1. More info:
        https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller

        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """Set replicas.

        :type: int
        """
        self._replicas = replicas

    @property
    def selector(self):
        """Label query over pods that should match the replicas count; if
        empty, defaults to the labels on the pod template. More info:
        https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors

        :rtype: dict(str, str)
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        """Set selector.

        :type: dict(str, str)
        """
        self._selector = selector

    @property
    def template(self):
        """Object describing the pod created when insufficient replicas are
        detected; takes precedence over a TemplateRef. More info:
        https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template

        :rtype: V1PodTemplateSpec
        """
        return self._template

    @template.setter
    def template(self, template):
        """Set template.

        :type: V1PodTemplateSpec
        """
        self._template = template

    def to_dict(self):
        """Return the model properties as a dict, recursing into values that
        themselves expose to_dict()."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    element.to_dict() if hasattr(element, "to_dict") else element
                    for element in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, V1ReplicationControllerSpec):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
|
"""Utilities that interact with IDA."""
import idaapi
import idc
import idautils
from ._service import Service
from ._comment_handler import CommentHandlers
from ._ctyperewriter import Rewriter
# Module-level singletons shared by the provider functions below.
service = Service()
comment = CommentHandlers()
rewriter = Rewriter()
def addresses():
    """Yield every mapped address, walking each segment from its start
    to its end one item at a time."""
    for seg in idautils.Segments():
        ea = idc.SegStart(seg)
        while ea < idc.SegEnd(seg):
            yield ea
            ea = idaapi.nextaddr(ea)
@service.provider('loader')
def output_segments(out):
    """Dump binary segmentation as an s-expression: processor name, register
    width, then one `(name kind file-offset (start size))` entry per segment.

    :param out: writable text stream receiving the dump.
    """
    info = idaapi.get_inf_structure()
    # BUG FIX: `is_32bit` is a method on the idainfo structure; referencing it
    # without calling it is always truthy, so the width was reported as "r32"
    # even for 64-bit binaries.
    size = "r32" if info.is_32bit() else "r64"
    out.writelines(('(', info.get_proc_name()[1], ' ', size, ' ('))
    for seg in idautils.Segments():
        out.write("\n({} {} {:d} ({:#x} {:d}))".format(
            idaapi.get_segm_name(seg),
            "code" if idaapi.segtype(seg) == idaapi.SEG_CODE else "data",
            idaapi.get_fileregion_offset(seg),
            seg, idaapi.getseg(seg).size()))
    out.write("))\n")
@service.provider('symbols')
def output_symbols(out):
    """Dump symbols as `("name" start end)` lines, one per function."""
    try:
        from idaapi import get_func_name2 as get_func_name
        # Since get_func_name is deprecated (at least from IDA 6.9)
    except ImportError:
        from idaapi import get_func_name
        # Older versions of IDA don't have get_func_name2
        # so we just use the older name get_func_name
    def func_name_propagate_thunk(ea):
        # For thunk functions with auto-generated (non-alphabetic-leading)
        # names, try to substitute the shorter name of the thunk's target.
        current_name = get_func_name(ea)
        if current_name[0].isalpha():
            return current_name
        func = idaapi.get_func(ea)
        temp_ptr = idaapi.ea_pointer()
        ea_new = idaapi.BADADDR
        if func.flags & idaapi.FUNC_THUNK == idaapi.FUNC_THUNK:
            ea_new = idaapi.calc_thunk_func_target(func, temp_ptr.cast())
        if ea_new != idaapi.BADADDR:
            ea = ea_new
        propagated_name = get_func_name(ea) or '' # Ensure it is not `None`
        if len(current_name) > len(propagated_name) > 0:
            return propagated_name
        else:
            return current_name
        # Fallback to non-propagated name for weird times that IDA gives
        # a 0 length name, or finds a longer import name
    for ea in idautils.Segments():
        fs = idautils.Functions(idc.SegStart(ea), idc.SegEnd(ea))
        for f in fs:
            out.write('("%s" 0x%x 0x%x)\n' % (
                func_name_propagate_thunk(f),
                idc.GetFunctionAttr(f, idc.FUNCATTR_START),
                idc.GetFunctionAttr(f, idc.FUNCATTR_END)))
@service.provider('types')
def output_types(out):
    """Dump type information: local types followed by function prototypes,
    each line passed through the C-type rewriter."""
    all_lines = local_types() + prototypes()
    for decl in all_lines:
        out.write('{}\n'.format(rewriter.translate(decl)))
@service.provider('brancher')
def output_branches(out):
    """Dump static successors for each instruction that has any."""
    out.write('(')
    for addr in addresses():
        info = Succs(addr)
        # Only emit instructions with at least one jump target or a
        # fall-through successor.
        if info.jmps or (info.fall is not None):
            out.write(info.dumps() + '\n')
    out.write(')')
def set_color(addr, color):
    """Set the background color of the IDA item at `addr` (CIC_ITEM scope)."""
    idc.SetColor(addr, idc.CIC_ITEM, color)
class Printer(idaapi.text_sink_t):
    """Text sink that accumulates every printed line into `self.lines`."""
    def __init__(self):
        try:
            idaapi.text_sink_t.__init__(self)
        except AttributeError:
            pass # Older IDA versions keep the text_sink_t abstract
        self.lines = []
    def _print(self, thing):
        # Called by IDA once per output line; return 0 to continue printing.
        self.lines.append(thing)
        return 0
def local_types():
    """Return all local type declarations as a list of text lines,
    including dependencies and forward declarations."""
    printer = Printer()
    idaapi.print_decls(printer, idaapi.cvar.idati, [],
                       idaapi.PDF_INCL_DEPS | idaapi.PDF_DEF_FWD)
    return printer.lines
def prototypes():
    """Return the unique printable function prototypes as a list of
    `<prototype>;` declaration strings.

    :returns: list of C declaration strings (deduplicated, unordered).
    """
    types = set()
    for ea in idautils.Functions():
        proto = idaapi.print_type(ea, True)
        if proto:
            # BUG FIX: `types` is a set, which has no `append` method; the
            # original `types.append(...)` raised AttributeError on the very
            # first printable prototype.
            types.add(proto + ';')
    return list(types)
class Succs(object):
    """Static successors of the instruction at `addr`.

    `jmps` holds explicit jump targets, `dests` all code references, and
    `fall` the fall-through successor (or None when there is none).
    """

    def __init__(self, addr):
        self.addr = addr
        self.dests = set(idautils.CodeRefsFrom(addr, True))
        self.jmps = set(idautils.CodeRefsFrom(addr, False))
        # The fall-through successor is a destination that is not an
        # explicit jump target.
        falls = self.dests - self.jmps
        self.fall = list(falls)[0] if falls else None

    def dumps(self):
        """Render `(addr (fall) (jmp ...))` as an s-expression string."""
        return ''.join([
            '({:#x} '.format(self.addr),
            # BUG FIX: compare against None (as the caller in output_branches
            # does); a truthiness test would drop a valid fall-through at
            # address 0.
            ' ({:#x}) '.format(self.fall) if self.fall is not None else '()',
            '{})'.format(sexps(self.jmps))
        ])
def sexps(addrs):
    """Render `addrs` as a space-separated, parenthesized hex list,
    e.g. `( 0x10 0x20 )`."""
    parts = ['(']
    parts.extend('{:#x}'.format(a) for a in addrs)
    parts.append(')')
    return ' '.join(parts)
|
'use strict'
// Thin wrapper around the `imap` package that adds promise-returning
// variants of selected callback-style client methods.
const Imap = require('imap')
// Each listed method gets a promisified `<name>P` counterpart on the client.
const PROMISIFIED_METHODS = ['openBox', 'search']
/**
 * Wrap a Node-style callback method into a promise-returning function.
 * The returned function forwards its arguments to `fn` (invoked with
 * `self` as `this`) and resolves with the callback's result, or rejects
 * with its error.
 */
const promisify = (fn, self) => (...args) =>
  new Promise((ok, fail) => {
    fn.call(self, ...args, (err, res) => {
      if (err) {
        fail(err);
      } else {
        ok(res);
      }
    });
  });
module.exports = options => {
const client = new Imap(options)
PROMISIFIED_METHODS.forEach(prop => {
client[prop + 'P'] = promisify(client[prop], client)
})
return client
}
|
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file KalmanFilter.h
* @brief Simple linear Kalman filter. Implemented using factor graphs, i.e., does Cholesky-based SRIF, really.
* @date Sep 3, 2011
* @author Stephen Williams
* @author Frank Dellaert
*/
#pragma once
#include <gtsam/linear/GaussianDensity.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/NoiseModel.h>
// Default elimination variant used by KalmanFilter; may be overridden by
// defining KALMANFILTER_DEFAULT_FACTORIZATION before including this header.
#ifndef KALMANFILTER_DEFAULT_FACTORIZATION
#define KALMANFILTER_DEFAULT_FACTORIZATION QR
#endif
namespace gtsam {

/**
 * Kalman Filter class
 *
 * Knows how to maintain a Gaussian density under linear-Gaussian motion and
 * measurement models. It uses the square-root information form, as usual in GTSAM.
 *
 * The filter is functional, in that it does not have state: you call init() to create
 * an initial state, then predict() and update() that create new states out of an old state.
 */
class GTSAM_EXPORT KalmanFilter {

public:
  /**
   * This Kalman filter is a Square-root Information filter
   * The type below allows you to specify the factorization variant.
   */
  enum Factorization {
    QR, CHOLESKY
  };

  /**
   * The Kalman filter state is simply a GaussianDensity
   */
  typedef GaussianDensity::shared_ptr State;

private:
  const size_t n_; ///< dimensionality of state
  const Matrix I_; ///< identity matrix of size n*n
  const GaussianFactorGraph::Eliminate function_; ///< elimination algorithm (QR or Cholesky)

  /// Internal: eliminate the given factor graph to produce the new state density.
  State solve(const GaussianFactorGraph& factorGraph) const;

  /// Internal: combine the previous state with one new factor and re-solve.
  State fuse(const State& p, GaussianFactor::shared_ptr newFactor) const;

public:
  /**
   * Constructor.
   * @param n dimensionality of the state
   * @param method elimination variant to use (QR or CHOLESKY); defaults to
   *        KALMANFILTER_DEFAULT_FACTORIZATION
   */
  KalmanFilter(size_t n, Factorization method =
      KALMANFILTER_DEFAULT_FACTORIZATION) :
      n_(n), I_(Matrix::Identity(n_, n_)), function_(
          method == QR ? GaussianFactorGraph::Eliminate(EliminateQR) :
              GaussianFactorGraph::Eliminate(EliminateCholesky)) {
  }

  /**
   * Create initial state, i.e., prior density at time k=0
   * In Kalman Filter notation, these are x_{0|0} and P_{0|0}
   * @param x0 estimate at time 0
   * @param P0 covariance at time 0, given as a diagonal Gaussian 'model'
   */
  State init(const Vector& x0, const SharedDiagonal& P0) const;

  /// version of init with a full covariance matrix
  State init(const Vector& x0, const Matrix& P0) const;

  /// print
  void print(const std::string& s = "") const;

  /** Return step index k, starts at 0, incremented at each predict. */
  static Key step(const State& p) {
    return p->firstFrontalKey();
  }

  /**
   * Predict the state P(x_{t+1}|Z^t)
   * In Kalman Filter notation, this is x_{t+1|t} and P_{t+1|t}
   * Details and parameters:
   * In a linear Kalman Filter, the motion model is f(x_{t}) = F*x_{t} + B*u_{t} + w
   * where F is the state transition model/matrix, B is the control input model,
   * and w is zero-mean, Gaussian white noise with covariance Q.
   * @param p previous state
   * @param F state transition matrix
   * @param B control input matrix
   * @param u control vector
   * @param modelQ diagonal noise model for w
   */
  State predict(const State& p, const Matrix& F, const Matrix& B,
      const Vector& u, const SharedDiagonal& modelQ) const;

  /**
   * Version of predict with full covariance
   * Q is normally derived as G*w*G^T where w models uncertainty of some
   * physical property, such as velocity or acceleration, and G is derived from physics.
   * This version allows more realistic models than a diagonal covariance matrix.
   */
  State predictQ(const State& p, const Matrix& F, const Matrix& B,
      const Vector& u, const Matrix& Q) const;

  /**
   * Predict the state P(x_{t+1}|Z^t)
   * In Kalman Filter notation, this is x_{t+1|t} and P_{t+1|t}
   * After the call, that is the density that can be queried.
   * Details and parameters:
   * This version of predict takes GaussianFactor motion model [A0 A1 b]
   * with an optional noise model.
   */
  State predict2(const State& p, const Matrix& A0, const Matrix& A1,
      const Vector& b, const SharedDiagonal& model) const;

  /**
   * Update Kalman filter with a measurement
   * For the Kalman Filter, the measurement function, h(x_{t}) = z_{t}
   * will be of the form h(x_{t}) = H*x_{t} + v
   * where H is the observation model/matrix, and v is zero-mean,
   * Gaussian white noise with covariance R.
   * In this version, R is restricted to diagonal Gaussians (model parameter)
   */
  State update(const State& p, const Matrix& H, const Vector& z,
      const SharedDiagonal& model) const;

  /**
   * Version of update with a full measurement-noise covariance matrix.
   * NOTE(review): despite the parameter name Q, this is the covariance of the
   * measurement noise v (commonly written R in the update() docs above) --
   * confirm against the implementation file.
   */
  State updateQ(const State& p, const Matrix& H, const Vector& z,
      const Matrix& Q) const;
};

} // \namespace gtsam
/* ************************************************************************* */
|
/**
* Copyright 2020 The Magma Authors.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @flow
* @format
*/
import type {FBCNMSMiddleWareRequest} from '@fbcnms/express-middleware';
import {Strategy} from 'passport-strategy';
// Builds (asynchronously) the passport Strategy to delegate to for a request.
type StrategyBuilder = (req: FBCNMSMiddleWareRequest) => Promise<Strategy>;
// Derives the cache key identifying which strategy a request should use.
type StrategyIDBuilder = (req: FBCNMSMiddleWareRequest) => Promise<string>;
export default class DynamicStrategy extends Strategy {
_strategies: {[string]: Strategy} = {};
_strategyBuilder: StrategyBuilder;
_strategyIDBuilder: StrategyIDBuilder;
constructor(
strategyIDBuilder: StrategyIDBuilder,
strategyBuilder: StrategyBuilder,
) {
super();
this._strategyIDBuilder = strategyIDBuilder;
this._strategyBuilder = strategyBuilder;
}
async _getStrategy(req: FBCNMSMiddleWareRequest, name: string): Strategy {
let strategy = this._strategies[name];
if (!strategy) {
strategy = this._strategies[name] = await this._strategyBuilder(req);
}
strategy.error = this.error;
strategy.redirect = this.redirect;
strategy.success = this.success;
strategy.fail = this.fail;
strategy.pass = this.pass;
return strategy;
}
authenticate(req: FBCNMSMiddleWareRequest, options: any) {
(async () => {
const strategyID = await this._strategyIDBuilder(req);
const strategy = await this._getStrategy(req, strategyID);
strategy.authenticate(req, options);
})().catch(error => {
this.error(error);
});
}
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
def _proxy_property(field):
    """Build a read/write property backed by the `_<field>` attribute."""
    slot = '_' + field

    def _get(self):
        return getattr(self, slot)

    def _set(self, value):
        setattr(self, slot, value)

    return property(_get, _set)


class AlipayTradeFastpayEteDidiPayModel(object):
    """Request model for the alipay.trade.fastpay.ete.didi.pay API.

    Every field is a plain read/write property backed by a `_<name>`
    attribute initialized to None. Fields with falsy values are omitted
    from the dict produced by `to_alipay_dict`.
    """

    # Names of all request fields carried by this model.
    _FIELD_NAMES = (
        'body', 'extend_params', 'login_id', 'login_passwd',
        'mc_notify_url', 'out_trade_no', 'partner_id', 'pay_passwd',
        'product_code', 'seller_id', 'subject', 'total_fee', 'user_id',
    )

    def __init__(self):
        for field in self._FIELD_NAMES:
            setattr(self, '_' + field, None)

    def to_alipay_dict(self):
        """Serialize truthy fields into a plain dict, recursing into any
        value that itself implements `to_alipay_dict`."""
        params = dict()
        for field in self._FIELD_NAMES:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict of field values; None for falsy input."""
        if not d:
            return None
        o = AlipayTradeFastpayEteDidiPayModel()
        for field in AlipayTradeFastpayEteDidiPayModel._FIELD_NAMES:
            if field in d:
                setattr(o, field, d[field])
        return o


# Attach one read/write property per field (body, subject, user_id, ...).
for _field in AlipayTradeFastpayEteDidiPayModel._FIELD_NAMES:
    setattr(AlipayTradeFastpayEteDidiPayModel, _field, _proxy_property(_field))
del _field
|
/**
* @license
* Copyright (c) 2018 amCharts (Antanas Marcelionis, Martynas Majeris)
*
* This sofware is provided under multiple licenses. Please see below for
* links to appropriate usage.
*
* Free amCharts linkware license. Details and conditions:
* https://github.com/amcharts/amcharts4/blob/master/LICENSE
*
* One of the amCharts commercial licenses. Details and pricing:
* https://www.amcharts.com/online-store/
* https://www.amcharts.com/online-store/licenses-explained/
*
* If in doubt, contact amCharts at contact@amcharts.com
*
* PLEASE DO NOT REMOVE THIS COPYRIGHT NOTICE.
* @hidden
*/
am4internal_webpackJsonp(["3c52"],{XPBY:function(a,e,t){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var i={type:"FeatureCollection",features:[{type:"Feature",geometry:{type:"Polygon",coordinates:[[[50.7995,25.0816],[50.9932,25.083],[51.015,25.0944],[51.0147,25.1736],[51.1003,25.1576],[51.1396,25.1411],[51.1467,25.1358],[51.1664,25.1101],[51.1871,25.1233],[51.2152,25.1351],[51.3255,25.1758],[51.3227,25.1815],[51.322,25.1936],[51.2984,25.2514],[51.2859,25.2768],[51.272,25.2989],[51.2702,25.3071],[51.3091,25.3064],[51.3145,25.311],[51.3139,25.3923],[51.3609,25.3882],[51.3611,25.3689],[51.3903,25.368],[51.3902,25.3546],[51.4101,25.354],[51.4111,25.3445],[51.4191,25.3439],[51.4212,25.3502],[51.4171,25.3642],[51.4221,25.3678],[51.4305,25.367],[51.4407,25.356],[51.4498,25.3607],[51.4633,25.336],[51.4686,25.3223],[51.4748,25.2929],[51.4962,25.2675],[51.483,25.2577],[51.5002,25.2463],[51.5253,25.2331],[51.5302,25.2278],[51.5378,25.2086],[51.5376,25.2035],[51.5167,25.1964],[51.5106,25.1923],[51.5016,25.1804],[51.4725,25.1672],[51.4305,25.2279],[51.3943,25.2067],[51.4319,25.1511],[51.3977,25.137],[51.3786,25.1281],[51.4102,25.0977],[51.3734,25.1032],[51.359,25.1015],[51.354,25.0975],[51.3339,25.0757],[51.3163,25.0831],[51.3117,25.0869],[51.2131,25.1125],[51.2124,24.9946],[51.1133,24.9946],[51.1136,24.921],[51.0944,24.9205],[51.0942,24.8845],[51.0844,24.8847],[51.084,24.8486],[51.0941,24.8488],[51.0941,24.8272],[51.1139,24.8275],[51.1147,24.5496],[51.1162,24.4657],[51.0998,24.4646],[50.9959,24.4971],[50.9267,24.5414],[50.8549,24.6747],[50.8078,24.7422],[50.8235,24.7458],[50.8371,24.7551],[50.856,24.7649],[50.8604,24.7707],[50.8646,24.7874],[50.8685,24.7912],[50.8696,24.8021],[50.866,24.8115],[50.8565,24.8207],[50.8532,24.8318],[50.8604,24.861],[50.8587,24.8687],[50.8599,24.889],[50.8574,24.8918],[50.8582,24.9024],[50.8493,24.9121],[50.8504,24.9199],[50.8387,24.9293],[50.8376,24.9346],[50.8265,24.9401],[50.8224,24.9579],[50.8143,24.9626],[50.7971,24.9835],[50.7
968,25.0121],[50.8024,25.0271],[50.8135,25.0385],[50.811,25.0574],[50.8057,25.0685],[50.8057,25.0743],[50.7995,25.0816]],[[51.4862,25.237],[51.474,25.2541],[51.4688,25.2505],[51.4807,25.2331],[51.4862,25.237]]]},properties:{name:"Ar Rayyān",id:"QA-RA",NAME_ENG:"Al Rayyan",CNTRY:"Qatar",TYPE:"Baladiyah",TYPE_ENG:"Municipality"},id:"QA-RA"},{type:"Feature",geometry:{type:"MultiPolygon",coordinates:[[[[51.4725,25.1672],[51.4319,25.1511],[51.3943,25.2067],[51.4305,25.2279],[51.4725,25.1672]]],[[[51.5301,25.3829],[51.5257,25.3699],[51.5295,25.3618],[51.5245,25.3551],[51.5307,25.3509],[51.5214,25.3394],[51.5248,25.331],[51.5373,25.3303],[51.5391,25.3244],[51.5193,25.3115],[51.516,25.3022],[51.5254,25.2938],[51.5346,25.2937],[51.5371,25.299],[51.5456,25.2981],[51.5466,25.2903],[51.5566,25.2855],[51.566,25.2928],[51.5784,25.2933],[51.58,25.287],[51.5935,25.2876],[51.6005,25.2987],[51.6056,25.3],[51.6265,25.2521],[51.6144,25.2491],[51.5993,25.2408],[51.6008,25.2235],[51.6167,25.209],[51.6174,25.1917],[51.6087,25.1944],[51.5928,25.1908],[51.579,25.2182],[51.5376,25.2035],[51.5378,25.2086],[51.5302,25.2278],[51.5253,25.2331],[51.5002,25.2463],[51.483,25.2577],[51.4962,25.2675],[51.4748,25.2929],[51.4686,25.3223],[51.4633,25.336],[51.4498,25.3607],[51.4483,25.3664],[51.4957,25.3946],[51.4979,25.3777],[51.5106,25.3777],[51.5191,25.3827],[51.5221,25.379],[51.5301,25.3829]]],[[[51.5474,25.3745],[51.5562,25.3769],[51.5675,25.357],[51.5498,25.3623],[51.541,25.3732],[51.5339,25.3656],[51.532,25.3744],[51.5401,25.379],[51.5474,25.3745]]],[[[51.4862,25.237],[51.4807,25.2331],[51.4688,25.2505],[51.474,25.2541],[51.4862,25.237]]],[[[51.5813,25.3404],[51.569,25.3433],[51.5703,25.3491],[51.5778,25.3491],[51.5813,25.3404]]]]},properties:{name:"Ad 
Dawḩah",id:"QA-DA",NAME_ENG:"Doha",CNTRY:"Qatar",TYPE:"Baladiyah",TYPE_ENG:"Municipality"},id:"QA-DA"},{type:"Feature",geometry:{type:"Polygon",coordinates:[[[51.4941,25.9544],[51.5135,25.9504],[51.5232,25.9446],[51.5354,25.9326],[51.5485,25.9282],[51.5682,25.9194],[51.5766,25.9242],[51.5816,25.9188],[51.5899,25.9181],[51.5941,25.9131],[51.5872,25.908],[51.5896,25.9041],[51.5836,25.8929],[51.5961,25.8922],[51.6279,25.9078],[51.6286,25.9067],[51.5964,25.891],[51.5717,25.8919],[51.5712,25.8812],[51.5833,25.8636],[51.5927,25.8686],[51.6293,25.885],[51.6341,25.8893],[51.6408,25.8875],[51.5843,25.8616],[51.5879,25.8542],[51.593,25.8117],[51.5996,25.7983],[51.5985,25.784],[51.5917,25.7716],[51.5922,25.7596],[51.5818,25.7563],[51.5775,25.7504],[51.5686,25.7527],[51.5689,25.7589],[51.5752,25.7648],[51.5676,25.7703],[51.5601,25.7609],[51.5583,25.7429],[51.5498,25.7458],[51.5422,25.7579],[51.5318,25.7562],[51.5338,25.7482],[51.5409,25.7386],[51.5473,25.7376],[51.5574,25.7319],[51.565,25.7218],[51.5756,25.7149],[51.5798,25.7225],[51.5735,25.7265],[51.5778,25.7405],[51.5896,25.7357],[51.5894,25.726],[51.5942,25.7164],[51.5868,25.7011],[51.5814,25.6981],[51.5813,25.6848],[51.5735,25.6787],[51.5681,25.6825],[51.5525,25.6809],[51.555,25.6859],[51.5631,25.6874],[51.5586,25.7042],[51.5426,25.7045],[51.538,25.7069],[51.5227,25.7002],[51.5109,25.6983],[51.5072,25.691],[51.5171,25.6863],[51.54,25.6838],[51.5326,25.672],[51.5316,25.6643],[51.5521,25.6621],[51.5528,25.6391],[51.5385,25.6406],[51.494,25.6402],[51.4586,25.6559],[51.4454,25.6487],[51.4372,25.647],[51.4151,25.627],[51.4019,25.6255],[51.414,25.5681],[51.3184,25.5631],[51.2639,25.5901],[51.2424,25.6229],[51.2425,25.6258],[51.2576,25.6694],[51.2597,25.6988],[51.2589,25.7057],[51.2737,25.7206],[51.1155,25.7206],[51.1155,25.7778],[50.9788,25.7776],[50.981,25.7815],[50.979,25.7913],[50.9882,25.7998],[50.9835,25.8051],[50.9871,25.814],[50.9793,25.8262],[50.969,25.8257],[50.9576,25.8307],[50.9574,25.8429],[50.9607,25.8496],[51.1156,
25.8496],[51.1156,25.951],[51.1319,25.951],[51.1319,25.9681],[51.1628,25.9683],[51.1571,25.951],[51.2674,25.951],[51.2686,25.8965],[51.3426,25.8967],[51.3477,25.8948],[51.4122,25.9274],[51.4472,25.9446],[51.4706,25.9492],[51.4941,25.9504],[51.4941,25.9544]]]},properties:{name:"Al Khawr wa adh Dhakhīrah",id:"QA-KH",NAME_ENG:"Al Khor",CNTRY:"Qatar",TYPE:"Baladiyah",TYPE_ENG:"Municipality"},id:"QA-KH"},{type:"Feature",geometry:{type:"Polygon",coordinates:[[[51.4941,25.9544],[51.4941,25.9504],[51.4706,25.9492],[51.4472,25.9446],[51.4122,25.9274],[51.3477,25.8948],[51.3426,25.8967],[51.2686,25.8965],[51.2674,25.951],[51.1571,25.951],[51.1628,25.9683],[51.1319,25.9681],[51.1319,25.951],[51.1156,25.951],[51.1156,25.8496],[50.9607,25.8496],[50.9579,25.8571],[50.9646,25.8624],[50.9743,25.864],[50.9818,25.8699],[50.9721,25.8785],[50.9785,25.894],[50.9846,25.8951],[50.9904,25.9007],[50.9926,25.9151],[50.9957,25.9182],[50.9924,25.9363],[50.9932,25.9546],[50.989,25.9585],[50.9893,25.9674],[50.986,25.9749],[50.9888,25.9835],[50.9935,25.9851],[50.9999,25.9737],[50.9999,25.9649],[51.004,25.9599],[51.0112,25.9615],[51.0232,25.9688],[51.0232,25.9863],[51.0346,25.9857],[51.0321,26.0043],[51.039,26.0179],[51.0365,26.0263],[51.0396,26.0301],[51.0385,26.041],[51.0407,26.0449],[51.0532,26.0515],[51.0662,26.054],[51.0799,26.0621],[51.086,26.0729],[51.0962,26.0679],[51.1024,26.0704],[51.1104,26.081],[51.1168,26.0801],[51.1201,26.0715],[51.1282,26.0718],[51.1515,26.0857],[51.1565,26.0985],[51.1568,26.1121],[51.1735,26.1243],[51.1846,26.1293],[51.1843,26.136],[51.189,26.1393],[51.2082,26.1399],[51.2149,26.1449],[51.2215,26.1404],[51.2335,26.1446],[51.2393,26.1557],[51.2549,26.1574],[51.2628,26.1471],[51.2706,26.1467],[51.2774,26.1421],[51.2829,26.1458],[51.2667,26.162],[51.2769,26.1606],[51.2843,26.151],[51.2998,26.1369],[51.3099,26.1335],[51.3179,26.1262],[51.3271,26.1229],[51.341,26.111],[51.3585,26.1037],[51.3604,26.0993],[51.3568,26.086],[51.3601,26.0746],[51.3571,26.0707],[51.3574,26.053
7],[51.379,26.0215],[51.399,26.011],[51.4004,25.9971],[51.3987,25.9885],[51.401,25.9818],[51.401,25.9679],[51.4193,25.9557],[51.4404,25.9479],[51.4493,25.9476],[51.4871,25.9543],[51.4941,25.9544]]]},properties:{name:"Ash Shamāl",id:"QA-MS",NAME_ENG:"Al Shamal",CNTRY:"Qatar",TYPE:"Baladiyah",TYPE_ENG:"Municipality"},id:"QA-MS"},{type:"Feature",geometry:{type:"Polygon",coordinates:[[[50.9788,25.7776],[51.1155,25.7778],[51.1155,25.7206],[51.2737,25.7206],[51.2589,25.7057],[51.2597,25.6988],[51.2576,25.6694],[51.2425,25.6258],[51.2424,25.6229],[51.2639,25.5901],[51.2639,25.5771],[51.2528,25.5773],[51.2561,25.567],[51.2623,25.5667],[51.2642,25.5609],[51.2639,25.4],[51.2653,25.3991],[51.3139,25.3923],[51.3145,25.311],[51.3091,25.3064],[51.2702,25.3071],[51.272,25.2989],[51.2859,25.2768],[51.2984,25.2514],[51.322,25.1936],[51.3227,25.1815],[51.3255,25.1758],[51.2152,25.1351],[51.1871,25.1233],[51.1664,25.1101],[51.1467,25.1358],[51.1396,25.1411],[51.1003,25.1576],[51.0147,25.1736],[51.015,25.0944],[50.9932,25.083],[50.7995,25.0816],[50.7957,25.0893],[50.786,25.0999],[50.7732,25.1074],[50.7704,25.1124],[50.7713,25.124],[50.7751,25.144],[50.7743,25.1629],[50.7674,25.1824],[50.7647,25.1991],[50.7653,25.2078],[50.7622,25.216],[50.7645,25.2348],[50.761,25.2525],[50.7629,25.2704],[50.7611,25.2829],[50.7687,25.2988],[50.7642,25.3254],[50.7634,25.3441],[50.7592,25.3477],[50.7614,25.3571],[50.7613,25.3755],[50.7592,25.3803],[50.7614,25.39],[50.7532,25.3993],[50.7565,25.4096],[50.7492,25.4203],[50.7532,25.4286],[50.7534,25.4377],[50.7605,25.4715],[50.7663,25.4831],[50.7635,25.4961],[50.7564,25.5055],[50.77,25.518],[50.7857,25.5296],[50.7977,25.5281],[50.8057,25.517],[50.8111,25.4969],[50.8173,25.4943],[50.8206,25.4875],[50.8197,25.4779],[50.8227,25.4643],[50.8318,25.4583],[50.8371,25.4589],[50.8452,25.4652],[50.846,25.4715],[50.8386,25.4759],[50.8336,25.4825],[50.8416,25.4905],[50.8322,25.5044],[50.8337,25.5095],[50.827,25.5177],[50.8246,25.5243],[50.8167,25.5261],[50.8229,25.5432],
[50.8331,25.5539],[50.8309,25.5628],[50.8333,25.5712],[50.8261,25.5884],[50.8264,25.5955],[50.8164,25.5986],[50.8021,25.5915],[50.7959,25.5938],[50.7935,25.6026],[50.7988,25.6068],[50.8009,25.614],[50.8116,25.6181],[50.8184,25.6166],[50.8223,25.6219],[50.8307,25.6225],[50.8505,25.6023],[50.85,25.6152],[50.858,25.6193],[50.8401,25.6289],[50.8368,25.6348],[50.8425,25.639],[50.8555,25.6413],[50.8625,25.6401],[50.865,25.6329],[50.877,25.6306],[50.8743,25.6222],[50.8717,25.6041],[50.8672,25.5923],[50.8598,25.5846],[50.8576,25.5768],[50.8659,25.5707],[50.8686,25.562],[50.8649,25.5486],[50.8703,25.5279],[50.877,25.5208],[50.8868,25.517],[50.8906,25.522],[50.8877,25.5305],[50.8953,25.5302],[50.8932,25.5224],[50.8989,25.5143],[50.8922,25.5072],[50.886,25.512],[50.8751,25.5015],[50.873,25.4953],[50.8782,25.4884],[50.8765,25.4782],[50.883,25.4751],[50.8894,25.4851],[50.8991,25.4839],[50.9034,25.4779],[50.9125,25.4853],[50.9027,25.5005],[50.9132,25.5046],[50.9189,25.5124],[50.9129,25.5277],[50.9172,25.5329],[50.9127,25.5391],[50.9064,25.5419],[50.9114,25.5622],[50.9155,25.5685],[50.9135,25.5775],[50.9231,25.5897],[50.92,25.5972],[50.9291,25.6076],[50.9402,25.6073],[50.9462,25.597],[50.9567,25.5936],[50.9645,25.5983],[50.9702,25.605],[50.9679,25.6167],[50.9644,25.624],[50.9487,25.6291],[50.9408,25.6277],[50.9334,25.6226],[50.9275,25.6237],[50.9145,25.6336],[50.9018,25.6486],[50.8977,25.6665],[50.8938,25.674],[50.8966,25.6787],[50.8958,25.6922],[50.8915,25.7137],[50.8988,25.7206],[50.9022,25.7307],[50.9148,25.7402],[50.904,25.7665],[50.9058,25.7811],[50.9165,25.7884],[50.9228,25.7966],[50.9288,25.8091],[50.9359,25.8085],[50.9446,25.8025],[50.9536,25.7909],[50.9574,25.7832],[50.9518,25.7679],[50.966,25.7651],[50.9674,25.7707],[50.9788,25.7776]]]},properties:{name:"Ash Shīḩānīyah",id:"QA-SH",NAME_ENG:"Al 
Shahaniya",CNTRY:"Qatar",TYPE:"Baladiyah",TYPE_ENG:"Municipality"},id:"QA-SH"},{type:"Feature",geometry:{type:"Polygon",coordinates:[[[51.2639,25.5901],[51.3184,25.5631],[51.414,25.5681],[51.4087,25.5147],[51.4096,25.451],[51.4129,25.4367],[51.4498,25.3607],[51.4407,25.356],[51.4305,25.367],[51.4221,25.3678],[51.4171,25.3642],[51.4212,25.3502],[51.4191,25.3439],[51.4111,25.3445],[51.4101,25.354],[51.3902,25.3546],[51.3903,25.368],[51.3611,25.3689],[51.3609,25.3882],[51.3139,25.3923],[51.2653,25.3991],[51.2639,25.4],[51.2642,25.5609],[51.2623,25.5667],[51.2561,25.567],[51.2528,25.5773],[51.2639,25.5771],[51.2639,25.5901]]]},properties:{name:"Umm Şalāl",id:"QA-US",NAME_ENG:"Umm Salal",CNTRY:"Qatar",TYPE:"Baladiyah",TYPE_ENG:"Municipality"},id:"QA-US"},{type:"Feature",geometry:{type:"Polygon",coordinates:[[[51.5376,25.2035],[51.579,25.2182],[51.5928,25.1908],[51.6087,25.1944],[51.6174,25.1917],[51.6151,25.1781],[51.6104,25.1694],[51.6095,25.1542],[51.6171,25.1462],[51.6188,25.1405],[51.6158,25.1323],[51.6196,25.1073],[51.6171,25.0942],[51.6189,25.0869],[51.6085,25.0701],[51.6083,25.0558],[51.6135,25.0395],[51.6227,25.0406],[51.6384,25.0377],[51.6349,25.034],[51.6372,25.0253],[51.6281,25.0174],[51.6227,25.0082],[51.6212,24.9992],[51.6095,24.9781],[51.5908,24.9772],[51.5819,24.9664],[51.585,24.9585],[51.5916,24.9669],[51.5971,24.9641],[51.5908,24.9527],[51.5762,24.9318],[51.5637,24.9089],[51.5519,24.8942],[51.5481,24.8923],[51.534,24.8721],[51.5154,24.8596],[51.5107,24.8532],[51.504,24.8351],[51.4924,24.8162],[51.4901,24.7965],[51.4818,24.7826],[51.4821,24.7626],[51.4782,24.7512],[51.469,24.7376],[51.4593,24.7265],[51.4504,24.7101],[51.4487,24.7001],[51.4412,24.6815],[51.4376,24.6651],[51.4276,24.6524],[51.4212,24.6497],[51.415,24.638],[51.4055,24.6388],[51.3958,24.6344],[51.3834,24.6361],[51.3814,24.6246],[51.3759,24.6157],[51.3754,24.6075],[51.3793,24.6017],[51.3743,24.5951],[51.3732,24.5861],[51.3653,24.5787],[51.3533,24.5733],[51.3393,24.5584],[51.3336,24.562],[51.33
65,24.5731],[51.3276,24.5875],[51.3245,24.5968],[51.3332,24.6031],[51.3396,24.6005],[51.3526,24.6185],[51.3543,24.6345],[51.3486,24.6316],[51.3415,24.6235],[51.3443,24.6526],[51.3369,24.6518],[51.3311,24.6409],[51.3241,24.6446],[51.3211,24.6389],[51.3054,24.6384],[51.2962,24.6501],[51.2921,24.6516],[51.2934,24.6612],[51.2912,24.6735],[51.2819,24.6606],[51.2802,24.6488],[51.2741,24.6551],[51.2611,24.6524],[51.2552,24.6472],[51.2468,24.6511],[51.239,24.6442],[51.2287,24.6402],[51.2215,24.6449],[51.217,24.6219],[51.2273,24.6157],[51.2367,24.6138],[51.2552,24.6132],[51.2674,24.6165],[51.2835,24.6041],[51.2905,24.6042],[51.2964,24.5925],[51.3118,24.5972],[51.2988,24.5748],[51.3,24.5487],[51.2993,24.5308],[51.2932,24.5228],[51.2933,24.5142],[51.2997,24.5049],[51.1162,24.4657],[51.1147,24.5496],[51.1139,24.8275],[51.0941,24.8272],[51.0941,24.8488],[51.084,24.8486],[51.0844,24.8847],[51.0942,24.8845],[51.0944,24.9205],[51.1136,24.921],[51.1133,24.9946],[51.2124,24.9946],[51.2131,25.1125],[51.3117,25.0869],[51.3163,25.0831],[51.3339,25.0757],[51.354,25.0975],[51.359,25.1015],[51.3734,25.1032],[51.4102,25.0977],[51.3786,25.1281],[51.3977,25.137],[51.4319,25.1511],[51.4725,25.1672],[51.5016,25.1804],[51.5106,25.1923],[51.5167,25.1964],[51.5376,25.2035]]]},properties:{name:"Al Wakrah",id:"QA-WA",NAME_ENG:"Al 
Wakrah",CNTRY:"Qatar",TYPE:"Baladiyah",TYPE_ENG:"Municipality"},id:"QA-WA"},{type:"Feature",geometry:{type:"MultiPolygon",coordinates:[[[[51.5528,25.6391],[51.5479,25.634],[51.5468,25.6229],[51.542,25.6191],[51.5296,25.6169],[51.5269,25.6259],[51.5167,25.6296],[51.5126,25.6175],[51.5145,25.6119],[51.4945,25.6066],[51.4889,25.5941],[51.4881,25.5801],[51.4907,25.5669],[51.4981,25.5658],[51.4983,25.555],[51.504,25.5496],[51.4919,25.5469],[51.4837,25.531],[51.4772,25.5252],[51.4744,25.5074],[51.4838,25.5004],[51.49,25.4917],[51.4904,25.4774],[51.5064,25.4718],[51.5108,25.4679],[51.5132,25.4592],[51.5227,25.4436],[51.5297,25.439],[51.5339,25.4278],[51.5194,25.426],[51.5211,25.4174],[51.5282,25.4214],[51.5336,25.396],[51.5299,25.3912],[51.5301,25.3829],[51.5221,25.379],[51.5191,25.3827],[51.5106,25.3777],[51.4979,25.3777],[51.4957,25.3946],[51.4483,25.3664],[51.4498,25.3607],[51.4129,25.4367],[51.4096,25.451],[51.4087,25.5147],[51.414,25.5681],[51.4019,25.6255],[51.4151,25.627],[51.4372,25.647],[51.4454,25.6487],[51.4586,25.6559],[51.494,25.6402],[51.5385,25.6406],[51.5528,25.6391]]],[[[51.571,25.4032],[51.565,25.4012],[51.5578,25.41],[51.5612,25.4128],[51.571,25.4032]]]]},properties:{name:"Az̧ Za̧`āyin",id:"QA-ZA",NAME_ENG:"Al Daayen",CNTRY:"Qatar",TYPE:"Baladiyah",TYPE_ENG:"Municipality"},id:"QA-ZA"}]};window.am4geodata_cc=i}},["XPBY"]);
//# sourceMappingURL=qatarLow.js.map
|
// Vue 3 icon definition for the "Rust" brand glyph (Font Awesome set).
// Consumed by an icon registry; `name`/`vendor`/`type`/`tags` are lookup metadata.
import { h } from 'vue'
export default {
  name: "Rust",
  // Source icon pack identifier — "Fa" presumably means Font Awesome; confirm against the registry.
  vendor: "Fa",
  type: "Brand",
  // Search keywords for icon pickers.
  tags: ["rust"],
  // Renders a fixed 512x512-viewBox SVG scaled via CSS (`v-icon` class,
  // `currentColor` fill). The glyph outline is injected through `innerHTML`
  // as a pre-built <path> — the path data is opaque generated output; do not
  // hand-edit it.
  render() {
    return h(
      "svg",
      {"xmlns":"http://www.w3.org/2000/svg","viewBox":"0 0 512 512","class":"v-icon","fill":"currentColor","data-name":"fa-rust","innerHTML":"<path d='M508.52,249.75,486.7,236.24c-.17-2-.34-3.93-.55-5.88l18.72-17.5a7.35,7.35,0,0,0-2.44-12.25l-24-9c-.54-1.88-1.08-3.78-1.67-5.64l15-20.83a7.35,7.35,0,0,0-4.79-11.54l-25.42-4.15c-.9-1.73-1.79-3.45-2.73-5.15l10.68-23.42a7.35,7.35,0,0,0-6.95-10.39l-25.82.91q-1.79-2.22-3.61-4.4L439,81.84A7.36,7.36,0,0,0,430.16,73L405,78.93q-2.17-1.83-4.4-3.61l.91-25.82a7.35,7.35,0,0,0-10.39-7L367.7,53.23c-1.7-.94-3.43-1.84-5.15-2.73L358.4,25.08a7.35,7.35,0,0,0-11.54-4.79L326,35.26c-1.86-.59-3.75-1.13-5.64-1.67l-9-24a7.35,7.35,0,0,0-12.25-2.44l-17.5,18.72c-1.95-.21-3.91-.38-5.88-.55L262.25,3.48a7.35,7.35,0,0,0-12.5,0L236.24,25.3c-2,.17-3.93.34-5.88.55L212.86,7.13a7.35,7.35,0,0,0-12.25,2.44l-9,24c-1.89.55-3.79,1.08-5.66,1.68l-20.82-15a7.35,7.35,0,0,0-11.54,4.79l-4.15,25.41c-1.73.9-3.45,1.79-5.16,2.73L120.88,42.55a7.35,7.35,0,0,0-10.39,7l.92,25.81c-1.49,1.19-3,2.39-4.42,3.61L81.84,73A7.36,7.36,0,0,0,73,81.84L78.93,107c-1.23,1.45-2.43,2.93-3.62,4.41l-25.81-.91a7.42,7.42,0,0,0-6.37,3.26,7.35,7.35,0,0,0-.57,7.13l10.66,23.41c-.94,1.7-1.83,3.43-2.73,5.16L25.08,153.6a7.35,7.35,0,0,0-4.79,11.54l15,20.82c-.59,1.87-1.13,3.77-1.68,5.66l-24,9a7.35,7.35,0,0,0-2.44,12.25l18.72,17.5c-.21,1.95-.38,3.91-.55,5.88L3.48,249.75a7.35,7.35,0,0,0,0,12.5L25.3,275.76c.17,2,.34,3.92.55,5.87L7.13,299.13a7.35,7.35,0,0,0,2.44,12.25l24,9c.55,1.89,1.08,3.78,1.68,5.65l-15,20.83a7.35,7.35,0,0,0,4.79,11.54l25.42,4.15c.9,1.72,1.79,3.45,2.73,5.14L42.56,391.12a7.35,7.35,0,0,0,.57,7.13,7.13,7.13,0,0,0,6.37,3.26l25.83-.91q1.77,2.22,3.6,4.4L73,430.16A7.36,7.36,0,0,0,81.84,439L107,433.07q2.18,1.83,4.41,3.61l-.92,25.82a7.35,7.35,0,0,0,10.39,6.95l23.43-10.68c1.69.94,3.42,1.83,5.14,2.73l4.15,25.42a7.34,7.34,0,0,0,11.54,4.78l20.83-15c1.86.6,3.76,1.13,5.65,1.68l9,24a7.36,7.36,0,0,0,12.25,2.44l17.5-18.72c1.95.21,3.92.38,5.88.55l13.51,21.82a7.35,7.35,0,0,0,12.5,0l13.51-21.82c2-.17,3.93-.34,5.88-.56l17.5,18.73a7.36,7.36,0,0,0,12.25-2.44l9-24c1.89-.55,3.78-1.08,5.65-1.68l20.82,15a7.34,7.34,0,0,0,11.54-4.78l4.15-25.42c1.72-.9,3.45-1.79,5.15-2.73l23.42,10.68a7.35,7.35,0,0,0,10.39-6.95l-.91-25.82q2.22-1.79,4.4-3.61L430.16,439a7.36,7.36,0,0,0,8.84-8.84L433.07,405q1.83-2.17,3.61-4.4l25.82.91a7.23,7.23,0,0,0,6.37-3.26,7.35,7.35,0,0,0,.58-7.13L458.77,367.7c.94-1.7,1.83-3.43,2.73-5.15l25.42-4.15a7.35,7.35,0,0,0,4.79-11.54l-15-20.83c.59-1.87,1.13-3.76,1.67-5.65l24-9a7.35,7.35,0,0,0,2.44-12.25l-18.72-17.5c.21-1.95.38-3.91.55-5.87l21.82-13.51a7.35,7.35,0,0,0,0-12.5Zm-151,129.08A13.91,13.91,0,0,0,341,389.51l-7.64,35.67A187.51,187.51,0,0,1,177,424.44l-7.64-35.66a13.87,13.87,0,0,0-16.46-10.68l-31.51,6.76a187.38,187.38,0,0,1-16.26-19.21H258.3c1.72,0,2.89-.29,2.89-1.91V309.55c0-1.57-1.17-1.91-2.89-1.91H213.47l.05-34.35H262c4.41,0,23.66,1.28,29.79,25.87,1.91,7.55,6.17,32.14,9.06,40,2.89,8.82,14.6,26.46,27.1,26.46H407a187.3,187.3,0,0,1-17.34,20.09Zm25.77,34.49A15.24,15.24,0,1,1,368,398.08h.44A15.23,15.23,0,0,1,383.24,413.32Zm-225.62-.68a15.24,15.24,0,1,1-15.25-15.25h.45A15.25,15.25,0,0,1,157.62,412.64ZM69.57,234.15l32.83-14.6a13.88,13.88,0,0,0,7.06-18.33L102.69,186h26.56V305.73H75.65A187.65,187.65,0,0,1,69.57,234.15ZM58.31,198.09a15.24,15.24,0,0,1,15.23-15.25H74a15.24,15.24,0,1,1-15.67,15.24Zm155.16,24.49.05-35.32h63.26c3.28,0,23.07,3.77,23.07,18.62,0,12.29-15.19,16.7-27.68,16.7ZM399,306.71c-9.8,1.13-20.63-4.12-22-10.09-5.78-32.49-15.39-39.4-30.57-51.4,18.86-11.95,38.46-29.64,38.46-53.26,0-25.52-17.49-41.59-29.4-49.48-16.76-11-35.28-13.23-40.27-13.23H116.32A187.49,187.49,0,0,1,221.21,70.06l23.47,24.6a13.82,13.82,0,0,0,19.6.44l26.26-25a187.51,187.51,0,0,1,128.37,91.43l-18,40.57A14,14,0,0,0,408,220.43l34.59,15.33a187.12,187.12,0,0,1,.4,32.54H423.71c-1.91,0-2.69,1.27-2.69,3.13v8.82C421,301,409.31,305.58,399,306.71ZM240,60.21A15.24,15.24,0,0,1,255.21,45h.45A15.24,15.24,0,1,1,240,60.21ZM436.84,214a15.24,15.24,0,1,1,0-30.48h.44a15.24,15.24,0,0,1-.44,30.48Z'/>"},
    )
  }
}
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Defines the tabular explainer meta-api for returning the best explanation result based on the given model."""
from .common.base_explainer import BaseExplainer
from .common.constants import Defaults, ExplainParams, Extension, ModelTask
from .common.structured_model_explainer import PureStructuredModelExplainer
from .dataset.decorator import tabular_decorator
from .shap.deep_explainer import DeepExplainer
from .shap.gpu_kernel_explainer import GPUKernelExplainer
from .shap.kernel_explainer import KernelExplainer
from .shap.linear_explainer import LinearExplainer
from .shap.tree_explainer import TreeExplainer
InvalidExplainerErr = 'Could not find valid explainer to explain model'


def _get_uninitialized_explainers(use_gpu):
    """Return the ordered candidate explainer classes the tabular explainer will try.

    The classes are returned uninitialized; the caller attempts to construct
    each one in order and keeps the first that succeeds.

    :param use_gpu: If True, restrict the candidates to the GPU-backed explainer.
    :type use_gpu: bool
    :return: A list of the uninitialized explainers.
    :rtype: list
    """
    gpu_candidates = [GPUKernelExplainer]
    cpu_candidates = [TreeExplainer, DeepExplainer, LinearExplainer, KernelExplainer]
    return gpu_candidates if use_gpu else cpu_candidates
class TabularExplainer(BaseExplainer):
    # Capabilities advertised to the extension framework: this explainer can
    # produce both global and local explanations, and treats the model as a
    # black box.
    available_explanations = [Extension.GLOBAL, Extension.LOCAL]
    explainer_type = Extension.BLACKBOX

    """The tabular explainer meta-api for returning the best explanation result based on the given model.

    :param model: The model or pipeline to explain.
        A model that implements sklearn.predict() or sklearn.predict_proba() or pipeline function that accepts
        a 2d ndarray
    :type model: object
    :param initialization_examples: A matrix of feature vector examples (# examples x # features) for
        initializing the explainer.
    :type initialization_examples: numpy.array or pandas.DataFrame or scipy.sparse.csr_matrix
    :param explain_subset: List of feature indices. If specified, only selects a subset of the
        features in the evaluation dataset for explanation, which will speed up the explanation
        process when number of features is large and the user already knows the set of interested
        features. The subset can be the top-k features from the model summary. This argument is not supported when
        transformations are set.
    :type explain_subset: list[int]
    :param features: A list of feature names.
    :type features: list[str]
    :param classes: Class names as a list of strings. The order of the class names should match
        that of the model output. Only required if explaining classifier.
    :type classes: list[str]
    :param transformations: sklearn.compose.ColumnTransformer or a list of tuples describing the column name and
        transformer. When transformations are provided, explanations are of the features before the transformation.
        The format for a list of transformations is same as the one here:
        https://github.com/scikit-learn-contrib/sklearn-pandas.

        If the user is using a transformation that is not in the list of sklearn.preprocessing transformations
        that are supported by the `interpret-community <https://github.com/interpretml/interpret-community>`_
        package, then this parameter cannot take a list of more than one column as input for the transformation.
        A user can use the following sklearn.preprocessing transformations with a list of columns since these are
        already one to many or one to one: Binarizer, KBinsDiscretizer, KernelCenterer, LabelEncoder, MaxAbsScaler,
        MinMaxScaler, Normalizer, OneHotEncoder, OrdinalEncoder, PowerTransformer, QuantileTransformer,
        RobustScaler, StandardScaler.

        Examples for transformations that work::

            [
                (["col1", "col2"], sklearn_one_hot_encoder),
                (["col3"], None) #col3 passes as is
            ]

            [
                (["col1"], my_own_transformer),
                (["col2"], my_own_transformer),
            ]

        An example of a transformation that would raise an error since it cannot be interpreted as one to many::

            [
                (["col1", "col2"], my_own_transformer)
            ]

        The last example would not work since the interpret-community package can't determine whether
        my_own_transformer gives a many to many or one to many mapping when taking a sequence of columns.
    :type transformations: sklearn.compose.ColumnTransformer or list[tuple]
    :param allow_all_transformations: Allow many to many and many to one transformations
    :type allow_all_transformations: bool
    """

    def __init__(self, model, initialization_examples, explain_subset=None, features=None, classes=None,
                 transformations=None, allow_all_transformations=False, model_task=ModelTask.Unknown,
                 use_gpu=False, **kwargs):
        """Initialize the TabularExplainer.

        :param model: The model or pipeline to explain.
            A model that implements sklearn.predict() or sklearn.predict_proba() or pipeline function that accepts
            a 2d ndarray
        :type model: object
        :param initialization_examples: A matrix of feature vector examples (# examples x # features) for
            initializing the explainer.
        :type initialization_examples: numpy.array or pandas.DataFrame or scipy.sparse.csr_matrix
        :param explain_subset: List of feature indices. If specified, only selects a subset of the
            features in the evaluation dataset for explanation, which will speed up the explanation
            process when number of features is large and the user already knows the set of interested
            features. The subset can be the top-k features from the model summary. This argument is not supported when
            transformations are set.
        :type explain_subset: list[int]
        :param features: A list of feature names.
        :type features: list[str]
        :param classes: Class names as a list of strings. The order of the class names should match
            that of the model output. Only required if explaining classifier.
        :type classes: list[str]
        :param transformations: sklearn.compose.ColumnTransformer or a list of tuples describing the column name and
            transformer. When transformations are provided, explanations are of the features before the transformation.
            The format for a list of transformations is same as the one here:
            https://github.com/scikit-learn-contrib/sklearn-pandas.

            If the user is using a transformation that is not in the list of sklearn.preprocessing transformations
            that are supported by the `interpret-community <https://github.com/interpretml/interpret-community>`_
            package, then this parameter cannot take a list of more than one column as input for the transformation.
            A user can use the following sklearn.preprocessing transformations with a list of columns since these are
            already one to many or one to one: Binarizer, KBinsDiscretizer, KernelCenterer, LabelEncoder, MaxAbsScaler,
            MinMaxScaler, Normalizer, OneHotEncoder, OrdinalEncoder, PowerTransformer, QuantileTransformer,
            RobustScaler, StandardScaler.

            Examples for transformations that work::

                [
                    (["col1", "col2"], sklearn_one_hot_encoder),
                    (["col3"], None) #col3 passes as is
                ]

                [
                    (["col1"], my_own_transformer),
                    (["col2"], my_own_transformer),
                ]

            An example of a transformation that would raise an error since it cannot be interpreted as one to many::

                [
                    (["col1", "col2"], my_own_transformer)
                ]

            The last example would not work since the interpret-community package can't determine whether
            my_own_transformer gives a many to many or one to many mapping when taking a sequence of columns.
        :type transformations: sklearn.compose.ColumnTransformer or list[tuple]
        :param allow_all_transformations: Allow many to many and many to one transformations
        :type allow_all_transformations: bool
        :param model_task: Optional parameter to specify whether the model is a classification or regression model.
            In most cases, the type of the model can be inferred based on the shape of the output, where a classifier
            has a predict_proba method and outputs a 2 dimensional array, while a regressor has a predict method and
            outputs a 1 dimensional array.
        :type model_task: str
        """
        super(TabularExplainer, self).__init__(**kwargs)
        self._logger.debug('Initializing TabularExplainer')
        # explain_subset indexes into post-transformation columns, so the two
        # options are mutually exclusive.
        if transformations is not None and explain_subset is not None:
            raise ValueError("explain_subset not supported with non-None transformations")
        self.model = model
        self.features = features
        self.classes = classes
        self.explain_subset = explain_subset
        self.transformations = transformations
        # Forward the shared explanation parameters to whichever underlying
        # explainer ends up being constructed below.
        kwargs[ExplainParams.EXPLAIN_SUBSET] = self.explain_subset
        kwargs[ExplainParams.FEATURES] = features
        kwargs[ExplainParams.CLASSES] = classes
        uninitialized_explainers = _get_uninitialized_explainers(use_gpu)
        is_valid = False
        last_exception = None
        # Try each candidate explainer in priority order and keep the first one
        # whose constructor succeeds for this model; failures are logged and
        # the next candidate is attempted.
        for uninitialized_explainer in uninitialized_explainers:
            try:
                # PureStructuredModelExplainer subclasses (e.g. TreeExplainer)
                # take only the model; the others also need the
                # initialization examples.
                if issubclass(uninitialized_explainer, PureStructuredModelExplainer):
                    self.explainer = uninitialized_explainer(
                        self.model, transformations=transformations,
                        allow_all_transformations=allow_all_transformations, **kwargs)
                else:
                    # Note: Unlike DeepExplainer, LinearExplainer does not need model_task
                    if uninitialized_explainer != LinearExplainer:
                        kwargs[ExplainParams.MODEL_TASK] = model_task
                    else:
                        kwargs.pop(ExplainParams.MODEL_TASK, None)
                    self.explainer = uninitialized_explainer(
                        self.model, initialization_examples, transformations=transformations,
                        allow_all_transformations=allow_all_transformations,
                        **kwargs)
                self._method = self.explainer._method
                self._logger.info('Initialized valid explainer {} with args {}'.format(self.explainer, kwargs))
                is_valid = True
                break
            except Exception as ex:
                # Remember the failure so the final error can chain the most
                # recent cause if every candidate fails.
                last_exception = ex
                self._logger.info('Failed to initialize explainer {} due to error: {}'
                                  .format(uninitialized_explainer, ex))
        if not is_valid:
            self._logger.info(InvalidExplainerErr)
            raise ValueError(InvalidExplainerErr) from last_exception

    @tabular_decorator
    def explain_global(self, evaluation_examples, sampling_policy=None, include_local=True,
                       batch_size=Defaults.DEFAULT_BATCH_SIZE):
        """Globally explains the black box model or function.

        :param evaluation_examples: A matrix of feature vector examples (# examples x # features) on which
            to explain the model's output.
        :type evaluation_examples: numpy.array or pandas.DataFrame or scipy.sparse.csr_matrix
        :param sampling_policy: Optional policy for sampling the evaluation examples. See documentation on
            SamplingPolicy for more information.
        :type sampling_policy: interpret_community.common.policy.SamplingPolicy
        :param include_local: Include the local explanations in the returned global explanation.
            If include_local is False, will stream the local explanations to aggregate to global.
        :type include_local: bool
        :param batch_size: If include_local is False, specifies the batch size for aggregating
            local explanations to global.
        :type batch_size: int
        :return: A model explanation object. It is guaranteed to be a GlobalExplanation. If SHAP is used for the
            explanation, it will also have the properties of a LocalExplanation and the ExpectedValuesMixin. If the
            model does classification, it will have the properties of the PerClassMixin.
        :rtype: DynamicGlobalExplanation
        """
        kwargs = {ExplainParams.SAMPLING_POLICY: sampling_policy,
                  ExplainParams.INCLUDE_LOCAL: include_local,
                  ExplainParams.BATCH_SIZE: batch_size}
        # Delegate to the concrete explainer selected during __init__.
        return self.explainer.explain_global(evaluation_examples, **kwargs)

    @tabular_decorator
    def explain_local(self, evaluation_examples):
        """Locally explains the black box model or function.

        :param evaluation_examples: A matrix of feature vector examples (# examples x # features) on which
            to explain the model's output.
        :type evaluation_examples: numpy.array or pandas.DataFrame or scipy.sparse.csr_matrix
        :return: A model explanation object. It is guaranteed to be a LocalExplanation. If SHAP is used for the
            explanation, it will also have the properties of the ExpectedValuesMixin. If the model does
            classification, it will have the properties of the ClassesMixin.
        :rtype: DynamicLocalExplanation
        """
        # Delegate to the concrete explainer selected during __init__.
        return self.explainer.explain_local(evaluation_examples)
|
var env = process.env.NODE_ENV || 'local',
multer = require("multer"),
storage = multer.memoryStorage(),
bodyParser = require("body-parser"),
flash = require('connect-flash'),
cookieParser = require("cookie-parser"),
logger = require("morgan"),
session = require("express-session"),
express = require("express")
/**
 * CORS middleware: allows any origin, the common HTTP methods, and the
 * custom request headers this API accepts, then short-circuits CORS
 * preflight (OPTIONS) requests with an empty 200 response so they never
 * reach the routing layer.
 *
 * @param {object} req - Express request.
 * @param {object} res - Express response.
 * @param {Function} next - continues the middleware chain for non-OPTIONS requests.
 */
var allowCrossDomain = function(req, res, next) {
    res.header("Access-Control-Allow-Origin", "*");
    res.header('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS');
    res.header('Access-Control-Allow-Headers', 'Content-Type, Content-Length, SessionToken, Authorization');
    // Intercept the preflight: respond immediately instead of routing.
    // (Fixed: use strict equality instead of the loose `==` Yoda condition.)
    if (req.method === 'OPTIONS') {
        res.status(200).end();
    } else {
        next();
    }
};
module.exports = function(app, sessionStore, passport, config) {
app.set('port', process.env.PORT || config[env].port);
app.use(logger(":method :url :response-time ms - :res[content-length] b"))
app.use(express.static(process.cwd() + '/public'))
app.set('views', require('path').normalize(__dirname) + '/../app/frontend/views')
app.set('view engine', 'pug');
app.set('view options', {
layout: 'layouts/default'
})
app.use(allowCrossDomain);
app.use(bodyParser.urlencoded({
extended: true
}))
app.use(bodyParser.json({
limit: '50mb'
}))
app.use(cookieParser())
app.use(bodyParser.urlencoded({
limit: '50mb',
extended: true
}))
app.use(multer({
storage: storage
}).single("file"))
if (sessionStore) {
app.use(session({
store: sessionStore,
secret: 'secret',
key: 'express.sid',
saveUninitialized: true,
resave: true
}))
// flash messages init
app.use(flash());
}
if (passport) {
app.use(passport.initialize())
app.use(passport.session())
}
}
|
# Copyright @ 2019 Alibaba. All rights reserved.
# Created by ruhuan on 2019.08.31
""" python wrapper file for mnn converter tool """
from __future__ import print_function
import os
import argparse
import _tools as Tools
def usage():
    """Print the command-line usage summary for the mnnconvert tool."""
    help_lines = (
        "usage: mnnconvert [-h]",
        " [--framework {TF,CAFFE,ONNX,TFLITE,MNN}",
        " [--modelFile MODELFILE]",
        " [--prototxt PROTOTXT]",
        " [--MNNModel MNNMODEL]",
        " [--fp16 {True,False}]",
        " [--weightQuantBits {num of bits for weight-only-quant, default:0, which means no quant}]",
        " [--weightQuantAsymmetric {True,False use asymmetric quant method for weight-only-quant, "
        "the default method is symmetric quant, which is compatible with old MNN versions. "
        "you can set this flag to True use asymmetric quant method to improve accuracy of the weight-quant model in some cases, "
        "but asymmetric quant model cannot run on old MNN versions. You will need to upgrade MNN to new version to solve this problem. "
        "default: False, which means using SYMMETRIC quant method}]",
        " [--compressionParamsFile COMPRESSION_PARAMS_PATH]",
    )
    for line in help_lines:
        print(line)
def main():
    """Parse command-line arguments and invoke the native MNN converter.

    Validates the source framework, model paths and optional compression
    parameters, then calls ``Tools.mnnconvert``.

    :return: 0 on success, -1 on invalid arguments or missing input files.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--framework", type=str,\
        choices=['TF', 'CAFFE', 'ONNX', 'TFLITE', 'MNN'], default='TF',\
        required=True, help="model type, for example:TF/CAFFE/ONNX/TFLITE/MNN")
    parser.add_argument("--modelFile", type=str, required=True,\
        help="tensorflow Pb or caffeModel, for example:xxx.pb/xxx.caffemodel")
    parser.add_argument("--prototxt", type=str,\
        help="only used for caffe, for example: xxx.prototxt")
    parser.add_argument("--MNNModel", type=str, required=True,\
        help="MNN model, ex: xxx.mnn")
    parser.add_argument("--fp16", type=bool, default=False,\
        help="{True,False}\
        Boolean to change the mnn usage. If True, the output\
        model save data in half_float type")
    parser.add_argument("--weightQuantBits", type=int, default=0)
    parser.add_argument("--weightQuantAsymmetric", type=bool, default=False)
    parser.add_argument("--compressionParamsFile", type=str, default=None,
                        help="The path of model compression file that stores the int8 calibration \
table for quantization or auxiliary parameters for sparsity.")
    # Numeric framework ids expected by the native converter (_tools).
    # (Fixed: the original if-chain compared the raw 'TF' value without
    # .upper(), inconsistently with every other branch.)
    framework_ids = {'TF': 0, 'CAFFE': 1, 'ONNX': 2, 'MNN': 3, 'TFLITE': 4}
    args = parser.parse_args()
    framework = args.framework.upper()
    if framework in framework_ids:
        framework_type = framework_ids[framework]
    else:
        usage()
        return -1
    if args.modelFile is None or not os.path.exists(args.modelFile):
        print("modelfile not exist")
        return -1
    if args.MNNModel is None:
        usage()
        return -1
    if framework == 'CAFFE':
        # Caffe needs the network definition alongside the weights.
        if args.prototxt is None or not os.path.exists(args.prototxt):
            print("prototxt file not exist")
            return -1
    else:
        ### just cheat with a not exist name ###
        args.prototxt = "NA.mnn"
    if args.compressionParamsFile is not None and \
            not os.path.exists(args.compressionParamsFile):
        print("Compression params file not exist.")
        return -1
    if args.compressionParamsFile is None:
        # The native binding expects an empty string, not None.
        args.compressionParamsFile = ""
    Tools.mnnconvert(args.MNNModel, args.modelFile, framework_type,\
        args.fp16, args.prototxt, args.weightQuantBits, args.weightQuantAsymmetric, args.compressionParamsFile)
    return 0


if __name__ == "__main__":
    main()
|
const {nativeImage} = require('electron')
const {clipboard} = require('electron')
const SimpleEvent = require("./SimpleEvent").SimpleEvent;
/**
 * Modal dialog that lets the user crop an image with Cropper.js and either
 * insert the result, insert it as PNG, or copy it to the clipboard.
 * Exposes SimpleEvent hooks: eventImageInsert, eventImageInsertPNG, eventClose.
 *
 * Fixes: the generated <img> markup previously ended with a stray '>>'
 * (injecting an extra '>' text node), and the crop-to-nativeImage logic was
 * duplicated in three places — it is now centralized in getCroppedImage().
 */
class ImageInserter {
    /**
     * @param {*} elemModal - element/selector of the Bootstrap modal to manage.
     */
    constructor(elemModal) {
        this.elemModal = $(elemModal);
        // Make the modal focusable so Bootstrap keyboard handling works.
        this.elemModal.attr('tabindex', -1);
        this.eventImageInsert = new SimpleEvent();
        this.eventImageInsertPNG = new SimpleEvent();
        this.elemModal.find(".buttonInsert").click(() => {
            this.eventImageInsert.trigger(this.getCroppedImage());
            this.elemModal.modal("hide");
        });
        this.elemModal.find(".buttonInsertPNG").click(() => {
            this.eventImageInsertPNG.trigger(this.getCroppedImage());
            this.elemModal.modal("hide");
        });
        this.eventClose = new SimpleEvent();
        this.elemModal.on("hidden.bs.modal", () => this.eventClose.trigger());
    }
    /**
     * Crop the current selection and return it as an Electron nativeImage.
     * Requires show() to have been called first (this.cropper must exist).
     * @returns {Electron.NativeImage} the cropped image.
     */
    getCroppedImage() {
        const croppedCanvas = this.cropper.getCroppedCanvas({
            imageSmoothingEnabled: false,
            imageSmoothingQuality: 'high',
        });
        return nativeImage.createFromDataURL(croppedCanvas.toDataURL());
    }
    /**
     * Open the modal with the given image and (re)initialize Cropper.js on it.
     * @param {Electron.NativeImage} image - image to load into the cropper.
     */
    show(image) {
        // cleanup old cropper instance from a previous show()
        if (this.cropper) {
            this.cropper.destroy();
            this.elemModal.find(".modal-body").html("");
        }
        // show the modal
        this.elemModal.modal({
            show: true,
            keyboard: true,
        });
        // setup image (fixed: tag previously ended with a stray '>>')
        let img = $("<img class='imageInserterImage' src='" + image.toDataURL() + "'>");
        this.elemModal.find(".modal-body").append(img);
        // setup cropperjs
        this.cropper = new Cropper(img[0], {
            viewMode: 0,
        });
        // Defer until Cropper has attached, then start in crop mode with no
        // initial selection.
        setTimeout(() => {
            this.cropper.setDragMode("crop");
            this.cropper.clear();
        }, 0);
    }
    /** Copy the current cropped selection to the system clipboard. */
    copyToClipboard() {
        clipboard.writeImage(this.getCroppedImage());
    }
}
exports.ImageInserter = ImageInserter;
|
// NOTE(review): "async do" block with a top-level `await` — syntax from the
// TC39 do-expressions / async-do proposal, NOT valid standard ECMAScript.
// Presumably a parser/transform test fixture rather than runnable code —
// confirm before editing.
async do {
  await 42
}
|
/**
* @license Copyright (c) 2003-2021, CKSource - Frederico Knabben. All rights reserved.
* For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
/**
* @module core/plugincollection
*/
import CKEditorError from '@ckeditor/ckeditor5-utils/src/ckeditorerror';
import EmitterMixin from '@ckeditor/ckeditor5-utils/src/emittermixin';
import mix from '@ckeditor/ckeditor5-utils/src/mix';
/**
* Manages a list of CKEditor plugins, including loading, resolving dependencies and initialization.
*
* @mixes module:utils/emittermixin~EmitterMixin
*/
export default class PluginCollection {
	/**
	 * Creates an instance of the plugin collection class.
	 * Allows loading and initializing plugins and their dependencies.
	 * Allows providing a list of already loaded plugins. These plugins will not be destroyed along with this collection.
	 *
	 * @param {module:core/editor/editor~Editor|module:core/context~Context} context The editor or context owning this collection.
	 * @param {Array.<Function>} [availablePlugins] Plugins (constructors) which the collection will be able to use
	 * when {@link module:core/plugincollection~PluginCollection#init} is used with the plugin names (strings, instead of constructors).
	 * Usually, the editor will pass its built-in plugins to the collection so they can later be
	 * used in `config.plugins` or `config.removePlugins` by names.
	 * @param {Iterable.<Array>} contextPlugins A list of already initialized plugins represented by a
	 * `[ PluginConstructor, pluginInstance ]` pair.
	 */
	constructor( context, availablePlugins = [], contextPlugins = [] ) {
		/**
		 * The editor or context which this collection belongs to.
		 *
		 * @protected
		 * @type {module:core/editor/editor~Editor|module:core/context~Context}
		 */
		this._context = context;

		/**
		 * Loaded plugins, keyed by constructor and (once registered via `_add()`) also by plugin name.
		 *
		 * @protected
		 * @type {Map}
		 */
		this._plugins = new Map();

		/**
		 * A map of plugin constructors that can be retrieved by their names.
		 *
		 * @protected
		 * @type {Map.<String|Function,Function>}
		 */
		this._availablePlugins = new Map();

		// Register every named plugin constructor so it can later be required by name.
		for ( const PluginConstructor of availablePlugins ) {
			if ( PluginConstructor.pluginName ) {
				this._availablePlugins.set( PluginConstructor.pluginName, PluginConstructor );
			}
		}

		/**
		 * Map of {@link module:core/contextplugin~ContextPlugin context plugins} which can be retrieved by their constructors or instances.
		 *
		 * @protected
		 * @type {Map<Function,Function>}
		 */
		this._contextPlugins = new Map();

		for ( const [ PluginConstructor, pluginInstance ] of contextPlugins ) {
			// Map both directions: constructor -> instance and instance -> constructor.
			this._contextPlugins.set( PluginConstructor, pluginInstance );
			this._contextPlugins.set( pluginInstance, PluginConstructor );

			// To make it possible to require a plugin by its name.
			if ( PluginConstructor.pluginName ) {
				this._availablePlugins.set( PluginConstructor.pluginName, PluginConstructor );
			}
		}
	}

	/**
	 * Iterable interface.
	 *
	 * Returns `[ PluginConstructor, pluginInstance ]` pairs.
	 *
	 * @returns {Iterable.<Array>}
	 */
	* [ Symbol.iterator ]() {
		for ( const entry of this._plugins ) {
			// `_plugins` also contains name->instance entries; only yield the
			// constructor-keyed ones so each plugin appears exactly once.
			if ( typeof entry[ 0 ] == 'function' ) {
				yield entry;
			}
		}
	}

	/**
	 * Gets the plugin instance by its constructor or name.
	 *
	 *		// Check if 'Clipboard' plugin was loaded.
	 *		if ( editor.plugins.has( 'Clipboard' ) ) {
	 *			// Get clipboard plugin instance
	 *			const clipboard = editor.plugins.get( 'Clipboard' );
	 *
	 *			this.listenTo( clipboard, 'inputTransformation', ( evt, data ) => {
	 *				// Do something on clipboard input.
	 *			} );
	 *		}
	 *
	 * **Note**: This method will throw an error if a plugin is not loaded. Use `{@link #has editor.plugins.has()}`
	 * to check if a plugin is available.
	 *
	 * @param {Function|String} key The plugin constructor or {@link module:core/plugin~PluginInterface.pluginName name}.
	 * @returns {module:core/plugin~PluginInterface}
	 */
	get( key ) {
		const plugin = this._plugins.get( key );

		if ( !plugin ) {
			// Resolve a human-readable name for the error message.
			let pluginName = key;

			if ( typeof key == 'function' ) {
				pluginName = key.pluginName || key.name;
			}

			/**
			 * The plugin is not loaded and could not be obtained.
			 *
			 * Plugin classes (constructors) need to be provided to the editor and must be loaded before they can be obtained from
			 * the plugin collection.
			 * This is usually done in CKEditor 5 builds by setting the {@link module:core/editor/editor~Editor.builtinPlugins}
			 * property.
			 *
			 * **Note**: You can use `{@link module:core/plugincollection~PluginCollection#has editor.plugins.has()}`
			 * to check if a plugin was loaded.
			 *
			 * @error plugincollection-plugin-not-loaded
			 * @param {String} plugin The name of the plugin which is not loaded.
			 */
			throw new CKEditorError( 'plugincollection-plugin-not-loaded', this._context, { plugin: pluginName } );
		}

		return plugin;
	}

	/**
	 * Checks if a plugin is loaded.
	 *
	 *		// Check if the 'Clipboard' plugin was loaded.
	 *		if ( editor.plugins.has( 'Clipboard' ) ) {
	 *			// Now use the clipboard plugin instance:
	 *			const clipboard = editor.plugins.get( 'Clipboard' );
	 *
	 *			// ...
	 *		}
	 *
	 * @param {Function|String} key The plugin constructor or {@link module:core/plugin~PluginInterface.pluginName name}.
	 * @returns {Boolean}
	 */
	has( key ) {
		return this._plugins.has( key );
	}

	/**
	 * Initializes a set of plugins and adds them to the collection.
	 *
	 * @param {Array.<Function|String>} plugins An array of {@link module:core/plugin~PluginInterface plugin constructors}
	 * or {@link module:core/plugin~PluginInterface.pluginName plugin names}.
	 * @param {Array.<String|Function>} [removedPlugins] Names of the plugins or plugin constructors
	 * that should not be loaded (despite being specified in the `plugins` array).
	 * @returns {Promise.<module:core/plugin~LoadedPlugins>} A promise which gets resolved once all plugins are loaded
	 * and available in the collection.
	 */
	init( plugins, removedPlugins = [] ) {
		// Plugin initialization procedure consists of 2 main steps:
		// 1) collecting all available plugin constructors,
		// 2) verification whether all required plugins can be instantiated.
		//
		// In the first step, all plugin constructors, available in the provided `plugins` array and inside
		// plugin's dependencies (from the `Plugin.requires` array), are recursively collected and added to the existing
		// `this._availablePlugins` map, but without any verification at the given moment. Performing the verification
		// at this point (during the plugin constructor searching) would cause false errors to occur, that some plugin
		// is missing but in fact it may be defined further in the array as the dependency of other plugin. After
		// traversing the entire dependency tree, it will be checked if all required "top level" plugins are available.
		//
		// In the second step, the list of plugins that have not been explicitly removed is traversed to get all the
		// plugin constructors to be instantiated in the correct order and to validate against some rules. Finally, if
		// no plugin is missing and no other error has been found, they all will be instantiated.
		const that = this;
		const context = this._context;

		findAvailablePluginConstructors( plugins );

		validatePlugins( plugins );

		const pluginsToLoad = plugins.filter( plugin => !isPluginRemoved( plugin, removedPlugins ) );

		const pluginConstructors = [ ...getPluginConstructors( pluginsToLoad ) ];

		const pluginInstances = loadPlugins( pluginConstructors );

		return initPlugins( pluginInstances, 'init' )
			.then( () => initPlugins( pluginInstances, 'afterInit' ) )
			.then( () => pluginInstances );

		// A plugin may be referenced either by its constructor or by its name (string).
		function isPluginConstructor( plugin ) {
			return typeof plugin === 'function';
		}

		// Whether the constructor declares itself usable within a context (not only a full editor).
		function isContextPlugin( plugin ) {
			return isPluginConstructor( plugin ) && plugin.isContextPlugin;
		}

		// Whether the plugin was listed in `removedPlugins`, matched by identity or by name in either direction.
		function isPluginRemoved( plugin, removedPlugins ) {
			return removedPlugins.some( removedPlugin => {
				if ( removedPlugin === plugin ) {
					return true;
				}

				if ( getPluginName( plugin ) === removedPlugin ) {
					return true;
				}

				if ( getPluginName( removedPlugin ) === plugin ) {
					return true;
				}

				return false;
			} );
		}

		// Resolves a display/lookup name: `pluginName` (or function name) for constructors, the string itself otherwise.
		function getPluginName( plugin ) {
			return isPluginConstructor( plugin ) ?
				plugin.pluginName || plugin.name :
				plugin;
		}

		// Step 1: recursively registers every named constructor (including `requires` dependencies)
		// in `that._availablePlugins`, without validating anything yet.
		function findAvailablePluginConstructors( plugins, processed = new Set() ) {
			plugins.forEach( plugin => {
				if ( !isPluginConstructor( plugin ) ) {
					return;
				}

				if ( processed.has( plugin ) ) {
					return;
				}

				processed.add( plugin );

				if ( plugin.pluginName && !that._availablePlugins.has( plugin.pluginName ) ) {
					that._availablePlugins.set( plugin.pluginName, plugin );
				}

				if ( plugin.requires ) {
					findAvailablePluginConstructors( plugin.requires, processed );
				}
			} );
		}

		// Step 2: resolves names to constructors and orders them so that dependencies
		// (from `requires`) come before their dependents; validates dependencies on the way.
		function getPluginConstructors( plugins, processed = new Set() ) {
			return plugins
				.map( plugin => {
					return isPluginConstructor( plugin ) ?
						plugin :
						that._availablePlugins.get( plugin );
				} )
				.reduce( ( result, plugin ) => {
					if ( processed.has( plugin ) ) {
						return result;
					}

					processed.add( plugin );

					if ( plugin.requires ) {
						validatePlugins( plugin.requires, plugin );

						getPluginConstructors( plugin.requires, processed ).forEach( plugin => result.add( plugin ) );
					}

					return result.add( plugin );
				}, new Set() );
		}

		// Runs all the per-plugin checks; `parentPluginConstructor` is set when validating a `requires` list.
		function validatePlugins( plugins, parentPluginConstructor = null ) {
			plugins
				.map( plugin => {
					return isPluginConstructor( plugin ) ?
						plugin :
						that._availablePlugins.get( plugin ) || plugin;
				} )
				.forEach( plugin => {
					checkMissingPlugin( plugin, parentPluginConstructor );
					checkContextPlugin( plugin, parentPluginConstructor );
					checkRemovedPlugin( plugin, parentPluginConstructor );
				} );
		}

		// Throws if `plugin` is still a string at this point, i.e. no constructor was found for that name.
		function checkMissingPlugin( plugin, parentPluginConstructor ) {
			if ( isPluginConstructor( plugin ) ) {
				return;
			}

			if ( parentPluginConstructor ) {
				/**
				 * A required "soft" dependency was not found on plugin list.
				 *
				 * Plugin classes (constructors) need to be provided to the editor before they can be loaded by name.
				 * This is usually done in CKEditor 5 builds by setting the
				 * {@link module:core/editor/editor~Editor.builtinPlugins} property. Alternatively they can be provided using
				 * {@link module:core/editor/editorconfig~EditorConfig#plugins} or
				 * {@link module:core/editor/editorconfig~EditorConfig#extraPlugins} configuration.
				 *
				 * **If you see this warning when using one of the {@glink builds/index CKEditor 5 Builds}**, it means
				 * that you didn't add the required plugin to the plugins list when loading the editor.
				 *
				 * @error plugincollection-soft-required
				 * @param {String} plugin The name of the required plugin.
				 * @param {String} requiredBy The name of the plugin that was requiring other plugin.
				 */
				throw new CKEditorError(
					'plugincollection-soft-required',
					context,
					{ plugin, requiredBy: getPluginName( parentPluginConstructor ) }
				);
			}

			/**
			 * A plugin is not available and could not be loaded.
			 *
			 * Plugin classes (constructors) need to be provided to the editor before they can be loaded by name.
			 * This is usually done in CKEditor 5 builds by setting the {@link module:core/editor/editor~Editor.builtinPlugins}
			 * property.
			 *
			 * **If you see this warning when using one of the {@glink builds/index CKEditor 5 Builds}**, it means
			 * that you try to enable a plugin which was not included in that build. This may be due to a typo
			 * in the plugin name or simply because that plugin is not a part of this build. In the latter scenario,
			 * read more about {@glink builds/guides/development/custom-builds custom builds}.
			 *
			 * **If you see this warning when using one of the editor creators directly** (not a build), then it means
			 * that you tried loading plugins by name. However, unlike CKEditor 4, CKEditor 5 does not implement a "plugin loader".
			 * This means that CKEditor 5 does not know where to load the plugin modules from. Therefore, you need to
			 * provide each plugin through a reference (as a constructor function). Check out the examples in
			 * {@glink builds/guides/integration/advanced-setup#scenario-2-building-from-source "Building from source"}.
			 *
			 * @error plugincollection-plugin-not-found
			 * @param {String} plugin The name of the plugin which could not be loaded.
			 */
			throw new CKEditorError(
				'plugincollection-plugin-not-found',
				context,
				{ plugin }
			);
		}

		// Throws if a context plugin requires a non-context plugin.
		function checkContextPlugin( plugin, parentPluginConstructor ) {
			if ( !isContextPlugin( parentPluginConstructor ) ) {
				return;
			}

			if ( isContextPlugin( plugin ) ) {
				return;
			}

			/**
			 * If a plugin is a context plugin, all plugins it requires should also be context plugins
			 * instead of plugins. In other words, if one plugin can be used in the context,
			 * all its requirements should also be ready to be used in the context. Note that the context
			 * provides only a part of the API provided by the editor. If one plugin needs a full
			 * editor API, all plugins which require it are considered as plugins that need a full
			 * editor API.
			 *
			 * @error plugincollection-context-required
			 * @param {String} plugin The name of the required plugin.
			 * @param {String} requiredBy The name of the parent plugin.
			 */
			throw new CKEditorError(
				'plugincollection-context-required',
				context,
				{ plugin: getPluginName( plugin ), requiredBy: getPluginName( parentPluginConstructor ) }
			);
		}

		// Throws if a plugin's dependency was explicitly removed via `removedPlugins`.
		function checkRemovedPlugin( plugin, parentPluginConstructor ) {
			if ( !parentPluginConstructor ) {
				return;
			}

			if ( !isPluginRemoved( plugin, removedPlugins ) ) {
				return;
			}

			/**
			 * Cannot load a plugin because one of its dependencies is listed in the `removePlugins` option.
			 *
			 * @error plugincollection-required
			 * @param {String} plugin The name of the required plugin.
			 * @param {String} requiredBy The name of the parent plugin.
			 */
			throw new CKEditorError(
				'plugincollection-required',
				context,
				{ plugin: getPluginName( plugin ), requiredBy: getPluginName( parentPluginConstructor ) }
			);
		}

		// Instantiates each constructor (reusing already-initialized context plugin instances)
		// and registers every instance in the collection.
		function loadPlugins( pluginConstructors ) {
			return pluginConstructors.map( PluginConstructor => {
				const pluginInstance = that._contextPlugins.get( PluginConstructor ) || new PluginConstructor( context );

				that._add( PluginConstructor, pluginInstance );

				return pluginInstance;
			} );
		}

		// Calls `method` ('init' or 'afterInit') sequentially on each instance, skipping
		// context plugins (already initialized) and instances without that method.
		function initPlugins( pluginInstances, method ) {
			return pluginInstances.reduce( ( promise, plugin ) => {
				if ( !plugin[ method ] ) {
					return promise;
				}

				if ( that._contextPlugins.has( plugin ) ) {
					return promise;
				}

				return promise.then( plugin[ method ].bind( plugin ) );
			}, Promise.resolve() );
		}
	}

	/**
	 * Destroys all loaded plugins.
	 *
	 * Context plugins are skipped — they are owned (and destroyed) by the context, not this collection.
	 *
	 * @returns {Promise}
	 */
	destroy() {
		const promises = [];

		for ( const [ , pluginInstance ] of this ) {
			if ( typeof pluginInstance.destroy == 'function' && !this._contextPlugins.has( pluginInstance ) ) {
				promises.push( pluginInstance.destroy() );
			}
		}

		return Promise.all( promises );
	}

	/**
	 * Adds the plugin to the collection. Exposed mainly for testing purposes.
	 *
	 * @protected
	 * @param {Function} PluginConstructor The plugin constructor.
	 * @param {module:core/plugin~PluginInterface} plugin The instance of the plugin.
	 */
	_add( PluginConstructor, plugin ) {
		this._plugins.set( PluginConstructor, plugin );

		const pluginName = PluginConstructor.pluginName;

		if ( !pluginName ) {
			return;
		}

		if ( this._plugins.has( pluginName ) ) {
			/**
			 * Two plugins with the same {@link module:core/plugin~PluginInterface.pluginName} were loaded.
			 * This will lead to runtime conflicts between these plugins.
			 *
			 * In practice, this warning usually means that new plugins were added to an existing CKEditor 5 build.
			 * Plugins should always be added to a source version of the editor (`@ckeditor/ckeditor5-editor-*`),
			 * not to an editor imported from one of the `@ckeditor/ckeditor5-build-*` packages.
			 *
			 * Check your import paths and the list of plugins passed to
			 * {@link module:core/editor/editor~Editor.create `Editor.create()`}
			 * or specified in {@link module:core/editor/editor~Editor.builtinPlugins `Editor.builtinPlugins`}.
			 *
			 * The second option is that your `node_modules/` directory contains duplicated versions of the same
			 * CKEditor 5 packages. Normally, on clean installations, npm deduplicates packages in `node_modules/`, so
			 * it may be enough to call `rm -rf node_modules && npm i`. However, if you installed conflicting versions
			 * of some packages, their dependencies may need to be installed in more than one version which may lead to this
			 * warning.
			 *
			 * Technically speaking, this error occurs because after adding a plugin to an existing editor build
			 * the dependencies of this plugin are being duplicated.
			 * They are already built into that editor build and now get added for the second time as dependencies
			 * of the plugin you are installing.
			 *
			 * Read more about {@glink builds/guides/integration/installing-plugins installing plugins}.
			 *
			 * @error plugincollection-plugin-name-conflict
			 * @param {String} pluginName The duplicated plugin name.
			 * @param {Function} plugin1 The first plugin constructor.
			 * @param {Function} plugin2 The second plugin constructor.
			 */
			throw new CKEditorError(
				'plugincollection-plugin-name-conflict',
				null,
				{ pluginName, plugin1: this._plugins.get( pluginName ).constructor, plugin2: PluginConstructor }
			);
		}

		this._plugins.set( pluginName, plugin );
	}
}

mix( PluginCollection, EmitterMixin );
|
"""
Reference
https://github.com/VisionLearningGroup/VisionLearningGroup.github.io/tree/master/M3SDA
"""
import torch.nn as nn
from torch.nn import functional as F
from .build import BACKBONE_REGISTRY
from .backbone import Backbone
class FeatureExtractor(Backbone):
    """CNN feature extractor used for the Digit-5 benchmark (M3SDA).

    Input: image batches of shape (batch, 3, 32, 32); the spatial size is
    enforced by ``_check_input``. Output: features of shape (batch, 2048).
    """

    def __init__(self):
        super().__init__()
        # Three conv blocks (5x5 kernels, padding 2 keeps spatial size);
        # spatial resolution is halved by each max-pool in forward().
        self.conv1 = nn.Conv2d(3, 64, kernel_size=5, stride=1, padding=2)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=5, stride=1, padding=2)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2)
        self.bn3 = nn.BatchNorm2d(128)
        # 128 channels * 8 * 8 spatial positions = 8192 flattened features.
        self.fc1 = nn.Linear(8192, 3072)
        self.bn1_fc = nn.BatchNorm1d(3072)
        self.fc2 = nn.Linear(3072, 2048)
        self.bn2_fc = nn.BatchNorm1d(2048)
        self._out_features = 2048

    def _check_input(self, x):
        # Reject inputs that are not 32x32: fc1's input size (8192) and the
        # hard-coded view() in forward() depend on this exact resolution.
        H, W = x.shape[2:]
        assert (
            H == 32 and W == 32
        ), "Input to network must be 32x32, " "but got {}x{}".format(H, W)

    def forward(self, x):
        self._check_input(x)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.max_pool2d(x, stride=2, kernel_size=3, padding=1)  # 32 -> 16
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.max_pool2d(x, stride=2, kernel_size=3, padding=1)  # 16 -> 8
        x = F.relu(self.bn3(self.conv3(x)))
        # Flatten to (batch, 8192) for the fully connected head.
        x = x.view(x.size(0), 8192)
        x = F.relu(self.bn1_fc(self.fc1(x)))
        # Dropout (default p=0.5) is only active in training mode.
        x = F.dropout(x, training=self.training)
        x = F.relu(self.bn2_fc(self.fc2(x)))
        return x
@BACKBONE_REGISTRY.register()
def cnn_digit5_m3sda(**kwargs):
    """Builds the CNN backbone used for the Digit-5 dataset.

    Reference:
        Peng et al. Moment Matching for Multi-Source Domain Adaptation.
        ICCV 2019.
    """
    backbone = FeatureExtractor()
    return backbone
|
"""TODO."""
import os
import re
import getpass
import tempfile
from testplan.testing.filtering import Filter
from testplan.testing.ordering import NoopSorter
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan import Testplan, defaults
from testplan.common.entity.base import Environment, ResourceStatus
from testplan.common.utils.context import context
from testplan.common.utils.path import StdFiles, default_runpath
from testplan.common.utils.testing import log_propagation_disabled
from testplan.testing.multitest.driver.base import Driver
from testplan.testing.multitest.driver.tcp import TCPServer, TCPClient
from testplan.common.utils.logger import TESTPLAN_LOGGER
@testsuite
class MySuite(object):
    """Validates TCP server/client driver wiring, usage and shared context."""

    @testcase
    def test_drivers(self, env, result):
        # Environment wiring: both drivers exist under their configured
        # names, have runpaths on disk, and are STARTED while the test runs.
        assert isinstance(env, Environment)
        assert isinstance(env.server, TCPServer)
        assert env.server.cfg.name == "server"
        assert os.path.exists(env.server.runpath)
        assert isinstance(env.server.context, Environment)
        assert isinstance(env.client, TCPClient)
        assert env.client.cfg.name == "client"
        assert os.path.exists(env.client.runpath)
        assert isinstance(env.client.context, Environment)
        # Drivers can reach each other through the shared context.
        assert env.server.context.client == env.client
        assert env.client.context.server == env.server
        assert env.server.status.tag == ResourceStatus.STARTED
        assert env.client.status.tag == ResourceStatus.STARTED

    @testcase
    def test_drivers_usage(self, env, result):
        """
        Client ---"Hello"---> Server ---"World"---> Client
        """
        env.server.accept_connection()
        msg = b"Hello"
        env.client.send(msg)
        # Server received data; each result.equal() appends one assertion
        # entry, so entry counts are checked before/after every exchange.
        assert len(result.entries) == 0
        result.equal(env.server.receive(len(msg)), msg, "Server received")
        assert len(result.entries) == 1
        assertion = result.entries[-1]
        assert bool(assertion) is True
        assert assertion.first == assertion.second == msg
        resp = b"World"
        env.server.send(resp)
        # Client received response
        result.equal(env.client.receive(len(resp)), resp, "Client received")
        assert len(result.entries) == 2
        assertion = result.entries[-1]
        assert bool(assertion) is True
        assert assertion.first == assertion.second == resp

    @testcase
    def test_context_access(self, env, result):
        """
        Test context access from env and drivers.
        """
        # initial_context entries are reachable both as attributes and keys.
        assert isinstance(env, Environment)
        assert env.test_key == env["test_key"] == "test_value"
        assert env is env.server.context
        assert env is env.client.context
def test_multitest_drivers(runpath):
    """Runs a MultiTest with TCP server/client drivers, both with an explicit
    runpath (idx 0) and with the default runpath (idx 1), and checks the
    driver lifecycle NONE -> STARTED (during run) -> STOPPED."""
    for idx, opts in enumerate(
        (
            dict(name="Mtest", suites=[MySuite()], runpath=runpath),
            dict(name="Mtest", suites=[MySuite()]),
        )
    ):
        server = TCPServer(name="server")
        # The client resolves host/port from the server's context values.
        client = TCPClient(
            name="client",
            host=context(server.cfg.name, "{{host}}"),
            port=context(server.cfg.name, "{{port}}"),
        )
        opts.update(
            environment=[server, client],
            initial_context={"test_key": "test_value"},
            stdout_style=defaults.STDOUT_STYLE,
            test_filter=Filter(),
            test_sorter=NoopSorter(),
        )
        mtest = MultiTest(**opts)
        # Drivers have not been started yet.
        assert server.status.tag == ResourceStatus.NONE
        assert client.status.tag == ResourceStatus.NONE
        mtest.run()
        res = mtest.result
        assert res.run is True
        if idx == 0:
            assert mtest.runpath == runpath
        else:
            assert mtest.runpath == default_runpath(mtest)
        # Each driver gets its own directory under the MultiTest runpath.
        assert server.runpath == os.path.join(mtest.runpath, server.uid())
        assert client.runpath == os.path.join(mtest.runpath, client.uid())
        # After the run, the environment is torn down.
        assert server.status.tag == ResourceStatus.STOPPED
        assert client.status.tag == ResourceStatus.STOPPED
def test_multitest_drivers_in_testplan(runpath):
    """Same as test_multitest_drivers, but the MultiTest is executed through a
    Testplan; also checks runpath nesting plan -> mtest -> driver."""
    for idx, opts in enumerate(
        (
            dict(name="MyPlan", parse_cmdline=False, runpath=runpath),
            dict(name="MyPlan", parse_cmdline=False),
        )
    ):
        plan = Testplan(**opts)
        server = TCPServer(name="server")
        # The client resolves host/port from the server's context values.
        client = TCPClient(
            name="client",
            host=context(server.cfg.name, "{{host}}"),
            port=context(server.cfg.name, "{{port}}"),
        )
        mtest = MultiTest(
            name="Mtest",
            suites=[MySuite()],
            environment=[server, client],
            initial_context={"test_key": "test_value"},
        )
        plan.add(mtest)
        # Drivers have not been started yet.
        assert server.status.tag == ResourceStatus.NONE
        assert client.status.tag == ResourceStatus.NONE
        with log_propagation_disabled(TESTPLAN_LOGGER):
            plan.run()
        res = plan.result
        assert res.run is True
        if idx == 0:
            assert plan.runpath == runpath
        else:
            assert plan.runpath == default_runpath(plan._runnable)
        # Runpaths nest: plan -> mtest -> drivers.
        assert mtest.runpath == os.path.join(plan.runpath, mtest.uid())
        assert server.runpath == os.path.join(mtest.runpath, server.uid())
        assert client.runpath == os.path.join(mtest.runpath, client.uid())
        # After the run, the environment is torn down.
        assert server.status.tag == ResourceStatus.STOPPED
        assert client.status.tag == ResourceStatus.STOPPED
@testsuite
class EmptySuite(object):
    """Suite with one no-op testcase; used when only the environment
    start/stop behavior is of interest."""

    @testcase
    def test_empty(self, env, result):
        # Intentionally empty — only the driver lifecycle matters.
        pass
class BaseDriver(Driver):
    """Base class of vulnerable driver which can raise exception."""

    @property
    def logpath(self):
        # Prefer the explicitly configured logfile under the runpath;
        # fall back to the captured stdout file.
        if self.cfg.logfile:
            return os.path.join(self.runpath, self.cfg.logfile)
        return self.outpath

    @property
    def outpath(self):
        # Path of the captured stdout file.
        return self.std.out_path

    @property
    def errpath(self):
        # Path of the captured stderr file.
        return self.std.err_path

    def starting(self):
        super(BaseDriver, self).starting()
        # Create stdout/stderr capture files inside the driver runpath.
        self.std = StdFiles(self.runpath)

    def stopping(self):
        super(BaseDriver, self).stopping()
        self.std.close()
class VulnerableDriver1(BaseDriver):
    """This driver raises exception during startup."""

    def starting(self):
        super(VulnerableDriver1, self).starting()
        # Write a line to stderr first so tests can verify that the error
        # log content is surfaced in the report, then fail the startup.
        self.std.err.write("Error found{}".format(os.linesep))
        self.std.err.flush()
        raise Exception("Startup error")
class VulnerableDriver2(BaseDriver):
    """This driver raises exception during shutdown."""

    def stopping(self):
        """Trigger driver stop."""
        super(VulnerableDriver2, self).stopping()
        # Produce a long logfile (1000 lines) so tests can verify that only
        # the configured tail (error_logs_max_lines) is reported.
        with open(self.logpath, "w") as log_handle:
            for idx in range(1000):
                log_handle.write("This is line {}\n".format(idx))
        raise Exception("Shutdown error")
def test_multitest_driver_failure():
    """If driver fails to start or stop, the error log could be fetched."""
    # Plan 1: driver fails during startup, error log read from stderr.
    plan1 = Testplan(name="MyPlan1", parse_cmdline=False)
    plan1.add(
        MultiTest(
            name="Mtest1",
            suites=[MySuite()],
            environment=[
                VulnerableDriver1(
                    name="vulnerable_driver_1", report_errors_from_logs=True
                )
            ],
        )
    )
    with log_propagation_disabled(TESTPLAN_LOGGER):
        plan1.run()
    # Plan 2: driver fails during shutdown; only the last 10 lines of its
    # 1000-line logfile should be reported.
    plan2 = Testplan(name="MyPlan2", parse_cmdline=False)
    plan2.add(
        MultiTest(
            name="Mtest2",
            suites=[MySuite()],
            environment=[
                VulnerableDriver2(
                    name="vulnerable_driver_2",
                    logfile="logfile",
                    report_errors_from_logs=True,
                    error_logs_max_lines=10,
                )
            ],
        )
    )
    with log_propagation_disabled(TESTPLAN_LOGGER):
        plan2.run()
    res1, res2 = plan1.result, plan2.result
    assert res1.run is True and res2.run is True
    report1, report2 = res1.report, res2.report
    # The raised exceptions appear in the first log entry of each report.
    assert "Exception: Startup error" in report1.entries[0].logs[0]["message"]
    assert "Exception: Shutdown error" in report2.entries[0].logs[0]["message"]
    # The second log entry carries the fetched log-file content.
    text1 = report1.entries[0].logs[1]["message"].split(os.linesep)
    text2 = report2.entries[0].logs[1]["message"].split(os.linesep)
    assert re.match(r".*Information from log file:.+stderr.*", text1[0])
    assert re.match(r".*Error found.*", text1[1])
    assert re.match(r".*Information from log file:.+logfile.*", text2[0])
    # Only the tail (lines 990-999) of the 1000-line logfile is included.
    for idx, line in enumerate(text2[1:]):
        assert re.match(r".*This is line 99{}.*".format(idx), line)
|
/**
 * Error raised when a request has exhausted its retry budget.
 */
export default class RetryCountExceededException extends Error {
  /**
   * @param requestContext Context of the request whose retries ran out;
   *        stored on the instance for callers to inspect.
   */
  constructor(requestContext) {
    super('Retry count has been exceeded');
    this.name = this.constructor.name;
    this.requestContext = requestContext;
    // Prefer V8's captureStackTrace (hides this constructor frame);
    // otherwise derive a stack from a throwaway Error.
    if (!('captureStackTrace' in Error)) {
      this.stack = new Error().stack;
      return;
    }
    Error.captureStackTrace(this, RetryCountExceededException);
  }
}
|
import { DatabaseSingleton } from "../core/mongo";
// Main database connection; NOTE(review): obtained eagerly at module load,
// so importing this file requires DatabaseSingleton to be connected.
let db = DatabaseSingleton.getConnectMain();
// Mongoose-style schema wrapper for the "Animal" collection. Name, field
// definitions and the registered model are attached to the prototype.
class AnimalSchema{};
// Model name used when registering with the connection.
AnimalSchema.prototype.name = "Animal";
// Field definitions; every field is required.
AnimalSchema.prototype.schema = {
    type : { type: String, required : true },
    name : { type: String, required : true },
    weight : { type: Number, required : true },
    age : { type: Number, required : true }
};
// Compile and register the model (with createdAt/updatedAt timestamps).
AnimalSchema.prototype.model = db.model(AnimalSchema.prototype.name, new db.Schema(AnimalSchema.prototype.schema, { timestamps: true }));
export {
    AnimalSchema
}
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance test for the Oppia reader view.
Before running this script, exploration 0 should be loaded in the target
server.
Run this script from the Oppia root directory:
python core/tests/reader_view_load_test.py --thread_count=5 --start_uid=1 \
https://my-oppia-instance.appspot.com
"""
__author__ = 'Sean Lip (sll@google.com)'
import argparse
import cookielib
import json
import logging
import sys
import threading
import time
import urllib
import urllib2
# Prefix the server prepends to JSON responses to defeat XSSI attacks;
# it must be stripped from a response body before JSON parsing.
XSSI_PREFIX = ')]}\'\n'

# Command line arguments parser.
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    'base_url', help=('Base URL of the Oppia installation to test'), type=str)
PARSER.add_argument(
    '--start_uid',
    help='Initial value for unique thread identifier.', default=1, type=int)
PARSER.add_argument(
    '--thread_count',
    help='Number of concurrent threads for executing the test.',
    default=1, type=int)
PARSER.add_argument(
    '--iteration_count',
    help='Number of iterations for executing the test. Each thread of each '
    'iteration acts as a unique user with the uid equal to:'
    'start_uid + thread_count * iteration_index.',
    default=1, type=int)
def assert_contains(needle, haystack):
    """Raises an Exception if `needle` does not occur within `haystack`."""
    if needle not in haystack:
        # BUG FIX: interpolate with %; the original passed the values as
        # extra Exception args (logging style), so they never appeared
        # formatted in the message.
        raise Exception('Expected to find term: %s\n%s' % (needle, haystack))
def assert_does_not_contain(needle, haystack):
    """Raises an Exception if `needle` occurs within `haystack`."""
    if needle in haystack:
        # BUG FIX: interpolate with %; the original passed the values as
        # extra Exception args, leaving the message unformatted.
        raise Exception(
            'Did not expect to find term: %s\n%s' % (needle, haystack))
def assert_equals(expected, actual):
    """Raises an Exception if `expected` != `actual`."""
    if expected != actual:
        # BUG FIX: interpolate with %; the original passed the values as
        # extra Exception args, leaving the message unformatted.
        raise Exception('Expected equality of %s and %s.' % (expected, actual))
class WebSession(object):
"""A class that allows navigation of web pages keeping cookie session."""
PROGRESS_LOCK = threading.Lock()
MAX_RETRIES = 3
RETRY_SLEEP_SEC = 3
GET_COUNT = 0
POST_COUNT = 0
RETRY_COUNT = 0
PROGRESS_BATCH = 10
RESPONSE_TIME_HISTOGRAM = [0, 0, 0, 0, 0, 0]
def __init__(self, uid, common_headers=None):
if common_headers is None:
common_headers = {}
self.uid = uid
self.common_headers = common_headers
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(
urllib2.HTTPCookieProcessor(self.cj))
@classmethod
def increment_duration_bucket(cls, index):
cls.RESPONSE_TIME_HISTOGRAM[index] += 1
@classmethod
def update_duration(cls, duration):
if duration > 30:
cls.increment_duration_bucket(0)
elif duration > 15:
cls.increment_duration_bucket(1)
elif duration > 7:
cls.increment_duration_bucket(2)
elif duration > 3:
cls.increment_duration_bucket(3)
elif duration > 1:
cls.increment_duration_bucket(4)
else:
cls.increment_duration_bucket(5)
@classmethod
def log_progress(cls, force=False):
update = ((cls.GET_COUNT + cls.POST_COUNT) % (
cls.PROGRESS_BATCH) == 0)
if update or force:
logging.info(
'GET/POST:[%s, %s], RETRIES:[%s], SLA:%s',
cls.GET_COUNT, cls.POST_COUNT, cls.RETRY_COUNT,
cls.RESPONSE_TIME_HISTOGRAM)
def get_cookie_value(self, name):
for cookie in self.cj:
if cookie.name == name:
return cookie.value
return None
def is_soft_error(self, http_error):
"""Checks if HTTPError is due to starvation of frontend instances."""
body = http_error.fp.read()
# this is the text specific to the front end instance starvation, which
# is a retriable error for both GET and POST; normal HTTP error 500 has
# this specific text '<h1>500 Internal Server Error</h1>'
if http_error.code == 500 and '<h1>Error: Server Error</h1>' in body:
return True
logging.error(
'Non-retriable HTTP %s error:\n%s', http_error.code, body)
return False
def open(self, request, hint):
"""Executes any HTTP request."""
start_time = time.time()
try:
try_count = 0
while True:
try:
return self.opener.open(request)
except urllib2.HTTPError as he:
if (try_count < WebSession.MAX_RETRIES and
self.is_soft_error(he)):
try_count += 1
with WebSession.PROGRESS_LOCK:
WebSession.RETRY_COUNT += 1
time.sleep(WebSession.RETRY_SLEEP_SEC)
continue
raise he
except Exception as e:
logging.info(
'Error in session %s executing: %s', self.uid, hint)
raise e
finally:
with WebSession.PROGRESS_LOCK:
self.update_duration(time.time() - start_time)
def get(self, url, expected_code=200):
"""HTTP GET."""
with WebSession.PROGRESS_LOCK:
WebSession.GET_COUNT += 1
self.log_progress()
request = urllib2.Request(url)
for key, value in self.common_headers.items():
request.add_header(key, value)
response = self.open(request, 'GET %s' % url)
assert_equals(expected_code, response.code)
return response.read()
def post(self, url, args_dict, expected_code=200):
    """HTTP POST.

    Form-encodes args_dict, posts it, asserts the response code and
    returns the response body.
    """
    with WebSession.PROGRESS_LOCK:
        WebSession.POST_COUNT += 1
        self.log_progress()
    encoded_args = urllib.urlencode(args_dict)
    resp = self.open(urllib2.Request(url, encoded_args), 'POST %s' % url)
    assert_equals(expected_code, resp.code)
    return resp.read()
class TaskThread(threading.Thread):
    """Runs a task in a separate thread.

    A failed task records its exception in self.exception so that
    check_all_tasks() can count failures after joining.
    """

    def __init__(self, func, name=None):
        """Initializes the thread.

        Args:
            func: zero-argument callable executed by run().
            name: label used in log messages.
        """
        super(TaskThread, self).__init__()
        self.func = func
        self.exception = None
        self.name = name

    @classmethod
    def start_all_tasks(cls, tasks):
        """Starts all tasks."""
        for task in tasks:
            task.start()

    @classmethod
    def check_all_tasks(cls, tasks):
        """Checks results of all tasks; fails if any task raised."""
        failed_count = 0
        for task in tasks:
            while True:
                # Timeouts should happen after 30 seconds.
                task.join(30)
                # BUGFIX: isAlive() was removed in Python 3.9; is_alive()
                # exists in both Python 2.6+ and 3.
                if task.is_alive():
                    logging.info('Still waiting for: %s.', task.name)
                    continue
                else:
                    break
            if task.exception:
                failed_count += 1
        if failed_count:
            # BUGFIX: use %-interpolation; Exception() does not format
            # logging-style ('msg %s', arg) tuples.
            raise Exception('Tasks failed: %s' % failed_count)

    @classmethod
    def execute_task_list(cls, tasks):
        """Starts all tasks and checks the results."""
        cls.start_all_tasks(tasks)
        cls.check_all_tasks(tasks)

    def run(self):
        try:
            self.func()
        except Exception as e:  # pylint: disable-msg=broad-except
            logging.error('Error in %s: %s', self.name, e)
            # BUGFIX: record the exception so check_all_tasks() sees it;
            # previously only exc_info was set and failures went uncounted.
            self.exception = e
            self.exc_info = sys.exc_info()
            # Bare re-raise preserves the original traceback (the py2-only
            # three-argument raise did the same).
            raise
class ReaderViewLoadTest(object):
    """A reader view load test.

    Walks one reader session through the 'Welcome to Oppia!' exploration,
    asserting on each server response.
    """

    def __init__(self, base_url, uid):
        self.uid = uid
        self.host = base_url
        self.exp_id = None
        self.last_state_name = None
        self.last_params = None
        self.state_history = None
        self.session = WebSession(uid=uid)

    def run(self):
        """Plays the exploration from the first card to the last."""
        self.init_player(
            '0', 'Welcome to Oppia!', 'do you know where the name \'Oppia\'')
        self.submit_and_compare(
            '0', 'In fact, the word Oppia means \'learn\'.')
        self.submit_and_compare('Finish', 'Check your spelling!')
        self.submit_and_compare(
            'Finnish', 'Yes! Oppia is the Finnish word for learn.')

    def _get(self, url):
        return self.session.get(url)

    def _get_json(self, url):
        """Get a JSON response, transformed to a Python object."""
        raw = self.session.get(url)
        if not raw.startswith(XSSI_PREFIX):
            raise Exception('Expected an XSSI prefix; found none.')
        return json.loads(raw[len(XSSI_PREFIX):])

    def _post(self, url, data):
        return self.session.post(url, data)

    def _post_json(self, url, data):
        """Post a JSON request, returning the response as a Python object."""
        raw = self.session.post(str(url), {'payload': json.dumps(data)})
        if not raw.startswith(XSSI_PREFIX):
            raise Exception('Expected an XSSI prefix; found none.')
        return json.loads(raw[len(XSSI_PREFIX):])

    def init_player(self, exploration_id, expected_title, expected_response):
        """Loads the exploration page and records its initial state."""
        self.exp_id = exploration_id
        page = self._get('%s/explore/%s' % (self.host, self.exp_id))
        assert_contains('Learn', page)
        assert_contains('Return to the gallery', page)
        payload = self._get_json(
            '%s/explorehandler/init/%s' % (self.host, self.exp_id))
        assert_equals(payload['title'], expected_title)
        assert_contains(expected_response, payload['init_html'])
        self.last_state_name = payload['state_name']
        self.last_params = payload['params']
        self.state_history = [self.last_state_name]

    def submit_and_compare(self, answer, expected_response):
        """Submits an answer and checks the server's feedback."""
        url = '%s/explorehandler/transition/%s/%s' % (
            self.host, self.exp_id, urllib.quote(self.last_state_name))
        payload = self._post_json(url, {
            'answer': answer, 'handler': 'submit', 'params': self.last_params,
            'state_history': self.state_history,
        })
        assert_contains(expected_response, payload['oppia_html'])
        self.last_state_name = payload['state_name']
        self.last_params = payload['params']
        self.state_history.append(self.last_state_name)
def run_all(args):
    """Runs test scenario in multiple threads.

    Each iteration spawns args.thread_count ReaderViewLoadTest sessions
    with consecutive uids starting at args.start_uid.
    """
    if args.thread_count < 1 or args.thread_count > 256:
        raise Exception('Please use between 1 and 256 threads.')
    start_time = time.time()
    logging.info('Started testing: %s', args.base_url)
    logging.info('base_url: %s', args.base_url)
    logging.info('start_uid: %s', args.start_uid)
    logging.info('thread_count: %s', args.thread_count)
    logging.info('iteration_count: %s', args.iteration_count)
    logging.info('SLAs are [>30s, >15s, >7s, >3s, >1s, <1s]')
    try:
        for iteration_index in range(0, args.iteration_count):
            logging.info('Started iteration: %s', iteration_index)
            # Log a progress line roughly once per thread batch.
            WebSession.PROGRESS_BATCH = args.thread_count
            base_uid = args.start_uid + iteration_index * args.thread_count
            tasks = [
                TaskThread(
                    ReaderViewLoadTest(args.base_url, base_uid + index).run,
                    name='ReaderViewLoadTest-%s' % index)
                for index in range(0, args.thread_count)]
            try:
                TaskThread.execute_task_list(tasks)
            except Exception as e:
                logging.info('Failed iteration: %s', iteration_index)
                raise e
    finally:
        # Always flush final counters, even on failure.
        WebSession.log_progress(force=True)
    logging.info('Done! Duration (s): %s', time.time() - start_time)
if __name__ == '__main__':
    # Entry point: enable INFO-level logging and run with CLI arguments
    # parsed by the module-level PARSER.
    logging.basicConfig(level=logging.INFO)
    run_all(PARSER.parse_args())
|
//----------------------------------------------------------------------------//
// //
// ozz-animation is hosted at http://github.com/guillaumeblanc/ozz-animation //
// and distributed under the MIT License (MIT). //
// //
// Copyright (c) Guillaume Blanc //
// //
// Permission is hereby granted, free of charge, to any person obtaining a //
// copy of this software and associated documentation files (the "Software"), //
// to deal in the Software without restriction, including without limitation //
// the rights to use, copy, modify, merge, publish, distribute, sublicense, //
// and/or sell copies of the Software, and to permit persons to whom the //
// Software is furnished to do so, subject to the following conditions: //
// //
// The above copyright notice and this permission notice shall be included in //
// all copies or substantial portions of the Software. //
// //
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL //
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING //
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER //
// DEALINGS IN THE SOFTWARE. //
// //
//----------------------------------------------------------------------------//
#ifndef OZZ_OZZ_BASE_MATHS_INTERNAL_SIMD_MATH_SSE_INL_H_
#define OZZ_OZZ_BASE_MATHS_INTERNAL_SIMD_MATH_SSE_INL_H_
// SIMD SSE2+ implementation, based on scalar floats.
#include <stdint.h>
#include <cassert>
// Temporarly needed while trigonometric functions aren't implemented.
#include <cmath>
#include "ozz/base/maths/math_constant.h"
namespace ozz {
namespace math {
namespace simd_float4 {
// Internal macros.
// Unused components of the result vector are replicated from the first input
// argument.
#ifdef OZZ_SIMD_AVX
#define OZZ_SHUFFLE_PS1(_v, _m) _mm_permute_ps(_v, _m)
#else // OZZ_SIMD_AVX
#define OZZ_SHUFFLE_PS1(_v, _m) _mm_shuffle_ps(_v, _v, _m)
#endif // OZZ_SIMD_AVX
#define OZZ_SSE_SPLAT_F(_v, _i) OZZ_SHUFFLE_PS1(_v, _MM_SHUFFLE(_i, _i, _i, _i))
#define OZZ_SSE_SPLAT_I(_v, _i) \
_mm_shuffle_epi32(_v, _MM_SHUFFLE(_i, _i, _i, _i))
// _v.x + _v.y, _v.y, _v.z, _v.w
#define OZZ_SSE_HADD2_F(_v) _mm_add_ss(_v, OZZ_SSE_SPLAT_F(_v, 1))
// _v.x + _v.y + _v.z, _v.y, _v.z, _v.w
#define OZZ_SSE_HADD3_F(_v) \
_mm_add_ss(_mm_add_ss(_v, OZZ_SSE_SPLAT_F(_v, 2)), OZZ_SSE_SPLAT_F(_v, 1))
// _v.x + _v.y + _v.z + _v.w, ?, ?, ?
#define OZZ_SSE_HADD4_F(_v, _r) \
do { \
const __m128 haddxyzw = _mm_add_ps(_v, _mm_movehl_ps(_v, _v)); \
_r = _mm_add_ss(haddxyzw, OZZ_SSE_SPLAT_F(haddxyzw, 1)); \
} while (void(0), 0)
// dot2, ?, ?, ?
#define OZZ_SSE_DOT2_F(_a, _b, _r) \
do { \
const __m128 ab = _mm_mul_ps(_a, _b); \
_r = _mm_add_ss(ab, OZZ_SSE_SPLAT_F(ab, 1)); \
\
} while (void(0), 0)
#ifdef OZZ_SIMD_SSE4_1
// dot3, ?, ?, ?
#define OZZ_SSE_DOT3_F(_a, _b, _r) \
do { \
_r = _mm_dp_ps(_a, _b, 0x7f); \
} while (void(0), 0)
// dot4, ?, ?, ?
#define OZZ_SSE_DOT4_F(_a, _b, _r) \
do { \
_r = _mm_dp_ps(_a, _b, 0xff); \
} while (void(0), 0)
#else // OZZ_SIMD_SSE4_1
// dot3, ?, ?, ?
#define OZZ_SSE_DOT3_F(_a, _b, _r) \
do { \
const __m128 ab = _mm_mul_ps(_a, _b); \
_r = OZZ_SSE_HADD3_F(ab); \
} while (void(0), 0)
// dot4, ?, ?, ?
#define OZZ_SSE_DOT4_F(_a, _b, _r) \
do { \
const __m128 ab = _mm_mul_ps(_a, _b); \
OZZ_SSE_HADD4_F(ab, _r); \
} while (void(0), 0)
#endif // OZZ_SIMD_SSE4_1
// FMA operations
#ifdef OZZ_SIMD_FMA
#define OZZ_MADD(_a, _b, _c) _mm_fmadd_ps(_a, _b, _c)
#define OZZ_MSUB(_a, _b, _c) _mm_fmsub_ps(_a, _b, _c)
#define OZZ_NMADD(_a, _b, _c) _mm_fnmadd_ps(_a, _b, _c)
#define OZZ_NMSUB(_a, _b, _c) _mm_fnmsub_ps(_a, _b, _c)
#define OZZ_MADDX(_a, _b, _c) _mm_fmadd_ss(_a, _b, _c)
#define OZZ_MSUBX(_a, _b, _c) _mm_fmsub_ss(_a, _b, _c)
#define OZZ_NMADDX(_a, _b, _c) _mm_fnmadd_ss(_a, _b, _c)
#define OZZ_NMSUBX(_a, _b, _c) _mm_fnmsub_ss(_a, _b, _c)
#else // OZZ_SIMD_FMA
#define OZZ_MADD(_a, _b, _c) _mm_add_ps(_mm_mul_ps(_a, _b), _c)
#define OZZ_MSUB(_a, _b, _c) _mm_sub_ps(_mm_mul_ps(_a, _b), _c)
#define OZZ_NMADD(_a, _b, _c) _mm_sub_ps(_c, _mm_mul_ps(_a, _b))
#define OZZ_NMSUB(_a, _b, _c) (-_mm_add_ps(_mm_mul_ps(_a, _b), _c))
#define OZZ_MADDX(_a, _b, _c) _mm_add_ss(_mm_mul_ss(_a, _b), _c)
#define OZZ_MSUBX(_a, _b, _c) _mm_sub_ss(_mm_mul_ss(_a, _b), _c)
#define OZZ_NMADDX(_a, _b, _c) _mm_sub_ss(_c, _mm_mul_ss(_a, _b))
#define OZZ_NMSUBX(_a, _b, _c) (-_mm_add_ss(_mm_mul_ss(_a, _b), _c))
#endif // OZZ_SIMD_FMA
// Divides the x lanes of _a and _b; y, z and w of the result come from _a.
// NOTE(review): an identical DivX also exists in the enclosing math namespace
// further below — possibly a duplication; confirm which one callers use.
OZZ_INLINE SimdFloat4 DivX(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_div_ss(_a, _b);
}
// Per-lane select: picks _true where the mask _b is set, else _false. With
// SSE4.1, blendv uses only the sign bit of each mask lane; the fallback
// and/andnot/or form requires full all-ones/all-zero masks (as produced by
// the comparison intrinsics).
#ifdef OZZ_SIMD_SSE4_1
#define OZZ_SSE_SELECT_F(_b, _true, _false) \
  _mm_blendv_ps(_false, _true, _mm_castsi128_ps(_b))
#define OZZ_SSE_SELECT_I(_b, _true, _false) _mm_blendv_epi8(_false, _true, _b)
#else  // OZZ_SIMD_SSE4_1
#define OZZ_SSE_SELECT_F(_b, _true, _false)          \
  _mm_or_ps(_mm_and_ps(_true, _mm_castsi128_ps(_b)), \
            _mm_andnot_ps(_mm_castsi128_ps(_b), _false))
#define OZZ_SSE_SELECT_I(_b, _true, _false) \
  _mm_or_si128(_mm_and_si128(_true, _b), _mm_andnot_si128(_b, _false))
#endif  // OZZ_SIMD_SSE4_1
// Returns a vector of four zeros.
OZZ_INLINE SimdFloat4 zero() { return _mm_setzero_ps(); }

// Builds 1.f in every lane with integer ops only: all-ones << 25 then >> 2
// yields 0x3f800000, the IEEE-754 bit pattern of 1.f, avoiding a memory load.
OZZ_INLINE SimdFloat4 one() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_castsi128_ps(
      _mm_srli_epi32(_mm_slli_epi32(_mm_cmpeq_epi32(zero, zero), 25), 2));
}

// Unit axes: build the 1.f bit pattern, then byte-shift a single 1.f lane
// into the wanted slot; all other lanes are zero.
OZZ_INLINE SimdFloat4 x_axis() {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one =
      _mm_srli_epi32(_mm_slli_epi32(_mm_cmpeq_epi32(zero, zero), 25), 2);
  return _mm_castsi128_ps(_mm_srli_si128(one, 12));
}

OZZ_INLINE SimdFloat4 y_axis() {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one =
      _mm_srli_epi32(_mm_slli_epi32(_mm_cmpeq_epi32(zero, zero), 25), 2);
  return _mm_castsi128_ps(_mm_slli_si128(_mm_srli_si128(one, 12), 4));
}

OZZ_INLINE SimdFloat4 z_axis() {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one =
      _mm_srli_epi32(_mm_slli_epi32(_mm_cmpeq_epi32(zero, zero), 25), 2);
  return _mm_castsi128_ps(_mm_slli_si128(_mm_srli_si128(one, 12), 8));
}

OZZ_INLINE SimdFloat4 w_axis() {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one =
      _mm_srli_epi32(_mm_slli_epi32(_mm_cmpeq_epi32(zero, zero), 25), 2);
  return _mm_castsi128_ps(_mm_slli_si128(one, 12));
}

// Loads four floats; note _mm_set_ps takes its arguments in w,z,y,x order.
OZZ_INLINE SimdFloat4 Load(float _x, float _y, float _z, float _w) {
  return _mm_set_ps(_w, _z, _y, _x);
}

// Loads _x into the x lane; y, z, w are zero.
OZZ_INLINE SimdFloat4 LoadX(float _x) { return _mm_set_ss(_x); }

// Broadcasts _x to all four lanes.
OZZ_INLINE SimdFloat4 Load1(float _x) { return _mm_set_ps1(_x); }

// Aligned load; _f must be 16-byte aligned.
OZZ_INLINE SimdFloat4 LoadPtr(const float* _f) {
  assert(!(reinterpret_cast<uintptr_t>(_f) & 0xf) && "Invalid alignment");
  return _mm_load_ps(_f);
}

// Unaligned variants below only require natural float (4-byte) alignment.
OZZ_INLINE SimdFloat4 LoadPtrU(const float* _f) {
  assert(!(reinterpret_cast<uintptr_t>(_f) & 0x3) && "Invalid alignment");
  return _mm_loadu_ps(_f);
}

OZZ_INLINE SimdFloat4 LoadXPtrU(const float* _f) {
  assert(!(reinterpret_cast<uintptr_t>(_f) & 0x3) && "Invalid alignment");
  return _mm_load_ss(_f);
}

OZZ_INLINE SimdFloat4 Load1PtrU(const float* _f) {
  assert(!(reinterpret_cast<uintptr_t>(_f) & 0x3) && "Invalid alignment");
  return _mm_load_ps1(_f);
}

// Loads 2 floats into x,y; z and w are zero (scalar loads zero-fill).
OZZ_INLINE SimdFloat4 Load2PtrU(const float* _f) {
  assert(!(reinterpret_cast<uintptr_t>(_f) & 0x3) && "Invalid alignment");
  return _mm_unpacklo_ps(_mm_load_ss(_f + 0), _mm_load_ss(_f + 1));
}

// Loads 3 floats into x,y,z; w is zero.
OZZ_INLINE SimdFloat4 Load3PtrU(const float* _f) {
  assert(!(reinterpret_cast<uintptr_t>(_f) & 0x3) && "Invalid alignment");
  return _mm_movelh_ps(
      _mm_unpacklo_ps(_mm_load_ss(_f + 0), _mm_load_ss(_f + 1)),
      _mm_load_ss(_f + 2));
}

// Converts four signed 32-bit integers to floats.
OZZ_INLINE SimdFloat4 FromInt(_SimdInt4 _i) { return _mm_cvtepi32_ps(_i); }
}  // namespace simd_float4
// Lane accessors: extract one float from a SimdFloat4.
OZZ_INLINE float GetX(_SimdFloat4 _v) { return _mm_cvtss_f32(_v); }

OZZ_INLINE float GetY(_SimdFloat4 _v) {
  return _mm_cvtss_f32(OZZ_SSE_SPLAT_F(_v, 1));
}

OZZ_INLINE float GetZ(_SimdFloat4 _v) {
  return _mm_cvtss_f32(_mm_movehl_ps(_v, _v));
}

OZZ_INLINE float GetW(_SimdFloat4 _v) {
  return _mm_cvtss_f32(OZZ_SSE_SPLAT_F(_v, 3));
}

// Lane setters: return _v with one lane replaced by the x lane of _f.
OZZ_INLINE SimdFloat4 SetX(_SimdFloat4 _v, _SimdFloat4 _f) {
  return _mm_move_ss(_v, _f);
}

OZZ_INLINE SimdFloat4 SetY(_SimdFloat4 _v, _SimdFloat4 _f) {
  // xfnn = [v.x, f.x, ?, ?]; the shuffle keeps v.z and v.w.
  const __m128 xfnn = _mm_unpacklo_ps(_v, _f);
  return _mm_shuffle_ps(xfnn, _v, _MM_SHUFFLE(3, 2, 1, 0))
}
// Broadcasts a single lane of _v to all four lanes.
OZZ_INLINE SimdFloat4 SplatX(_SimdFloat4 _v) { return OZZ_SSE_SPLAT_F(_v, 0); }
OZZ_INLINE SimdFloat4 SplatY(_SimdFloat4 _v) { return OZZ_SSE_SPLAT_F(_v, 1); }
OZZ_INLINE SimdFloat4 SplatZ(_SimdFloat4 _v) { return OZZ_SSE_SPLAT_F(_v, 2); }
OZZ_INLINE SimdFloat4 SplatW(_SimdFloat4 _v) { return OZZ_SSE_SPLAT_F(_v, 3); }

// Compile-time lane permutation; indices are validated at compile time.
template <size_t _X, size_t _Y, size_t _Z, size_t _W>
OZZ_INLINE SimdFloat4 Swizzle(_SimdFloat4 _v) {
  static_assert(_X <= 3 && _Y <= 3 && _Z <= 3 && _W <= 3,
                "Indices must be between 0 and 3");
  return OZZ_SHUFFLE_PS1(_v, _MM_SHUFFLE(_W, _Z, _Y, _X));
}

// Specializations mapping common swizzles to cheaper single instructions.
template <>
OZZ_INLINE SimdFloat4 Swizzle<0, 1, 2, 3>(_SimdFloat4 _v) {
  return _v;  // Identity.
}

template <>
OZZ_INLINE SimdFloat4 Swizzle<0, 1, 0, 1>(_SimdFloat4 _v) {
  return _mm_movelh_ps(_v, _v);
}

template <>
OZZ_INLINE SimdFloat4 Swizzle<2, 3, 2, 3>(_SimdFloat4 _v) {
  return _mm_movehl_ps(_v, _v);
}

template <>
OZZ_INLINE SimdFloat4 Swizzle<0, 0, 1, 1>(_SimdFloat4 _v) {
  return _mm_unpacklo_ps(_v, _v);
}

template <>
OZZ_INLINE SimdFloat4 Swizzle<2, 2, 3, 3>(_SimdFloat4 _v) {
  return _mm_unpackhi_ps(_v, _v);
}
// AOS<->SOA transpose helpers. TransposeAxB reads A input vectors and writes
// B output vectors; output lanes beyond the input count are zeroed where
// noted.

// _out[0] = [in0.x, in1.x, in2.x, in3.x].
OZZ_INLINE void Transpose4x1(const SimdFloat4 _in[4], SimdFloat4 _out[1]) {
  const __m128 xz = _mm_unpacklo_ps(_in[0], _in[2]);
  const __m128 yw = _mm_unpacklo_ps(_in[1], _in[3]);
  _out[0] = _mm_unpacklo_ps(xz, yw);
}

// Scatters _in[0]'s components to _out[i].x; y, z, w of each output are zero.
OZZ_INLINE void Transpose1x4(const SimdFloat4 _in[1], SimdFloat4 _out[4]) {
  const __m128 zwzw = _mm_movehl_ps(_in[0], _in[0]);
  const __m128 yyyy = OZZ_SSE_SPLAT_F(_in[0], 1);
  const __m128 wwww = OZZ_SSE_SPLAT_F(_in[0], 3);
  const __m128 zero = _mm_setzero_ps();
  _out[0] = _mm_move_ss(zero, _in[0]);
  _out[1] = _mm_move_ss(zero, yyyy);
  _out[2] = _mm_move_ss(zero, zwzw);
  _out[3] = _mm_move_ss(zero, wwww);
}

// _out[0] = all x's, _out[1] = all y's of the 4 inputs.
OZZ_INLINE void Transpose4x2(const SimdFloat4 _in[4], SimdFloat4 _out[2]) {
  const __m128 tmp0 = _mm_unpacklo_ps(_in[0], _in[2]);
  const __m128 tmp1 = _mm_unpacklo_ps(_in[1], _in[3]);
  _out[0] = _mm_unpacklo_ps(tmp0, tmp1);
  _out[1] = _mm_unpackhi_ps(tmp0, tmp1);
}

// Transposes 2 vectors into 4; z and w of each output are zero.
OZZ_INLINE void Transpose2x4(const SimdFloat4 _in[2], SimdFloat4 _out[4]) {
  const __m128 tmp0 = _mm_unpacklo_ps(_in[0], _in[1]);
  const __m128 tmp1 = _mm_unpackhi_ps(_in[0], _in[1]);
  const __m128 zero = _mm_setzero_ps();
  _out[0] = _mm_movelh_ps(tmp0, zero);
  _out[1] = _mm_movehl_ps(zero, tmp0);
  _out[2] = _mm_movelh_ps(tmp1, zero);
  _out[3] = _mm_movehl_ps(zero, tmp1);
}

// _out[0..2] = x's, y's and z's of the 4 inputs (input w's are dropped).
OZZ_INLINE void Transpose4x3(const SimdFloat4 _in[4], SimdFloat4 _out[3]) {
  const __m128 tmp0 = _mm_unpacklo_ps(_in[0], _in[2]);
  const __m128 tmp1 = _mm_unpacklo_ps(_in[1], _in[3]);
  const __m128 tmp2 = _mm_unpackhi_ps(_in[0], _in[2]);
  const __m128 tmp3 = _mm_unpackhi_ps(_in[1], _in[3]);
  _out[0] = _mm_unpacklo_ps(tmp0, tmp1);
  _out[1] = _mm_unpackhi_ps(tmp0, tmp1);
  _out[2] = _mm_unpacklo_ps(tmp2, tmp3);
}

// Transposes 3 vectors into 4; the w lane of each output is zero.
OZZ_INLINE void Transpose3x4(const SimdFloat4 _in[3], SimdFloat4 _out[4]) {
  const __m128 zero = _mm_setzero_ps();
  const __m128 temp0 = _mm_unpacklo_ps(_in[0], _in[1]);
  const __m128 temp1 = _mm_unpacklo_ps(_in[2], zero);
  const __m128 temp2 = _mm_unpackhi_ps(_in[0], _in[1]);
  const __m128 temp3 = _mm_unpackhi_ps(_in[2], zero);
  _out[0] = _mm_movelh_ps(temp0, temp1);
  _out[1] = _mm_movehl_ps(temp1, temp0);
  _out[2] = _mm_movelh_ps(temp2, temp3);
  _out[3] = _mm_movehl_ps(temp3, temp2);
}

// Full 4x4 transpose via the classic unpacklo/unpackhi pattern.
OZZ_INLINE void Transpose4x4(const SimdFloat4 _in[4], SimdFloat4 _out[4]) {
  const __m128 tmp0 = _mm_unpacklo_ps(_in[0], _in[2]);
  const __m128 tmp1 = _mm_unpacklo_ps(_in[1], _in[3]);
  const __m128 tmp2 = _mm_unpackhi_ps(_in[0], _in[2]);
  const __m128 tmp3 = _mm_unpackhi_ps(_in[1], _in[3]);
  _out[0] = _mm_unpacklo_ps(tmp0, tmp1);
  _out[1] = _mm_unpackhi_ps(tmp0, tmp1);
  _out[2] = _mm_unpacklo_ps(tmp2, tmp3);
  _out[3] = _mm_unpackhi_ps(tmp2, tmp3);
}

// Transposes 16 vectors as four independent 4x4 blocks with output stride 4:
// _out[j * 4 + i] receives component j of inputs 4i..4i+3.
// NOTE(review): the name suggests a 16x16 element view — confirm the intended
// matrix layout convention with callers.
OZZ_INLINE void Transpose16x16(const SimdFloat4 _in[16], SimdFloat4 _out[16]) {
  const __m128 tmp0 = _mm_unpacklo_ps(_in[0], _in[2]);
  const __m128 tmp1 = _mm_unpacklo_ps(_in[1], _in[3]);
  _out[0] = _mm_unpacklo_ps(tmp0, tmp1);
  _out[4] = _mm_unpackhi_ps(tmp0, tmp1);
  const __m128 tmp2 = _mm_unpackhi_ps(_in[0], _in[2]);
  const __m128 tmp3 = _mm_unpackhi_ps(_in[1], _in[3]);
  _out[8] = _mm_unpacklo_ps(tmp2, tmp3);
  _out[12] = _mm_unpackhi_ps(tmp2, tmp3);
  const __m128 tmp4 = _mm_unpacklo_ps(_in[4], _in[6]);
  const __m128 tmp5 = _mm_unpacklo_ps(_in[5], _in[7]);
  _out[1] = _mm_unpacklo_ps(tmp4, tmp5);
  _out[5] = _mm_unpackhi_ps(tmp4, tmp5);
  const __m128 tmp6 = _mm_unpackhi_ps(_in[4], _in[6]);
  const __m128 tmp7 = _mm_unpackhi_ps(_in[5], _in[7]);
  _out[9] = _mm_unpacklo_ps(tmp6, tmp7);
  _out[13] = _mm_unpackhi_ps(tmp6, tmp7);
  const __m128 tmp8 = _mm_unpacklo_ps(_in[8], _in[10]);
  const __m128 tmp9 = _mm_unpacklo_ps(_in[9], _in[11]);
  _out[2] = _mm_unpacklo_ps(tmp8, tmp9);
  _out[6] = _mm_unpackhi_ps(tmp8, tmp9);
  const __m128 tmp10 = _mm_unpackhi_ps(_in[8], _in[10]);
  const __m128 tmp11 = _mm_unpackhi_ps(_in[9], _in[11]);
  _out[10] = _mm_unpacklo_ps(tmp10, tmp11);
  _out[14] = _mm_unpackhi_ps(tmp10, tmp11);
  const __m128 tmp12 = _mm_unpacklo_ps(_in[12], _in[14]);
  const __m128 tmp13 = _mm_unpacklo_ps(_in[13], _in[15]);
  _out[3] = _mm_unpacklo_ps(tmp12, tmp13);
  _out[7] = _mm_unpackhi_ps(tmp12, tmp13);
  const __m128 tmp14 = _mm_unpackhi_ps(_in[12], _in[14]);
  const __m128 tmp15 = _mm_unpackhi_ps(_in[13], _in[15]);
  _out[11] = _mm_unpacklo_ps(tmp14, tmp15);
  _out[15] = _mm_unpackhi_ps(tmp14, tmp15);
}
// Fused multiply-add family (maps to FMA instructions when available):
// MAdd = _a*_b + _c, MSub = _a*_b - _c, NMAdd = _c - _a*_b,
// NMSub = -(_a*_b + _c).
OZZ_INLINE SimdFloat4 MAdd(_SimdFloat4 _a, _SimdFloat4 _b, _SimdFloat4 _c) {
  return OZZ_MADD(_a, _b, _c);
}

OZZ_INLINE SimdFloat4 MSub(_SimdFloat4 _a, _SimdFloat4 _b, _SimdFloat4 _c) {
  return OZZ_MSUB(_a, _b, _c);
}

OZZ_INLINE SimdFloat4 NMAdd(_SimdFloat4 _a, _SimdFloat4 _b, _SimdFloat4 _c) {
  return OZZ_NMADD(_a, _b, _c);
}

OZZ_INLINE SimdFloat4 NMSub(_SimdFloat4 _a, _SimdFloat4 _b, _SimdFloat4 _c) {
  return OZZ_NMSUB(_a, _b, _c);
}

// Divides the x lanes; y, z and w of the result come from _a.
OZZ_INLINE SimdFloat4 DivX(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_div_ss(_a, _b);
}

// Horizontal sums of the first 2/3/4 lanes; only the x lane of the result
// is meaningful.
OZZ_INLINE SimdFloat4 HAdd2(_SimdFloat4 _v) { return OZZ_SSE_HADD2_F(_v); }

OZZ_INLINE SimdFloat4 HAdd3(_SimdFloat4 _v) { return OZZ_SSE_HADD3_F(_v); }

OZZ_INLINE SimdFloat4 HAdd4(_SimdFloat4 _v) {
  __m128 hadd4;
  OZZ_SSE_HADD4_F(_v, hadd4);
  return hadd4;
}

// Dot products over the first 2/3/4 lanes; only the x lane of the result
// is meaningful.
OZZ_INLINE SimdFloat4 Dot2(_SimdFloat4 _a, _SimdFloat4 _b) {
  __m128 dot2;
  OZZ_SSE_DOT2_F(_a, _b, dot2);
  return dot2;
}

OZZ_INLINE SimdFloat4 Dot3(_SimdFloat4 _a, _SimdFloat4 _b) {
  __m128 dot3;
  OZZ_SSE_DOT3_F(_a, _b, dot3);
  return dot3;
}

OZZ_INLINE SimdFloat4 Dot4(_SimdFloat4 _a, _SimdFloat4 _b) {
  __m128 dot4;
  OZZ_SSE_DOT4_F(_a, _b, dot4);
  return dot4;
}
// Cross product of the x,y,z lanes of _a and _b.
OZZ_INLINE SimdFloat4 Cross3(_SimdFloat4 _a, _SimdFloat4 _b) {
  // Implementation with 3 shuffles only is based on:
  // https://geometrian.com/programming/tutorials/cross-product
  const __m128 shufa = OZZ_SHUFFLE_PS1(_a, _MM_SHUFFLE(3, 0, 2, 1));
  const __m128 shufb = OZZ_SHUFFLE_PS1(_b, _MM_SHUFFLE(3, 0, 2, 1));
  const __m128 shufc = OZZ_MSUB(_a, shufb, _mm_mul_ps(_b, shufa));
  return OZZ_SHUFFLE_PS1(shufc, _MM_SHUFFLE(3, 0, 2, 1));
}

// Hardware reciprocal estimate (low precision).
OZZ_INLINE SimdFloat4 RcpEst(_SimdFloat4 _v) { return _mm_rcp_ps(_v); }

OZZ_INLINE SimdFloat4 RcpEstNR(_SimdFloat4 _v) {
  const __m128 nr = _mm_rcp_ps(_v);
  // Do one more Newton-Raphson step to improve precision.
  return OZZ_NMADD(_mm_mul_ps(nr, nr), _v, _mm_add_ps(nr, nr));
}

// x-lane-only variants.
OZZ_INLINE SimdFloat4 RcpEstX(_SimdFloat4 _v) { return _mm_rcp_ss(_v); }

OZZ_INLINE SimdFloat4 RcpEstXNR(_SimdFloat4 _v) {
  const __m128 nr = _mm_rcp_ss(_v);
  // Do one more Newton-Raphson step to improve precision.
  return OZZ_NMADDX(_mm_mul_ss(nr, nr), _v, _mm_add_ss(nr, nr));
}

// Exact square roots (all lanes / x lane only).
OZZ_INLINE SimdFloat4 Sqrt(_SimdFloat4 _v) { return _mm_sqrt_ps(_v); }

OZZ_INLINE SimdFloat4 SqrtX(_SimdFloat4 _v) { return _mm_sqrt_ss(_v); }

// Hardware reciprocal square root estimate, with optional Newton-Raphson
// refinement: nr/2 * (3 - v*nr*nr).
OZZ_INLINE SimdFloat4 RSqrtEst(_SimdFloat4 _v) { return _mm_rsqrt_ps(_v); }

OZZ_INLINE SimdFloat4 RSqrtEstNR(_SimdFloat4 _v) {
  const __m128 nr = _mm_rsqrt_ps(_v);
  // Do one more Newton-Raphson step to improve precision.
  return _mm_mul_ps(_mm_mul_ps(_mm_set_ps1(.5f), nr),
                    OZZ_NMADD(_mm_mul_ps(_v, nr), nr, _mm_set_ps1(3.f)));
}

OZZ_INLINE SimdFloat4 RSqrtEstX(_SimdFloat4 _v) { return _mm_rsqrt_ss(_v); }

OZZ_INLINE SimdFloat4 RSqrtEstXNR(_SimdFloat4 _v) {
  const __m128 nr = _mm_rsqrt_ss(_v);
  // Do one more Newton-Raphson step to improve precision.
  return _mm_mul_ss(_mm_mul_ss(_mm_set_ps1(.5f), nr),
                    OZZ_NMADDX(_mm_mul_ss(_v, nr), nr, _mm_set_ps1(3.f)));
}

// Clears the sign bit of every lane (mask 0x7fffffff built from all-ones
// shifted right by one).
OZZ_INLINE SimdFloat4 Abs(_SimdFloat4 _v) {
  const __m128i zero = _mm_setzero_si128();
  return _mm_and_ps(
      _mm_castsi128_ps(_mm_srli_epi32(_mm_cmpeq_epi32(zero, zero), 1)), _v);
}

// Isolates the sign bit of every lane (srl 31 then sll 31).
OZZ_INLINE SimdInt4 Sign(_SimdFloat4 _v) {
  return _mm_slli_epi32(_mm_srli_epi32(_mm_castps_si128(_v), 31), 31);
}
// Euclidean length of the first 2/3/4 lanes; only the x lane of the result
// is meaningful.
OZZ_INLINE SimdFloat4 Length2(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT2_F(_v, _v, sq_len);
  return _mm_sqrt_ss(sq_len);
}

OZZ_INLINE SimdFloat4 Length3(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT3_F(_v, _v, sq_len);
  return _mm_sqrt_ss(sq_len);
}

OZZ_INLINE SimdFloat4 Length4(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT4_F(_v, _v, sq_len);
  return _mm_sqrt_ss(sq_len);
}

// Squared lengths (no sqrt); only the x lane of the result is meaningful.
OZZ_INLINE SimdFloat4 Length2Sqr(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT2_F(_v, _v, sq_len);
  return sq_len;
}

OZZ_INLINE SimdFloat4 Length3Sqr(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT3_F(_v, _v, sq_len);
  return sq_len;
}

OZZ_INLINE SimdFloat4 Length4Sqr(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT4_F(_v, _v, sq_len);
  return sq_len;
}
// Normalizes the x,y lanes of _v; z and w are passed through unchanged.
// Asserts (debug builds) that _v has a non-zero squared length.
OZZ_INLINE SimdFloat4 Normalize2(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT2_F(_v, _v, sq_len);
  assert(_mm_cvtss_f32(sq_len) != 0.f && "_v is not normalizable");
  const __m128 inv_len = _mm_div_ss(simd_float4::one(), _mm_sqrt_ss(sq_len));
  const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
  const __m128 norm = _mm_mul_ps(_v, inv_lenxxxx);
  // Re-attach the untouched z,w lanes of the input.
  return _mm_movelh_ps(norm, _mm_movehl_ps(_v, _v));
}

// Normalizes x,y,z; w is preserved via a lane reversal: the reversed vector
// (w,z,y,x) is scaled, its x lane (the original w) restored with move_ss,
// then reversed back.
OZZ_INLINE SimdFloat4 Normalize3(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT3_F(_v, _v, sq_len);
  assert(_mm_cvtss_f32(sq_len) != 0.f && "_v is not normalizable");
  const __m128 inv_len = _mm_div_ss(simd_float4::one(), _mm_sqrt_ss(sq_len));
  const __m128 vwxyz = OZZ_SHUFFLE_PS1(_v, _MM_SHUFFLE(0, 1, 2, 3));
  const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
  const __m128 normwxyz = _mm_move_ss(_mm_mul_ps(vwxyz, inv_lenxxxx), vwxyz);
  return OZZ_SHUFFLE_PS1(normwxyz, _MM_SHUFFLE(0, 1, 2, 3));
}

// Normalizes all four lanes.
OZZ_INLINE SimdFloat4 Normalize4(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT4_F(_v, _v, sq_len);
  assert(_mm_cvtss_f32(sq_len) != 0.f && "_v is not normalizable");
  const __m128 inv_len = _mm_div_ss(simd_float4::one(), _mm_sqrt_ss(sq_len));
  const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
  return _mm_mul_ps(_v, inv_lenxxxx);
}

// Est variants trade accuracy for speed: they use the hardware reciprocal
// square root estimate instead of an exact divide + sqrt.
OZZ_INLINE SimdFloat4 NormalizeEst2(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT2_F(_v, _v, sq_len);
  assert(_mm_cvtss_f32(sq_len) != 0.f && "_v is not normalizable");
  const __m128 inv_len = _mm_rsqrt_ss(sq_len);
  const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
  const __m128 norm = _mm_mul_ps(_v, inv_lenxxxx);
  return _mm_movelh_ps(norm, _mm_movehl_ps(_v, _v));
}

OZZ_INLINE SimdFloat4 NormalizeEst3(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT3_F(_v, _v, sq_len);
  assert(_mm_cvtss_f32(sq_len) != 0.f && "_v is not normalizable");
  const __m128 inv_len = _mm_rsqrt_ss(sq_len);
  const __m128 vwxyz = OZZ_SHUFFLE_PS1(_v, _MM_SHUFFLE(0, 1, 2, 3));
  const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
  const __m128 normwxyz = _mm_move_ss(_mm_mul_ps(vwxyz, inv_lenxxxx), vwxyz);
  return OZZ_SHUFFLE_PS1(normwxyz, _MM_SHUFFLE(0, 1, 2, 3));
}

OZZ_INLINE SimdFloat4 NormalizeEst4(_SimdFloat4 _v) {
  __m128 sq_len;
  OZZ_SSE_DOT4_F(_v, _v, sq_len);
  assert(_mm_cvtss_f32(sq_len) != 0.f && "_v is not normalizable");
  const __m128 inv_len = _mm_rsqrt_ss(sq_len);
  const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
  return _mm_mul_ps(_v, inv_lenxxxx);
}
// Tests whether the squared length of the first 2/3/4 lanes lies strictly
// within (1 - kNormalizationToleranceSq, 1 + kNormalizationToleranceSq).
// Only the x lane of the returned mask is meaningful; y,z,w are forced to 0
// by blending the dot result onto a zero vector first.
OZZ_INLINE SimdInt4 IsNormalized2(_SimdFloat4 _v) {
  const __m128 max = _mm_set_ss(1.f + kNormalizationToleranceSq);
  const __m128 min = _mm_set_ss(1.f - kNormalizationToleranceSq);
  __m128 dot;
  OZZ_SSE_DOT2_F(_v, _v, dot);
  __m128 dotx000 = _mm_move_ss(_mm_setzero_ps(), dot);
  return _mm_castps_si128(
      _mm_and_ps(_mm_cmplt_ss(dotx000, max), _mm_cmpgt_ss(dotx000, min)));
}

OZZ_INLINE SimdInt4 IsNormalized3(_SimdFloat4 _v) {
  const __m128 max = _mm_set_ss(1.f + kNormalizationToleranceSq);
  const __m128 min = _mm_set_ss(1.f - kNormalizationToleranceSq);
  __m128 dot;
  OZZ_SSE_DOT3_F(_v, _v, dot);
  __m128 dotx000 = _mm_move_ss(_mm_setzero_ps(), dot);
  return _mm_castps_si128(
      _mm_and_ps(_mm_cmplt_ss(dotx000, max), _mm_cmpgt_ss(dotx000, min)));
}

OZZ_INLINE SimdInt4 IsNormalized4(_SimdFloat4 _v) {
  const __m128 max = _mm_set_ss(1.f + kNormalizationToleranceSq);
  const __m128 min = _mm_set_ss(1.f - kNormalizationToleranceSq);
  __m128 dot;
  OZZ_SSE_DOT4_F(_v, _v, dot);
  __m128 dotx000 = _mm_move_ss(_mm_setzero_ps(), dot);
  return _mm_castps_si128(
      _mm_and_ps(_mm_cmplt_ss(dotx000, max), _mm_cmpgt_ss(dotx000, min)));
}

// Est variants use the looser kNormalizationToleranceEstSq window, matching
// the precision of the NormalizeEst* functions.
OZZ_INLINE SimdInt4 IsNormalizedEst2(_SimdFloat4 _v) {
  const __m128 max = _mm_set_ss(1.f + kNormalizationToleranceEstSq);
  const __m128 min = _mm_set_ss(1.f - kNormalizationToleranceEstSq);
  __m128 dot;
  OZZ_SSE_DOT2_F(_v, _v, dot);
  __m128 dotx000 = _mm_move_ss(_mm_setzero_ps(), dot);
  return _mm_castps_si128(
      _mm_and_ps(_mm_cmplt_ss(dotx000, max), _mm_cmpgt_ss(dotx000, min)));
}

OZZ_INLINE SimdInt4 IsNormalizedEst3(_SimdFloat4 _v) {
  const __m128 max = _mm_set_ss(1.f + kNormalizationToleranceEstSq);
  const __m128 min = _mm_set_ss(1.f - kNormalizationToleranceEstSq);
  __m128 dot;
  OZZ_SSE_DOT3_F(_v, _v, dot);
  __m128 dotx000 = _mm_move_ss(_mm_setzero_ps(), dot);
  return _mm_castps_si128(
      _mm_and_ps(_mm_cmplt_ss(dotx000, max), _mm_cmpgt_ss(dotx000, min)));
}

OZZ_INLINE SimdInt4 IsNormalizedEst4(_SimdFloat4 _v) {
  const __m128 max = _mm_set_ss(1.f + kNormalizationToleranceEstSq);
  const __m128 min = _mm_set_ss(1.f - kNormalizationToleranceEstSq);
  __m128 dot;
  OZZ_SSE_DOT4_F(_v, _v, dot);
  __m128 dotx000 = _mm_move_ss(_mm_setzero_ps(), dot);
  return _mm_castps_si128(
      _mm_and_ps(_mm_cmplt_ss(dotx000, max), _mm_cmpgt_ss(dotx000, min)));
}
OZZ_INLINE SimdFloat4 NormalizeSafe2(_SimdFloat4 _v, _SimdFloat4 _safe) {
// assert(AreAllTrue1(IsNormalized2(_safe)) && "_safe is not normalized");
__m128 sq_len;
OZZ_SSE_DOT2_F(_v, _v, sq_len);
const __m128 inv_len = _mm_div_ss(simd_float4::one(), _mm_sqrt_ss(sq_len));
const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
const __m128 norm = _mm_mul_ps(_v, inv_lenxxxx);
const __m128i cond = _mm_castps_si128(
_mm_cmple_ps(OZZ_SSE_SPLAT_F(sq_len, 0), _mm_setzero_ps()));
const __m128 cfalse = _mm_movelh_ps(norm, _mm_movehl_ps(_v, _v));
return OZZ_SSE_SELECT_F(cond, _safe, cfalse);
}
OZZ_INLINE SimdFloat4 NormalizeSafe3(_SimdFloat4 _v, _SimdFloat4 _safe) {
// assert(AreAllTrue1(IsNormalized3(_safe)) && "_safe is not normalized");
__m128 sq_len;
OZZ_SSE_DOT3_F(_v, _v, sq_len);
const __m128 inv_len = _mm_div_ss(simd_float4::one(), _mm_sqrt_ss(sq_len));
const __m128 vwxyz = OZZ_SHUFFLE_PS1(_v, _MM_SHUFFLE(0, 1, 2, 3));
const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
const __m128 normwxyz = _mm_move_ss(_mm_mul_ps(vwxyz, inv_lenxxxx), vwxyz);
const __m128i cond = _mm_castps_si128(
_mm_cmple_ps(OZZ_SSE_SPLAT_F(sq_len, 0), _mm_setzero_ps()));
const __m128 cfalse = OZZ_SHUFFLE_PS1(normwxyz, _MM_SHUFFLE(0, 1, 2, 3));
return OZZ_SSE_SELECT_F(cond, _safe, cfalse);
}
OZZ_INLINE SimdFloat4 NormalizeSafe4(_SimdFloat4 _v, _SimdFloat4 _safe) {
// assert(AreAllTrue1(IsNormalized4(_safe)) && "_safe is not normalized");
__m128 sq_len;
OZZ_SSE_DOT4_F(_v, _v, sq_len);
const __m128 inv_len = _mm_div_ss(simd_float4::one(), _mm_sqrt_ss(sq_len));
const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
const __m128i cond = _mm_castps_si128(
_mm_cmple_ps(OZZ_SSE_SPLAT_F(sq_len, 0), _mm_setzero_ps()));
const __m128 cfalse = _mm_mul_ps(_v, inv_lenxxxx);
return OZZ_SSE_SELECT_F(cond, _safe, cfalse);
}
OZZ_INLINE SimdFloat4 NormalizeSafeEst2(_SimdFloat4 _v, _SimdFloat4 _safe) {
// assert(AreAllTrue1(IsNormalizedEst2(_safe)) && "_safe is not normalized");
__m128 sq_len;
OZZ_SSE_DOT2_F(_v, _v, sq_len);
const __m128 inv_len = _mm_rsqrt_ss(sq_len);
const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
const __m128 norm = _mm_mul_ps(_v, inv_lenxxxx);
const __m128i cond = _mm_castps_si128(
_mm_cmple_ps(OZZ_SSE_SPLAT_F(sq_len, 0), _mm_setzero_ps()));
const __m128 cfalse = _mm_movelh_ps(norm, _mm_movehl_ps(_v, _v));
return OZZ_SSE_SELECT_F(cond, _safe, cfalse);
}
// Estimated (rsqrt-based) safe normalization of the x, y and z components of
// _v. The w component of _v is preserved. Returns _safe when |v.xyz| is 0.
OZZ_INLINE SimdFloat4 NormalizeSafeEst3(_SimdFloat4 _v, _SimdFloat4 _safe) {
  // assert(AreAllTrue1(IsNormalizedEst3(_safe)) && "_safe is not normalized");
  __m128 sq_len;
  OZZ_SSE_DOT3_F(_v, _v, sq_len);
  const __m128 inv_len = _mm_rsqrt_ss(sq_len);
  // Reverses lanes so the original w lands in lane 0 ...
  const __m128 vwxyz = OZZ_SHUFFLE_PS1(_v, _MM_SHUFFLE(0, 1, 2, 3));
  const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
  // ... scales all lanes, then move_ss re-inserts the unscaled w into lane 0.
  const __m128 normwxyz = _mm_move_ss(_mm_mul_ps(vwxyz, inv_lenxxxx), vwxyz);
  const __m128i cond = _mm_castps_si128(
      _mm_cmple_ps(OZZ_SSE_SPLAT_F(sq_len, 0), _mm_setzero_ps()));
  // Reverses lanes back to xyzw order: xyz normalized, w untouched.
  const __m128 cfalse = OZZ_SHUFFLE_PS1(normwxyz, _MM_SHUFFLE(0, 1, 2, 3));
  return OZZ_SSE_SELECT_F(cond, _safe, cfalse);
}
// Estimated (rsqrt-based) safe normalization of all 4 components of _v.
// Returns _safe when _v has a null squared length.
OZZ_INLINE SimdFloat4 NormalizeSafeEst4(_SimdFloat4 _v, _SimdFloat4 _safe) {
  // assert(AreAllTrue1(IsNormalizedEst4(_safe)) && "_safe is not normalized");
  __m128 sq_len;
  OZZ_SSE_DOT4_F(_v, _v, sq_len);
  const __m128 inv_len = _mm_rsqrt_ss(sq_len);
  const __m128 inv_lenxxxx = OZZ_SSE_SPLAT_F(inv_len, 0);
  const __m128i cond = _mm_castps_si128(
      _mm_cmple_ps(OZZ_SSE_SPLAT_F(sq_len, 0), _mm_setzero_ps()));
  const __m128 cfalse = _mm_mul_ps(_v, inv_lenxxxx);
  return OZZ_SSE_SELECT_F(cond, _safe, cfalse);
}
// Per-component linear interpolation: _a + _alpha * (_b - _a).
// _alpha is not clamped, so values outside [0, 1] extrapolate.
OZZ_INLINE SimdFloat4 Lerp(_SimdFloat4 _a, _SimdFloat4 _b, _SimdFloat4 _alpha) {
  const __m128 delta = _mm_sub_ps(_b, _a);
  return OZZ_MADD(_alpha, delta, _a);
}
// Per-component minimum of _a and _b.
OZZ_INLINE SimdFloat4 Min(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_min_ps(_a, _b);
}
// Per-component maximum of _a and _b.
OZZ_INLINE SimdFloat4 Max(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_max_ps(_a, _b);
}
// Per-component minimum of _v and 0 (clamps above by zero).
OZZ_INLINE SimdFloat4 Min0(_SimdFloat4 _v) {
  return _mm_min_ps(_mm_setzero_ps(), _v);
}
// Per-component maximum of _v and 0 (clamps below by zero).
OZZ_INLINE SimdFloat4 Max0(_SimdFloat4 _v) {
  return _mm_max_ps(_mm_setzero_ps(), _v);
}
// Per-component clamp of _v between lower bound _a and upper bound _b.
OZZ_INLINE SimdFloat4 Clamp(_SimdFloat4 _a, _SimdFloat4 _v, _SimdFloat4 _b) {
  return _mm_max_ps(_a, _mm_min_ps(_v, _b));
}
// Per-bit select: returns bits of _true where _b is set, bits of _false
// elsewhere. _b is expected to be a full-lane mask (all ones or all zeros).
OZZ_INLINE SimdFloat4 Select(_SimdInt4 _b, _SimdFloat4 _true,
                             _SimdFloat4 _false) {
  return OZZ_SSE_SELECT_F(_b, _true, _false);
}
// Per-component float comparisons. Each returns an integer mask with all bits
// of a lane set when the comparison holds for that lane, zero otherwise.
OZZ_INLINE SimdInt4 CmpEq(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_castps_si128(_mm_cmpeq_ps(_a, _b));
}
OZZ_INLINE SimdInt4 CmpNe(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_castps_si128(_mm_cmpneq_ps(_a, _b));
}
OZZ_INLINE SimdInt4 CmpLt(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_castps_si128(_mm_cmplt_ps(_a, _b));
}
OZZ_INLINE SimdInt4 CmpLe(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_castps_si128(_mm_cmple_ps(_a, _b));
}
OZZ_INLINE SimdInt4 CmpGt(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_castps_si128(_mm_cmpgt_ps(_a, _b));
}
OZZ_INLINE SimdInt4 CmpGe(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_castps_si128(_mm_cmpge_ps(_a, _b));
}
// Bitwise logical operations on float vectors. The float/int overloads are
// identical except for a bit-preserving cast of the second operand.
OZZ_INLINE SimdFloat4 And(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_and_ps(_a, _b);
}
OZZ_INLINE SimdFloat4 Or(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_or_ps(_a, _b);
}
OZZ_INLINE SimdFloat4 Xor(_SimdFloat4 _a, _SimdFloat4 _b) {
  return _mm_xor_ps(_a, _b);
}
OZZ_INLINE SimdFloat4 And(_SimdFloat4 _a, _SimdInt4 _b) {
  return _mm_and_ps(_a, _mm_castsi128_ps(_b));
}
// Returns _a & ~_b (note _mm_andnot_ps negates its *first* argument).
OZZ_INLINE SimdFloat4 AndNot(_SimdFloat4 _a, _SimdInt4 _b) {
  return _mm_andnot_ps(_mm_castsi128_ps(_b), _a);
}
OZZ_INLINE SimdFloat4 Or(_SimdFloat4 _a, _SimdInt4 _b) {
  return _mm_or_ps(_a, _mm_castsi128_ps(_b));
}
OZZ_INLINE SimdFloat4 Xor(_SimdFloat4 _a, _SimdInt4 _b) {
  return _mm_xor_ps(_a, _mm_castsi128_ps(_b));
}
// Trigonometric functions. These are not vectorized: each lane is computed
// with the scalar std:: function and repacked (note _mm_set_ps takes lanes in
// w, z, y, x order). The *X variants only transform lane 0 (x); the other
// lanes keep the values of _v.
OZZ_INLINE SimdFloat4 Cos(_SimdFloat4 _v) {
  return _mm_set_ps(std::cos(GetW(_v)), std::cos(GetZ(_v)), std::cos(GetY(_v)),
                    std::cos(GetX(_v)));
}
OZZ_INLINE SimdFloat4 CosX(_SimdFloat4 _v) {
  return _mm_move_ss(_v, _mm_set_ps1(std::cos(GetX(_v))));
}
OZZ_INLINE SimdFloat4 ACos(_SimdFloat4 _v) {
  return _mm_set_ps(std::acos(GetW(_v)), std::acos(GetZ(_v)),
                    std::acos(GetY(_v)), std::acos(GetX(_v)));
}
OZZ_INLINE SimdFloat4 ACosX(_SimdFloat4 _v) {
  return _mm_move_ss(_v, _mm_set_ps1(std::acos(GetX(_v))));
}
OZZ_INLINE SimdFloat4 Sin(_SimdFloat4 _v) {
  return _mm_set_ps(std::sin(GetW(_v)), std::sin(GetZ(_v)), std::sin(GetY(_v)),
                    std::sin(GetX(_v)));
}
OZZ_INLINE SimdFloat4 SinX(_SimdFloat4 _v) {
  return _mm_move_ss(_v, _mm_set_ps1(std::sin(GetX(_v))));
}
OZZ_INLINE SimdFloat4 ASin(_SimdFloat4 _v) {
  return _mm_set_ps(std::asin(GetW(_v)), std::asin(GetZ(_v)),
                    std::asin(GetY(_v)), std::asin(GetX(_v)));
}
OZZ_INLINE SimdFloat4 ASinX(_SimdFloat4 _v) {
  return _mm_move_ss(_v, _mm_set_ps1(std::asin(GetX(_v))));
}
OZZ_INLINE SimdFloat4 Tan(_SimdFloat4 _v) {
  return _mm_set_ps(std::tan(GetW(_v)), std::tan(GetZ(_v)), std::tan(GetY(_v)),
                    std::tan(GetX(_v)));
}
OZZ_INLINE SimdFloat4 TanX(_SimdFloat4 _v) {
  return _mm_move_ss(_v, _mm_set_ps1(std::tan(GetX(_v))));
}
OZZ_INLINE SimdFloat4 ATan(_SimdFloat4 _v) {
  return _mm_set_ps(std::atan(GetW(_v)), std::atan(GetZ(_v)),
                    std::atan(GetY(_v)), std::atan(GetX(_v)));
}
OZZ_INLINE SimdFloat4 ATanX(_SimdFloat4 _v) {
  return _mm_move_ss(_v, _mm_set_ps1(std::atan(GetX(_v))));
}
namespace simd_int4 {
// Integer vector constants and loaders. Constants are synthesized in
// registers from cmpeq(zero, zero) (all bits set) plus shifts, avoiding any
// memory load.
OZZ_INLINE SimdInt4 zero() { return _mm_setzero_si128(); }
// {1, 1, 1, 1}: computed as 0 - (-1) in every lane.
OZZ_INLINE SimdInt4 one() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_sub_epi32(zero, _mm_cmpeq_epi32(zero, zero));
}
// {1, 0, 0, 0}: byte-shifts a single lane of ones down to the x lane.
OZZ_INLINE SimdInt4 x_axis() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_srli_si128(_mm_sub_epi32(zero, _mm_cmpeq_epi32(zero, zero)), 12);
}
// {0, 1, 0, 0}
OZZ_INLINE SimdInt4 y_axis() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_slli_si128(
      _mm_srli_si128(_mm_sub_epi32(zero, _mm_cmpeq_epi32(zero, zero)), 12), 4);
}
// {0, 0, 1, 0}
OZZ_INLINE SimdInt4 z_axis() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_slli_si128(
      _mm_srli_si128(_mm_sub_epi32(zero, _mm_cmpeq_epi32(zero, zero)), 12), 8);
}
// {0, 0, 0, 1}
OZZ_INLINE SimdInt4 w_axis() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_slli_si128(_mm_sub_epi32(zero, _mm_cmpeq_epi32(zero, zero)), 12);
}
// All bits set in every lane (boolean "true" mask).
OZZ_INLINE SimdInt4 all_true() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_cmpeq_epi32(zero, zero);
}
OZZ_INLINE SimdInt4 all_false() { return _mm_setzero_si128(); }
// 0x80000000 in every lane (float sign bit).
OZZ_INLINE SimdInt4 mask_sign() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_slli_epi32(_mm_cmpeq_epi32(zero, zero), 31);
}
// Sign bit in x, y, z lanes only.
OZZ_INLINE SimdInt4 mask_sign_xyz() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_srli_si128(_mm_slli_epi32(_mm_cmpeq_epi32(zero, zero), 31), 4);
}
// Sign bit in the w lane only.
OZZ_INLINE SimdInt4 mask_sign_w() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_slli_si128(_mm_slli_epi32(_mm_cmpeq_epi32(zero, zero), 31), 12);
}
// 0x7fffffff in every lane (everything but the sign bit).
OZZ_INLINE SimdInt4 mask_not_sign() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_srli_epi32(_mm_cmpeq_epi32(zero, zero), 1);
}
// Lane masks: each letter denotes whether a lane (x, y, z, w order) is all
// ones (f) or all zeros (0).
OZZ_INLINE SimdInt4 mask_ffff() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_cmpeq_epi32(zero, zero);
}
OZZ_INLINE SimdInt4 mask_0000() { return _mm_setzero_si128(); }
OZZ_INLINE SimdInt4 mask_fff0() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_srli_si128(_mm_cmpeq_epi32(zero, zero), 4);
}
OZZ_INLINE SimdInt4 mask_f000() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_srli_si128(_mm_cmpeq_epi32(zero, zero), 12);
}
OZZ_INLINE SimdInt4 mask_0f00() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_srli_si128(_mm_slli_si128(_mm_cmpeq_epi32(zero, zero), 12), 8);
}
OZZ_INLINE SimdInt4 mask_00f0() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_srli_si128(_mm_slli_si128(_mm_cmpeq_epi32(zero, zero), 12), 4);
}
OZZ_INLINE SimdInt4 mask_000f() {
  const __m128i zero = _mm_setzero_si128();
  return _mm_slli_si128(_mm_cmpeq_epi32(zero, zero), 12);
}
// Builds {_x, _y, _z, _w} (note _mm_set_epi32 takes arguments w first).
OZZ_INLINE SimdInt4 Load(int _x, int _y, int _z, int _w) {
  return _mm_set_epi32(_w, _z, _y, _x);
}
// Builds {_x, 0, 0, 0}.
OZZ_INLINE SimdInt4 LoadX(int _x) { return _mm_set_epi32(0, 0, 0, _x); }
// Broadcasts _x to all 4 lanes.
OZZ_INLINE SimdInt4 Load1(int _x) { return _mm_set1_epi32(_x); }
// Boolean loaders: 0 - b maps true to an all-ones lane mask, false to 0.
OZZ_INLINE SimdInt4 Load(bool _x, bool _y, bool _z, bool _w) {
  return _mm_sub_epi32(_mm_setzero_si128(), _mm_set_epi32(_w, _z, _y, _x));
}
OZZ_INLINE SimdInt4 LoadX(bool _x) {
  return _mm_sub_epi32(_mm_setzero_si128(), _mm_set_epi32(0, 0, 0, _x));
}
OZZ_INLINE SimdInt4 Load1(bool _x) {
  return _mm_sub_epi32(_mm_setzero_si128(), _mm_set1_epi32(_x));
}
// Aligned loaders: _i must be 16-byte aligned (asserted).
OZZ_INLINE SimdInt4 LoadPtr(const int* _i) {
  assert(!(uintptr_t(_i) & 0xf) && "Invalid alignment");
  return _mm_load_si128(reinterpret_cast<const __m128i*>(_i));
}
OZZ_INLINE SimdInt4 LoadXPtr(const int* _i) {
  assert(!(uintptr_t(_i) & 0xf) && "Invalid alignment");
  return _mm_cvtsi32_si128(*_i);
}
// NOTE(review): _mm_loadl_epi64 reads 8 bytes from _i, then broadcasts the
// first int — assumes at least 8 readable bytes at _i; confirm callers.
OZZ_INLINE SimdInt4 Load1Ptr(const int* _i) {
  assert(!(uintptr_t(_i) & 0xf) && "Invalid alignment");
  return _mm_shuffle_epi32(
      _mm_loadl_epi64(reinterpret_cast<const __m128i*>(_i)),
      _MM_SHUFFLE(0, 0, 0, 0));
}
OZZ_INLINE SimdInt4 Load2Ptr(const int* _i) {
  assert(!(uintptr_t(_i) & 0xf) && "Invalid alignment");
  return _mm_loadl_epi64(reinterpret_cast<const __m128i*>(_i));
}
OZZ_INLINE SimdInt4 Load3Ptr(const int* _i) {
  assert(!(uintptr_t(_i) & 0xf) && "Invalid alignment");
  return _mm_set_epi32(0, _i[2], _i[1], _i[0]);
}
// Unaligned (*PtrU) loaders: only natural 4-byte int alignment is required.
OZZ_INLINE SimdInt4 LoadPtrU(const int* _i) {
  assert(!(uintptr_t(_i) & 0x3) && "Invalid alignment");
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(_i));
}
OZZ_INLINE SimdInt4 LoadXPtrU(const int* _i) {
  assert(!(uintptr_t(_i) & 0x3) && "Invalid alignment");
  return _mm_cvtsi32_si128(*_i);
}
OZZ_INLINE SimdInt4 Load1PtrU(const int* _i) {
  assert(!(uintptr_t(_i) & 0x3) && "Invalid alignment");
  return _mm_set1_epi32(*_i);
}
OZZ_INLINE SimdInt4 Load2PtrU(const int* _i) {
  assert(!(uintptr_t(_i) & 0x3) && "Invalid alignment");
  return _mm_set_epi32(0, 0, _i[1], _i[0]);
}
OZZ_INLINE SimdInt4 Load3PtrU(const int* _i) {
  assert(!(uintptr_t(_i) & 0x3) && "Invalid alignment");
  return _mm_set_epi32(0, _i[2], _i[1], _i[0]);
}
// Float to int conversion, rounding to nearest.
OZZ_INLINE SimdInt4 FromFloatRound(_SimdFloat4 _f) {
  return _mm_cvtps_epi32(_f);
}
// Float to int conversion, truncating toward zero.
OZZ_INLINE SimdInt4 FromFloatTrunc(_SimdFloat4 _f) {
  return _mm_cvttps_epi32(_f);
}
}  // namespace simd_int4
// Extracts a single integer lane. Lanes other than x are first moved to
// position 0 (via splat or unpack) before the scalar move.
OZZ_INLINE int GetX(_SimdInt4 _v) { return _mm_cvtsi128_si32(_v); }
OZZ_INLINE int GetY(_SimdInt4 _v) {
  return _mm_cvtsi128_si32(OZZ_SSE_SPLAT_I(_v, 1));
}
OZZ_INLINE int GetZ(_SimdInt4 _v) {
  return _mm_cvtsi128_si32(_mm_unpackhi_epi32(_v, _v));
}
OZZ_INLINE int GetW(_SimdInt4 _v) {
  return _mm_cvtsi128_si32(OZZ_SSE_SPLAT_I(_v, 3));
}
// Returns _v with one lane replaced by the x lane of _i; the other lanes of
// _v are preserved. Implemented with float shuffles over bit-preserving
// casts.
OZZ_INLINE SimdInt4 SetX(_SimdInt4 _v, _SimdInt4 _i) {
  return _mm_castps_si128(
      _mm_move_ss(_mm_castsi128_ps(_v), _mm_castsi128_ps(_i)));
}
OZZ_INLINE SimdInt4 SetY(_SimdInt4 _v, _SimdInt4 _i) {
  const __m128 xfnn = _mm_castsi128_ps(_mm_unpacklo_epi32(_v, _i));
  return _mm_castps_si128(
      _mm_shuffle_ps(xfnn, _mm_castsi128_ps(_v), _MM_SHUFFLE(3, 2, 1, 0)));
}
OZZ_INLINE SimdInt4 SetZ(_SimdInt4 _v, _SimdInt4 _i) {
  const __m128 ffww = _mm_shuffle_ps(_mm_castsi128_ps(_i), _mm_castsi128_ps(_v),
                                     _MM_SHUFFLE(3, 3, 0, 0));
  return _mm_castps_si128(
      _mm_shuffle_ps(_mm_castsi128_ps(_v), ffww, _MM_SHUFFLE(2, 0, 1, 0)));
}
OZZ_INLINE SimdInt4 SetW(_SimdInt4 _v, _SimdInt4 _i) {
  const __m128 ffzz = _mm_shuffle_ps(_mm_castsi128_ps(_i), _mm_castsi128_ps(_v),
                                     _MM_SHUFFLE(2, 2, 0, 0));
  return _mm_castps_si128(
      _mm_shuffle_ps(_mm_castsi128_ps(_v), ffzz, _MM_SHUFFLE(0, 2, 1, 0)));
}
// Returns _v with the _ith lane (0-3, asserted) replaced by the x lane of _i.
// Goes through memory (union) since the index is a runtime value.
OZZ_INLINE SimdInt4 SetI(_SimdInt4 _v, _SimdInt4 _i, int _ith) {
  assert(_ith >= 0 && _ith <= 3 && "Invalid index, out of range.");
  union {
    SimdInt4 ret;
    int af[4];
  } u = {_v};
  u.af[_ith] = GetX(_i);
  return u.ret;
}
// Stores 4/1/2/3 integer lanes of _v to _i. The aligned variants assert
// 16-byte alignment (the API contract, even where the scalar stores would not
// strictly require it); the *PtrU variants only require int alignment.
OZZ_INLINE void StorePtr(_SimdInt4 _v, int* _i) {
  assert(!(uintptr_t(_i) & 0xf) && "Invalid alignment");
  _mm_store_si128(reinterpret_cast<__m128i*>(_i), _v);
}
OZZ_INLINE void Store1Ptr(_SimdInt4 _v, int* _i) {
  assert(!(uintptr_t(_i) & 0xf) && "Invalid alignment");
  *_i = _mm_cvtsi128_si32(_v);
}
OZZ_INLINE void Store2Ptr(_SimdInt4 _v, int* _i) {
  assert(!(uintptr_t(_i) & 0xf) && "Invalid alignment");
  _i[0] = _mm_cvtsi128_si32(_v);
  _i[1] = _mm_cvtsi128_si32(OZZ_SSE_SPLAT_I(_v, 1));
}
OZZ_INLINE void Store3Ptr(_SimdInt4 _v, int* _i) {
  assert(!(uintptr_t(_i) & 0xf) && "Invalid alignment");
  _i[0] = _mm_cvtsi128_si32(_v);
  _i[1] = _mm_cvtsi128_si32(OZZ_SSE_SPLAT_I(_v, 1));
  _i[2] = _mm_cvtsi128_si32(_mm_unpackhi_epi32(_v, _v));
}
OZZ_INLINE void StorePtrU(_SimdInt4 _v, int* _i) {
  assert(!(uintptr_t(_i) & 0x3) && "Invalid alignment");
  _mm_storeu_si128(reinterpret_cast<__m128i*>(_i), _v);
}
OZZ_INLINE void Store1PtrU(_SimdInt4 _v, int* _i) {
  assert(!(uintptr_t(_i) & 0x3) && "Invalid alignment");
  *_i = _mm_cvtsi128_si32(_v);
}
OZZ_INLINE void Store2PtrU(_SimdInt4 _v, int* _i) {
  assert(!(uintptr_t(_i) & 0x3) && "Invalid alignment");
  _i[0] = _mm_cvtsi128_si32(_v);
  _i[1] = _mm_cvtsi128_si32(OZZ_SSE_SPLAT_I(_v, 1));
}
OZZ_INLINE void Store3PtrU(_SimdInt4 _v, int* _i) {
  assert(!(uintptr_t(_i) & 0x3) && "Invalid alignment");
  _i[0] = _mm_cvtsi128_si32(_v);
  _i[1] = _mm_cvtsi128_si32(OZZ_SSE_SPLAT_I(_v, 1));
  _i[2] = _mm_cvtsi128_si32(_mm_unpackhi_epi32(_v, _v));
}
// Broadcasts one lane of _a to all 4 lanes.
OZZ_INLINE SimdInt4 SplatX(_SimdInt4 _a) { return OZZ_SSE_SPLAT_I(_a, 0); }
OZZ_INLINE SimdInt4 SplatY(_SimdInt4 _a) { return OZZ_SSE_SPLAT_I(_a, 1); }
OZZ_INLINE SimdInt4 SplatZ(_SimdInt4 _a) { return OZZ_SSE_SPLAT_I(_a, 2); }
OZZ_INLINE SimdInt4 SplatW(_SimdInt4 _a) { return OZZ_SSE_SPLAT_I(_a, 3); }
// Compile-time lane permutation: result lane k takes _v's lane given by the
// k-th template argument.
template <size_t _X, size_t _Y, size_t _Z, size_t _W>
OZZ_INLINE SimdInt4 Swizzle(_SimdInt4 _v) {
  static_assert(_X <= 3 && _Y <= 3 && _Z <= 3 && _W <= 3,
                "Indices must be between 0 and 3");
  return _mm_shuffle_epi32(_v, _MM_SHUFFLE(_W, _Z, _Y, _X));
}
// Identity swizzle specialization: avoids emitting a useless shuffle.
template <>
OZZ_INLINE SimdInt4 Swizzle<0, 1, 2, 3>(_SimdInt4 _v) {
  return _v;
}
// Packs the most significant bit of each lane into the low 4 bits of an int
// (bit 0 = x lane, bit 3 = w lane).
OZZ_INLINE int MoveMask(_SimdInt4 _v) {
  return _mm_movemask_ps(_mm_castsi128_ps(_v));
}
// Boolean reductions over the lane MSBs: AreAllTrueN / AreAllFalseN only
// consider the first N lanes (x..), the unsuffixed versions all 4.
OZZ_INLINE bool AreAllTrue(_SimdInt4 _v) {
  return _mm_movemask_ps(_mm_castsi128_ps(_v)) == 0xf;
}
OZZ_INLINE bool AreAllTrue3(_SimdInt4 _v) {
  return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x7) == 0x7;
}
OZZ_INLINE bool AreAllTrue2(_SimdInt4 _v) {
  return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x3) == 0x3;
}
OZZ_INLINE bool AreAllTrue1(_SimdInt4 _v) {
  return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x1) == 0x1;
}
OZZ_INLINE bool AreAllFalse(_SimdInt4 _v) {
  return _mm_movemask_ps(_mm_castsi128_ps(_v)) == 0;
}
OZZ_INLINE bool AreAllFalse3(_SimdInt4 _v) {
  return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x7) == 0;
}
OZZ_INLINE bool AreAllFalse2(_SimdInt4 _v) {
  return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x3) == 0;
}
OZZ_INLINE bool AreAllFalse1(_SimdInt4 _v) {
  return (_mm_movemask_ps(_mm_castsi128_ps(_v)) & 0x1) == 0;
}
// Horizontal adds: the x lane of the result holds the sum of the first 2, 3
// or 4 lanes of _v; the remaining lanes keep the values of _v.
OZZ_INLINE SimdInt4 HAdd2(_SimdInt4 _v) {
  const __m128i hadd = _mm_add_epi32(_v, OZZ_SSE_SPLAT_I(_v, 1));
  return _mm_castps_si128(
      _mm_move_ss(_mm_castsi128_ps(_v), _mm_castsi128_ps(hadd)));
}
OZZ_INLINE SimdInt4 HAdd3(_SimdInt4 _v) {
  const __m128i hadd = _mm_add_epi32(_mm_add_epi32(_v, OZZ_SSE_SPLAT_I(_v, 1)),
                                     _mm_unpackhi_epi32(_v, _v));
  return _mm_castps_si128(
      _mm_move_ss(_mm_castsi128_ps(_v), _mm_castsi128_ps(hadd)));
}
OZZ_INLINE SimdInt4 HAdd4(_SimdInt4 _v) {
  const __m128 v = _mm_castsi128_ps(_v);
  // Adds the high pair onto the low pair, then the two remaining lanes.
  const __m128i haddxyzw =
      _mm_add_epi32(_v, _mm_castps_si128(_mm_movehl_ps(v, v)));
  return _mm_castps_si128(_mm_move_ss(
      v,
      _mm_castsi128_ps(_mm_add_epi32(haddxyzw, OZZ_SSE_SPLAT_I(haddxyzw, 1)))));
}
// Per-component absolute value. Uses the native SSSE3 instruction when
// available, otherwise a compare-and-negate fallback.
OZZ_INLINE SimdInt4 Abs(_SimdInt4 _v) {
#ifdef OZZ_SIMD_SSSE3
  return _mm_abs_epi32(_v);
#else  // OZZ_SIMD_SSSE3
  const __m128i zero = _mm_setzero_si128();
  return OZZ_SSE_SELECT_I(_mm_cmplt_epi32(_v, zero), _mm_sub_epi32(zero, _v),
                          _v);
#endif  // OZZ_SIMD_SSSE3
}
// Isolates the sign bit of each lane: 0x80000000 where _v is negative, 0
// elsewhere.
OZZ_INLINE SimdInt4 Sign(_SimdInt4 _v) {
  return _mm_slli_epi32(_mm_srli_epi32(_v, 31), 31);
}
// Per-component signed integer min/max/clamp. Native instructions are used on
// SSE4.1; otherwise a compare + select emulation.
OZZ_INLINE SimdInt4 Min(_SimdInt4 _a, _SimdInt4 _b) {
#ifdef OZZ_SIMD_SSE4_1
  return _mm_min_epi32(_a, _b);
#else  // OZZ_SIMD_SSE4_1
  return OZZ_SSE_SELECT_I(_mm_cmplt_epi32(_a, _b), _a, _b);
#endif  // OZZ_SIMD_SSE4_1
}
OZZ_INLINE SimdInt4 Max(_SimdInt4 _a, _SimdInt4 _b) {
#ifdef OZZ_SIMD_SSE4_1
  return _mm_max_epi32(_a, _b);
#else  // OZZ_SIMD_SSE4_1
  return OZZ_SSE_SELECT_I(_mm_cmpgt_epi32(_a, _b), _a, _b);
#endif  // OZZ_SIMD_SSE4_1
}
// Per-component minimum of _v and 0.
OZZ_INLINE SimdInt4 Min0(_SimdInt4 _v) {
  const __m128i zero = _mm_setzero_si128();
#ifdef OZZ_SIMD_SSE4_1
  return _mm_min_epi32(zero, _v);
#else  // OZZ_SIMD_SSE4_1
  return OZZ_SSE_SELECT_I(_mm_cmplt_epi32(zero, _v), zero, _v);
#endif  // OZZ_SIMD_SSE4_1
}
// Per-component maximum of _v and 0.
OZZ_INLINE SimdInt4 Max0(_SimdInt4 _v) {
  const __m128i zero = _mm_setzero_si128();
#ifdef OZZ_SIMD_SSE4_1
  return _mm_max_epi32(zero, _v);
#else  // OZZ_SIMD_SSE4_1
  return OZZ_SSE_SELECT_I(_mm_cmpgt_epi32(zero, _v), zero, _v);
#endif  // OZZ_SIMD_SSE4_1
}
// Per-component clamp of _v between lower bound _a and upper bound _b.
OZZ_INLINE SimdInt4 Clamp(_SimdInt4 _a, _SimdInt4 _v, _SimdInt4 _b) {
#ifdef OZZ_SIMD_SSE4_1
  return _mm_min_epi32(_mm_max_epi32(_a, _v), _b);
#else  // OZZ_SIMD_SSE4_1
  const __m128i min = OZZ_SSE_SELECT_I(_mm_cmplt_epi32(_v, _b), _v, _b);
  return OZZ_SSE_SELECT_I(_mm_cmpgt_epi32(_a, min), _a, min);
#endif  // OZZ_SIMD_SSE4_1
}
// Per-bit select: bits of _true where _b is set, bits of _false elsewhere.
OZZ_INLINE SimdInt4 Select(_SimdInt4 _b, _SimdInt4 _true, _SimdInt4 _false) {
  return OZZ_SSE_SELECT_I(_b, _true, _false);
}
OZZ_INLINE SimdInt4 And(_SimdInt4 _a, _SimdInt4 _b) {
  return _mm_and_si128(_a, _b);
}
// Returns _a & ~_b (note _mm_andnot_si128 negates its *first* argument).
OZZ_INLINE SimdInt4 AndNot(_SimdInt4 _a, _SimdInt4 _b) {
  return _mm_andnot_si128(_b, _a);
}
OZZ_INLINE SimdInt4 Or(_SimdInt4 _a, _SimdInt4 _b) {
  return _mm_or_si128(_a, _b);
}
OZZ_INLINE SimdInt4 Xor(_SimdInt4 _a, _SimdInt4 _b) {
  return _mm_xor_si128(_a, _b);
}
// Bitwise not: xor with an all-ones mask built from _v compared to itself.
OZZ_INLINE SimdInt4 Not(_SimdInt4 _v) {
  return _mm_xor_si128(_v, _mm_cmpeq_epi32(_v, _v));
}
// Per-component shifts: ShiftL logical left, ShiftR arithmetic right
// (sign-extending), ShiftRu logical right (zero-filling).
OZZ_INLINE SimdInt4 ShiftL(_SimdInt4 _v, int _bits) {
  return _mm_slli_epi32(_v, _bits);
}
OZZ_INLINE SimdInt4 ShiftR(_SimdInt4 _v, int _bits) {
  return _mm_srai_epi32(_v, _bits);
}
OZZ_INLINE SimdInt4 ShiftRu(_SimdInt4 _v, int _bits) {
  return _mm_srli_epi32(_v, _bits);
}
// Signed integer comparisons returning all-ones lane masks. Ne/Le/Ge are
// built by inverting the opposite SSE2 compare with an all-ones xor.
OZZ_INLINE SimdInt4 CmpEq(_SimdInt4 _a, _SimdInt4 _b) {
  return _mm_cmpeq_epi32(_a, _b);
}
OZZ_INLINE SimdInt4 CmpNe(_SimdInt4 _a, _SimdInt4 _b) {
  const __m128i eq = _mm_cmpeq_epi32(_a, _b);
  return _mm_xor_si128(eq, _mm_cmpeq_epi32(_a, _a));
}
OZZ_INLINE SimdInt4 CmpLt(_SimdInt4 _a, _SimdInt4 _b) {
  return _mm_cmpgt_epi32(_b, _a);
}
OZZ_INLINE SimdInt4 CmpLe(_SimdInt4 _a, _SimdInt4 _b) {
  const __m128i gt = _mm_cmpgt_epi32(_a, _b);
  return _mm_xor_si128(gt, _mm_cmpeq_epi32(_a, _a));
}
OZZ_INLINE SimdInt4 CmpGt(_SimdInt4 _a, _SimdInt4 _b) {
  return _mm_cmpgt_epi32(_a, _b);
}
OZZ_INLINE SimdInt4 CmpGe(_SimdInt4 _a, _SimdInt4 _b) {
  const __m128i lt = _mm_cmpgt_epi32(_b, _a);
  return _mm_xor_si128(lt, _mm_cmpeq_epi32(_a, _a));
}
// Returns the 4x4 identity matrix. The float constant 1.0f (0x3f800000) is
// synthesized in-register: (0xffffffff << 25) >> 2, then byte-shifted into
// the diagonal position of each column.
OZZ_INLINE Float4x4 Float4x4::identity() {
  const __m128i zero = _mm_setzero_si128();
  const __m128i ffff = _mm_cmpeq_epi32(zero, zero);
  const __m128i one = _mm_srli_epi32(_mm_slli_epi32(ffff, 25), 2);
  const __m128i x = _mm_srli_si128(one, 12);
  const Float4x4 ret = {{_mm_castsi128_ps(x),
                         _mm_castsi128_ps(_mm_slli_si128(x, 4)),
                         _mm_castsi128_ps(_mm_slli_si128(x, 8)),
                         _mm_castsi128_ps(_mm_slli_si128(one, 12))}};
  return ret;
}
// Returns the transpose of _m, using the classic 8-unpack 4x4 transpose.
OZZ_INLINE Float4x4 Transpose(const Float4x4& _m) {
  const __m128 tmp0 = _mm_unpacklo_ps(_m.cols[0], _m.cols[2]);
  const __m128 tmp1 = _mm_unpacklo_ps(_m.cols[1], _m.cols[3]);
  const __m128 tmp2 = _mm_unpackhi_ps(_m.cols[0], _m.cols[2]);
  const __m128 tmp3 = _mm_unpackhi_ps(_m.cols[1], _m.cols[3]);
  const Float4x4 ret = {
      {_mm_unpacklo_ps(tmp0, tmp1), _mm_unpackhi_ps(tmp0, tmp1),
       _mm_unpacklo_ps(tmp2, tmp3), _mm_unpackhi_ps(tmp2, tmp3)}};
  return ret;
}
// Computes the inverse of _m by cofactor expansion (Cramer's rule), following
// the Intel AP-928 "Inverse of 4x4 Matrix" SSE algorithm. If _invertible is
// non-null it receives the invertibility test result (x lane valid);
// otherwise a non-invertible matrix triggers the assert. A non-invertible
// input with non-null _invertible yields a zeroed result (det reciprocal is
// forced to 0).
inline Float4x4 Invert(const Float4x4& _m, SimdInt4* _invertible) {
  // Transposes _m into 4 row vectors c0..c3 (pairwise shuffles).
  const __m128 _t0 =
      _mm_shuffle_ps(_m.cols[0], _m.cols[1], _MM_SHUFFLE(1, 0, 1, 0));
  const __m128 _t1 =
      _mm_shuffle_ps(_m.cols[2], _m.cols[3], _MM_SHUFFLE(1, 0, 1, 0));
  const __m128 _t2 =
      _mm_shuffle_ps(_m.cols[0], _m.cols[1], _MM_SHUFFLE(3, 2, 3, 2));
  const __m128 _t3 =
      _mm_shuffle_ps(_m.cols[2], _m.cols[3], _MM_SHUFFLE(3, 2, 3, 2));
  const __m128 c0 = _mm_shuffle_ps(_t0, _t1, _MM_SHUFFLE(2, 0, 2, 0));
  const __m128 c1 = _mm_shuffle_ps(_t1, _t0, _MM_SHUFFLE(3, 1, 3, 1));
  const __m128 c2 = _mm_shuffle_ps(_t2, _t3, _MM_SHUFFLE(2, 0, 2, 0));
  const __m128 c3 = _mm_shuffle_ps(_t3, _t2, _MM_SHUFFLE(3, 1, 3, 1));
  // Accumulates the 4 columns of cofactors (minors) from 2x2 sub-determinant
  // products. The 0xB1 / 0x4E shuffles pair up the required elements.
  __m128 minor0, minor1, minor2, minor3, tmp1, tmp2;
  tmp1 = _mm_mul_ps(c2, c3);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0xB1);
  minor0 = _mm_mul_ps(c1, tmp1);
  minor1 = _mm_mul_ps(c0, tmp1);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0x4E);
  minor0 = OZZ_MSUB(c1, tmp1, minor0);
  minor1 = OZZ_MSUB(c0, tmp1, minor1);
  minor1 = OZZ_SHUFFLE_PS1(minor1, 0x4E);
  tmp1 = _mm_mul_ps(c1, c2);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0xB1);
  minor0 = OZZ_MADD(c3, tmp1, minor0);
  minor3 = _mm_mul_ps(c0, tmp1);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0x4E);
  minor0 = OZZ_NMADD(c3, tmp1, minor0);
  minor3 = OZZ_MSUB(c0, tmp1, minor3);
  minor3 = OZZ_SHUFFLE_PS1(minor3, 0x4E);
  tmp1 = _mm_mul_ps(OZZ_SHUFFLE_PS1(c1, 0x4E), c3);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0xB1);
  tmp2 = OZZ_SHUFFLE_PS1(c2, 0x4E);
  minor0 = OZZ_MADD(tmp2, tmp1, minor0);
  minor2 = _mm_mul_ps(c0, tmp1);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0x4E);
  minor0 = OZZ_NMADD(tmp2, tmp1, minor0);
  minor2 = OZZ_MSUB(c0, tmp1, minor2);
  minor2 = OZZ_SHUFFLE_PS1(minor2, 0x4E);
  tmp1 = _mm_mul_ps(c0, c1);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0xB1);
  minor2 = OZZ_MADD(c3, tmp1, minor2);
  minor3 = OZZ_MSUB(tmp2, tmp1, minor3);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0x4E);
  minor2 = OZZ_MSUB(c3, tmp1, minor2);
  minor3 = OZZ_NMADD(tmp2, tmp1, minor3);
  tmp1 = _mm_mul_ps(c0, c3);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0xB1);
  minor1 = OZZ_NMADD(tmp2, tmp1, minor1);
  minor2 = OZZ_MADD(c1, tmp1, minor2);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0x4E);
  minor1 = OZZ_MADD(tmp2, tmp1, minor1);
  minor2 = OZZ_NMADD(c1, tmp1, minor2);
  tmp1 = _mm_mul_ps(c0, tmp2);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0xB1);
  minor1 = OZZ_MADD(c3, tmp1, minor1);
  minor3 = OZZ_NMADD(c1, tmp1, minor3);
  tmp1 = OZZ_SHUFFLE_PS1(tmp1, 0x4E);
  minor1 = OZZ_NMADD(c3, tmp1, minor1);
  minor3 = OZZ_MADD(c1, tmp1, minor3);
  // Determinant: horizontal sum of c0 * minor0 into the x lane.
  __m128 det;
  det = _mm_mul_ps(c0, minor0);
  det = _mm_add_ps(OZZ_SHUFFLE_PS1(det, 0x4E), det);
  det = _mm_add_ss(OZZ_SHUFFLE_PS1(det, 0xB1), det);
  const SimdInt4 invertible = CmpNe(det, simd_float4::zero());
  assert((_invertible || AreAllTrue1(invertible)) &&
         "Matrix is not invertible");
  if (_invertible != nullptr) {
    *_invertible = invertible;
  }
  // Reciprocal of det, refined with one Newton-Raphson step; forced to 0 when
  // the matrix is not invertible.
  tmp1 = OZZ_SSE_SELECT_F(invertible, RcpEstNR(det), simd_float4::zero());
  det = OZZ_NMADDX(det, _mm_mul_ss(tmp1, tmp1), _mm_add_ss(tmp1, tmp1));
  det = OZZ_SHUFFLE_PS1(det, 0x00);
  // Copy the final columns
  const Float4x4 ret = {{_mm_mul_ps(det, minor0), _mm_mul_ps(det, minor1),
                         _mm_mul_ps(det, minor2), _mm_mul_ps(det, minor3)}};
  return ret;
}
// Returns a translation matrix: identity rotation/scale part, and the xyz of
// _v as the last column with w forced to 1.
Float4x4 Float4x4::Translation(_SimdFloat4 _v) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i ffff = _mm_cmpeq_epi32(zero, zero);
  const __m128i mask000f = _mm_slli_si128(ffff, 12);
  // In-register 1.0f constant (0x3f800000), see identity().
  const __m128i one = _mm_srli_epi32(_mm_slli_epi32(ffff, 25), 2);
  const __m128i x = _mm_srli_si128(one, 12);
  const Float4x4 ret = {
      {_mm_castsi128_ps(x), _mm_castsi128_ps(_mm_slli_si128(x, 4)),
       _mm_castsi128_ps(_mm_slli_si128(x, 8)),
       OZZ_SSE_SELECT_F(mask000f, _mm_castsi128_ps(one), _v)}};
  return ret;
}  // math
// Returns a scale matrix: the xyz of _v on the diagonal, 1 in the w position,
// zeros elsewhere (the w component of _v is masked out).
Float4x4 Float4x4::Scaling(_SimdFloat4 _v) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i ffff = _mm_cmpeq_epi32(zero, zero);
  const __m128i if000 = _mm_srli_si128(ffff, 12);
  // In-register 1.0f constant (0x3f800000), see identity().
  const __m128i ione = _mm_srli_epi32(_mm_slli_epi32(ffff, 25), 2);
  const Float4x4 ret = {
      {_mm_and_ps(_v, _mm_castsi128_ps(if000)),
       _mm_and_ps(_v, _mm_castsi128_ps(_mm_slli_si128(if000, 4))),
       _mm_and_ps(_v, _mm_castsi128_ps(_mm_slli_si128(if000, 8))),
       _mm_castsi128_ps(_mm_slli_si128(ione, 12))}};
  return ret;
}  // math
// Applies translation _v in _m's local space: only the last column changes,
// becoming cols[0]*v.x + cols[1]*v.y + cols[2]*v.z + cols[3].
OZZ_INLINE Float4x4 Translate(const Float4x4& _m, _SimdFloat4 _v) {
  const __m128 a01 = OZZ_MADD(_m.cols[0], OZZ_SSE_SPLAT_F(_v, 0),
                              _mm_mul_ps(_m.cols[1], OZZ_SSE_SPLAT_F(_v, 1)));
  const __m128 m3 = OZZ_MADD(_m.cols[2], OZZ_SSE_SPLAT_F(_v, 2), _m.cols[3]);
  const Float4x4 ret = {
      {_m.cols[0], _m.cols[1], _m.cols[2], _mm_add_ps(a01, m3)}};
  return ret;
}
// Scales the x, y and z axes of _m by the corresponding components of _v.
// The translation column (cols[3]) is left untouched.
OZZ_INLINE Float4x4 Scale(const Float4x4& _m, _SimdFloat4 _v) {
  const __m128 scaled_x = _mm_mul_ps(_m.cols[0], OZZ_SSE_SPLAT_F(_v, 0));
  const __m128 scaled_y = _mm_mul_ps(_m.cols[1], OZZ_SSE_SPLAT_F(_v, 1));
  const __m128 scaled_z = _mm_mul_ps(_m.cols[2], OZZ_SSE_SPLAT_F(_v, 2));
  const Float4x4 ret = {{scaled_x, scaled_y, scaled_z, _m.cols[3]}};
  return ret;
}
// Multiplies every column of _m component-wise by _v.
OZZ_INLINE Float4x4 ColumnMultiply(const Float4x4& _m, _SimdFloat4 _v) {
  const __m128 c0 = _mm_mul_ps(_m.cols[0], _v);
  const __m128 c1 = _mm_mul_ps(_m.cols[1], _v);
  const __m128 c2 = _mm_mul_ps(_m.cols[2], _v);
  const __m128 c3 = _mm_mul_ps(_m.cols[3], _v);
  const Float4x4 ret = {{c0, c1, c2, c3}};
  return ret;
}
// Tests whether the 3 axis columns of _m are normalized. Transposes the
// matrix so lane j of `dot` holds |cols[j].xyz|^2, compares against the
// tolerance window around 1, and returns the result in the x, y, z lanes
// (w lane forced to 0).
inline SimdInt4 IsNormalized(const Float4x4& _m) {
  const __m128 max = _mm_set_ps1(1.f + kNormalizationToleranceSq);
  const __m128 min = _mm_set_ps1(1.f - kNormalizationToleranceSq);
  const __m128 tmp0 = _mm_unpacklo_ps(_m.cols[0], _m.cols[2]);
  const __m128 tmp1 = _mm_unpacklo_ps(_m.cols[1], _m.cols[3]);
  const __m128 tmp2 = _mm_unpackhi_ps(_m.cols[0], _m.cols[2]);
  const __m128 tmp3 = _mm_unpackhi_ps(_m.cols[1], _m.cols[3]);
  const __m128 row0 = _mm_unpacklo_ps(tmp0, tmp1);
  const __m128 row1 = _mm_unpackhi_ps(tmp0, tmp1);
  const __m128 row2 = _mm_unpacklo_ps(tmp2, tmp3);
  // row0^2 + row1^2 + row2^2: per-column squared length of the xyz part.
  const __m128 dot =
      OZZ_MADD(row0, row0, OZZ_MADD(row1, row1, _mm_mul_ps(row2, row2)));
  const __m128 normalized =
      _mm_and_ps(_mm_cmplt_ps(dot, max), _mm_cmpgt_ps(dot, min));
  return _mm_castps_si128(
      _mm_and_ps(normalized, _mm_castsi128_ps(simd_int4::mask_fff0())));
}
// Same test as IsNormalized, but with the coarser tolerance
// kNormalizationToleranceEstSq, intended for values produced by the
// estimated (rsqrt-based) normalization functions.
inline SimdInt4 IsNormalizedEst(const Float4x4& _m) {
  const __m128 max = _mm_set_ps1(1.f + kNormalizationToleranceEstSq);
  const __m128 min = _mm_set_ps1(1.f - kNormalizationToleranceEstSq);
  const __m128 tmp0 = _mm_unpacklo_ps(_m.cols[0], _m.cols[2]);
  const __m128 tmp1 = _mm_unpacklo_ps(_m.cols[1], _m.cols[3]);
  const __m128 tmp2 = _mm_unpackhi_ps(_m.cols[0], _m.cols[2]);
  const __m128 tmp3 = _mm_unpackhi_ps(_m.cols[1], _m.cols[3]);
  const __m128 row0 = _mm_unpacklo_ps(tmp0, tmp1);
  const __m128 row1 = _mm_unpackhi_ps(tmp0, tmp1);
  const __m128 row2 = _mm_unpacklo_ps(tmp2, tmp3);
  // row0^2 + row1^2 + row2^2: per-column squared length of the xyz part.
  const __m128 dot =
      OZZ_MADD(row0, row0, OZZ_MADD(row1, row1, _mm_mul_ps(row2, row2)));
  const __m128 normalized =
      _mm_and_ps(_mm_cmplt_ps(dot, max), _mm_cmpgt_ps(dot, min));
  return _mm_castps_si128(
      _mm_and_ps(normalized, _mm_castsi128_ps(simd_int4::mask_fff0())));
}
// Tests (in the x lane of the result) that _m's rotation part is orthogonal
// and right-handed: the dot product of normalize(cols[0] x cols[1]) with
// normalize(cols[2]) must be close to 1.
OZZ_INLINE SimdInt4 IsOrthogonal(const Float4x4& _m) {
  const __m128 max = _mm_set_ss(1.f + kNormalizationToleranceSq);
  const __m128 min = _mm_set_ss(1.f - kNormalizationToleranceSq);
  const __m128 zero = _mm_setzero_ps();
  // Use simd_float4::zero() if one of the normalization fails. _m will then be
  // considered not orthogonal.
  const SimdFloat4 cross = NormalizeSafe3(Cross3(_m.cols[0], _m.cols[1]), zero);
  const SimdFloat4 at = NormalizeSafe3(_m.cols[2], zero);
  SimdFloat4 dot;
  OZZ_SSE_DOT3_F(cross, at, dot);
  __m128 dotx000 = _mm_move_ss(zero, dot);
  return _mm_castps_si128(
      _mm_and_ps(_mm_cmplt_ss(dotx000, max), _mm_cmpgt_ss(dotx000, min)));
}
// Converts the rotation part of _m (asserted normalized and orthogonal) to a
// quaternion {x, y, z, w}. Branchless: all four candidate quaternions (one
// per dominant diagonal term, plus the trace case) are computed, then the
// right one is selected with masks — avoiding the branches of the classic
// Shoemake algorithm.
inline SimdFloat4 ToQuaternion(const Float4x4& _m) {
  assert(AreAllTrue3(IsNormalizedEst(_m)));
  assert(AreAllTrue1(IsOrthogonal(_m)));
  // Prepares constants.
  const __m128i zero = _mm_setzero_si128();
  const __m128i ffff = _mm_cmpeq_epi32(zero, zero);
  const __m128 half = _mm_set1_ps(0.5f);
  const __m128i mask_f000 = _mm_srli_si128(ffff, 12);
  const __m128i mask_000f = _mm_slli_si128(ffff, 12);
  const __m128 one =
      _mm_castsi128_ps(_mm_srli_epi32(_mm_slli_epi32(ffff, 25), 2));
  const __m128i mask_0f00 = _mm_slli_si128(mask_f000, 4);
  const __m128i mask_00f0 = _mm_slli_si128(mask_f000, 8);
  // Gathers the diagonal terms m00, m11, m22 and their cyclic permutations.
  const __m128 xx_yy = OZZ_SSE_SELECT_F(mask_0f00, _m.cols[1], _m.cols[0]);
  const __m128 xx_yy_0010 = OZZ_SHUFFLE_PS1(xx_yy, _MM_SHUFFLE(0, 0, 1, 0));
  const __m128 xx_yy_zz_xx =
      OZZ_SSE_SELECT_F(mask_00f0, _m.cols[2], xx_yy_0010);
  const __m128 yy_zz_xx_yy =
      OZZ_SHUFFLE_PS1(xx_yy_zz_xx, _MM_SHUFFLE(1, 0, 2, 1));
  const __m128 zz_xx_yy_zz =
      OZZ_SHUFFLE_PS1(xx_yy_zz_xx, _MM_SHUFFLE(2, 1, 0, 2));
  // diag_sum's x lane is the matrix trace (of the 3x3 part); radicand holds
  // the 4 candidate squared magnitudes (4q_i^2).
  const __m128 diag_sum =
      _mm_add_ps(_mm_add_ps(xx_yy_zz_xx, yy_zz_xx_yy), zz_xx_yy_zz);
  const __m128 diag_diff =
      _mm_sub_ps(_mm_sub_ps(xx_yy_zz_xx, yy_zz_xx_yy), zz_xx_yy_zz);
  const __m128 radicand =
      _mm_add_ps(OZZ_SSE_SELECT_F(mask_000f, diag_sum, diag_diff), one);
  // NOTE(review): operator/ on __m128 relies on a compiler vector extension
  // or an overload provided elsewhere; _mm_div_ps would be more portable.
  const __m128 invSqrt = one / _mm_sqrt_ps(radicand);
  // Gathers the off-diagonal sums/differences (m21-m12, ... etc.).
  __m128 zy_xz_yx = OZZ_SSE_SELECT_F(mask_00f0, _m.cols[1], _m.cols[0]);
  zy_xz_yx = OZZ_SHUFFLE_PS1(zy_xz_yx, _MM_SHUFFLE(0, 1, 2, 2));
  zy_xz_yx =
      OZZ_SSE_SELECT_F(mask_0f00, OZZ_SSE_SPLAT_F(_m.cols[2], 0), zy_xz_yx);
  __m128 yz_zx_xy = OZZ_SSE_SELECT_F(mask_f000, _m.cols[1], _m.cols[0]);
  yz_zx_xy = OZZ_SHUFFLE_PS1(yz_zx_xy, _MM_SHUFFLE(0, 0, 2, 0));
  yz_zx_xy =
      OZZ_SSE_SELECT_F(mask_f000, OZZ_SSE_SPLAT_F(_m.cols[2], 1), yz_zx_xy);
  const __m128 sum = _mm_add_ps(zy_xz_yx, yz_zx_xy);
  const __m128 diff = _mm_sub_ps(zy_xz_yx, yz_zx_xy);
  const __m128 scale = _mm_mul_ps(invSqrt, half);
  // Builds the four candidate quaternions res0..res3.
  const __m128 sum0 = OZZ_SHUFFLE_PS1(sum, _MM_SHUFFLE(0, 1, 2, 0));
  const __m128 sum1 = OZZ_SHUFFLE_PS1(sum, _MM_SHUFFLE(0, 0, 0, 2));
  const __m128 sum2 = OZZ_SHUFFLE_PS1(sum, _MM_SHUFFLE(0, 0, 0, 1));
  __m128 res0 = OZZ_SSE_SELECT_F(mask_000f, OZZ_SSE_SPLAT_F(diff, 0), sum0);
  __m128 res1 = OZZ_SSE_SELECT_F(mask_000f, OZZ_SSE_SPLAT_F(diff, 1), sum1);
  __m128 res2 = OZZ_SSE_SELECT_F(mask_000f, OZZ_SSE_SPLAT_F(diff, 2), sum2);
  res0 = _mm_mul_ps(OZZ_SSE_SELECT_F(mask_f000, radicand, res0),
                    OZZ_SSE_SPLAT_F(scale, 0));
  res1 = _mm_mul_ps(OZZ_SSE_SELECT_F(mask_0f00, radicand, res1),
                    OZZ_SSE_SPLAT_F(scale, 1));
  res2 = _mm_mul_ps(OZZ_SSE_SELECT_F(mask_00f0, radicand, res2),
                    OZZ_SSE_SPLAT_F(scale, 2));
  __m128 res3 = _mm_mul_ps(OZZ_SSE_SELECT_F(mask_000f, radicand, diff),
                           OZZ_SSE_SPLAT_F(scale, 3));
  // Selects the candidate with the best numerical conditioning: largest
  // diagonal term, or the trace case when the trace is positive.
  const __m128 xx = OZZ_SSE_SPLAT_F(_m.cols[0], 0);
  const __m128 yy = OZZ_SSE_SPLAT_F(_m.cols[1], 1);
  const __m128 zz = OZZ_SSE_SPLAT_F(_m.cols[2], 2);
  const __m128i cond0 = _mm_castps_si128(_mm_cmpgt_ps(yy, xx));
  const __m128i cond1 =
      _mm_castps_si128(_mm_and_ps(_mm_cmpgt_ps(zz, xx), _mm_cmpgt_ps(zz, yy)));
  const __m128i cond2 = _mm_castps_si128(
      _mm_cmpgt_ps(OZZ_SSE_SPLAT_F(diag_sum, 0), _mm_castsi128_ps(zero)));
  __m128 res = OZZ_SSE_SELECT_F(cond0, res1, res0);
  res = OZZ_SSE_SELECT_F(cond1, res2, res);
  res = OZZ_SSE_SELECT_F(cond2, res3, res);
  assert(AreAllTrue1(IsNormalizedEst4(res)));
  return res;
}
// Decomposes the affine matrix _m into translation, rotation (quaternion) and
// scale. Returns false when the decomposition fails: at least two axes have a
// near-zero scale, in which case the rotation cannot be reconstructed.
inline bool ToAffine(const Float4x4& _m, SimdFloat4* _translation,
                     SimdFloat4* _quaternion, SimdFloat4* _scale) {
  const __m128 zero = _mm_setzero_ps();
  const __m128 one = simd_float4::one();
  const __m128i fff0 = simd_int4::mask_fff0();
  const __m128 max = _mm_set_ps1(kOrthogonalisationToleranceSq);
  const __m128 min = _mm_set_ps1(-kOrthogonalisationToleranceSq);
  // Extracts translation.
  *_translation = OZZ_SSE_SELECT_F(fff0, _m.cols[3], one);
  // Extracts scale.
  const __m128 m_tmp0 = _mm_unpacklo_ps(_m.cols[0], _m.cols[2]);
  const __m128 m_tmp1 = _mm_unpacklo_ps(_m.cols[1], _m.cols[3]);
  const __m128 m_tmp2 = _mm_unpackhi_ps(_m.cols[0], _m.cols[2]);
  const __m128 m_tmp3 = _mm_unpackhi_ps(_m.cols[1], _m.cols[3]);
  const __m128 m_row0 = _mm_unpacklo_ps(m_tmp0, m_tmp1);
  const __m128 m_row1 = _mm_unpackhi_ps(m_tmp0, m_tmp1);
  const __m128 m_row2 = _mm_unpacklo_ps(m_tmp2, m_tmp3);
  // Lane j of dot holds |cols[j].xyz|^2, hence abs_scale per axis.
  const __m128 dot = OZZ_MADD(
      m_row0, m_row0, OZZ_MADD(m_row1, m_row1, _mm_mul_ps(m_row2, m_row2)));
  const __m128 abs_scale = _mm_sqrt_ps(dot);
  // zero_axis flags axes whose squared length is within tolerance of 0.
  const __m128 zero_axis =
      _mm_and_ps(_mm_cmplt_ps(dot, max), _mm_cmpgt_ps(dot, min));
  // Builds an orthonormal matrix in order to support quaternion extraction.
  // Picks a non-degenerate axis as reference and rebuilds the other two with
  // cross products; fails (returns false) when 2+ axes are degenerate.
  Float4x4 orthonormal;
  int mask = _mm_movemask_ps(zero_axis);
  if (mask & 1) {  // x axis is null, rebuild from y.
    if (mask & 6) {
      return false;
    }
    orthonormal.cols[1] = _mm_div_ps(_m.cols[1], OZZ_SSE_SPLAT_F(abs_scale, 1));
    orthonormal.cols[0] = Normalize3(Cross3(orthonormal.cols[1], _m.cols[2]));
    orthonormal.cols[2] =
        Normalize3(Cross3(orthonormal.cols[0], orthonormal.cols[1]));
  } else if (mask & 4) {  // z axis is null, rebuild from x.
    if (mask & 3) {
      return false;
    }
    orthonormal.cols[0] = _mm_div_ps(_m.cols[0], OZZ_SSE_SPLAT_F(abs_scale, 0));
    orthonormal.cols[2] = Normalize3(Cross3(orthonormal.cols[0], _m.cols[1]));
    orthonormal.cols[1] =
        Normalize3(Cross3(orthonormal.cols[2], orthonormal.cols[0]));
  } else {  // Favor z axis in the default case
    if (mask & 5) {
      return false;
    }
    orthonormal.cols[2] = _mm_div_ps(_m.cols[2], OZZ_SSE_SPLAT_F(abs_scale, 2));
    orthonormal.cols[1] = Normalize3(Cross3(orthonormal.cols[2], _m.cols[0]));
    orthonormal.cols[0] =
        Normalize3(Cross3(orthonormal.cols[1], orthonormal.cols[2]));
  }
  orthonormal.cols[3] = simd_float4::w_axis();
  // Get back scale signs in case of reflexions
  const __m128 o_tmp0 =
      _mm_unpacklo_ps(orthonormal.cols[0], orthonormal.cols[2]);
  const __m128 o_tmp1 =
      _mm_unpacklo_ps(orthonormal.cols[1], orthonormal.cols[3]);
  const __m128 o_tmp2 =
      _mm_unpackhi_ps(orthonormal.cols[0], orthonormal.cols[2]);
  const __m128 o_tmp3 =
      _mm_unpackhi_ps(orthonormal.cols[1], orthonormal.cols[3]);
  const __m128 o_row0 = _mm_unpacklo_ps(o_tmp0, o_tmp1);
  const __m128 o_row1 = _mm_unpackhi_ps(o_tmp0, o_tmp1);
  const __m128 o_row2 = _mm_unpacklo_ps(o_tmp2, o_tmp3);
  // A negative dot between an original axis and its orthonormalized version
  // means the axis was mirrored: negate that scale component.
  const __m128 scale_dot = OZZ_MADD(
      o_row0, m_row0, OZZ_MADD(o_row1, m_row1, _mm_mul_ps(o_row2, m_row2)));
  const __m128i cond = _mm_castps_si128(_mm_cmpgt_ps(scale_dot, zero));
  const __m128 cfalse = _mm_sub_ps(zero, abs_scale);
  const __m128 scale = OZZ_SSE_SELECT_F(cond, abs_scale, cfalse);
  *_scale = OZZ_SSE_SELECT_F(fff0, scale, one);
  // Extracts quaternion.
  *_quaternion = ToQuaternion(orthonormal);
  return true;
}
// Builds a rotation matrix from the three Euler angles packed in _v.
// Cos/Sin are evaluated for all angles at once with SIMD, then scalar lanes
// are extracted to assemble the columns.
// NOTE(review): the axis convention (which lane is yaw/pitch/roll) is not
// evident from this code alone - confirm against ozz documentation.
inline Float4x4 Float4x4::FromEuler(_SimdFloat4 _v) {
  const __m128 cos = Cos(_v);
  const __m128 sin = Sin(_v);
  // Per-angle cosine/sine scalars for the x, y and z lanes of _v.
  const float cx = GetX(cos);
  const float sx = GetX(sin);
  const float cy = GetY(cos);
  const float sy = GetY(sin);
  const float cz = GetZ(cos);
  const float sz = GetZ(sin);
  // Products shared by several matrix terms.
  const float sycz = sy * cz;
  const float sysz = sy * sz;
  const Float4x4 ret = {{simd_float4::Load(cx * cy, sx * sz - cx * sycz,
                                           cx * sysz + sx * cz, 0.f),
                         simd_float4::Load(sy, cy * cz, -cy * sz, 0.f),
                         simd_float4::Load(-sx * cy, sx * sycz + cx * sz,
                                           -sx * sysz + cx * cz, 0.f),
                         simd_float4::w_axis()}};
  return ret;
}
// Builds a rotation matrix from a unit axis and an angle (Rodrigues' rotation
// formula), entirely in SIMD registers.
inline Float4x4 Float4x4::FromAxisAngle(_SimdFloat4 _axis, _SimdFloat4 _angle) {
  assert(AreAllTrue1(IsNormalizedEst3(_axis)));
  // Constants built with register-only integer arithmetic (no memory loads):
  // ione is the bit pattern of 1.f in every lane (0xffffffff<<25 then >>2 ==
  // 0x3f800000), fff0 masks out the w lane, w_axis is {0, 0, 0, 1}.
  const __m128i zero = _mm_setzero_si128();
  const __m128i ffff = _mm_cmpeq_epi32(zero, zero);
  const __m128i ione = _mm_srli_epi32(_mm_slli_epi32(ffff, 25), 2);
  const __m128 fff0 = _mm_castsi128_ps(_mm_srli_si128(ffff, 4));
  const __m128 one = _mm_castsi128_ps(ione);
  const __m128 w_axis = _mm_castsi128_ps(_mm_slli_si128(ione, 12));
  const __m128 sin = SplatX(SinX(_angle));
  const __m128 cos = SplatX(CosX(_angle));
  const __m128 one_minus_cos = _mm_sub_ps(one, cos);
  // v0 = (1 - cos) * {y*z, z*x, x*y, ...} cross terms of the axis.
  const __m128 v0 =
      _mm_mul_ps(_mm_mul_ps(one_minus_cos,
                            OZZ_SHUFFLE_PS1(_axis, _MM_SHUFFLE(3, 0, 2, 1))),
                 OZZ_SHUFFLE_PS1(_axis, _MM_SHUFFLE(3, 1, 0, 2)));
  // r0 = (1 - cos) * axis^2 + cos : the diagonal terms.
  const __m128 r0 =
      _mm_add_ps(_mm_mul_ps(_mm_mul_ps(one_minus_cos, _axis), _axis), cos);
  // r1 / r2 = off-diagonal terms: cross terms +/- sin * axis.
  const __m128 r1 = _mm_add_ps(_mm_mul_ps(sin, _axis), v0);
  const __m128 r2 = _mm_sub_ps(v0, _mm_mul_ps(sin, _axis));
  const __m128 r0fff0 = _mm_and_ps(r0, fff0);
  // Scatters the diagonal/off-diagonal terms to their column positions.
  const __m128 r1r22120 = _mm_shuffle_ps(r1, r2, _MM_SHUFFLE(2, 1, 2, 0));
  const __m128 v1 = OZZ_SHUFFLE_PS1(r1r22120, _MM_SHUFFLE(0, 3, 2, 1));
  const __m128 r1r20011 = _mm_shuffle_ps(r1, r2, _MM_SHUFFLE(0, 0, 1, 1));
  const __m128 v2 = OZZ_SHUFFLE_PS1(r1r20011, _MM_SHUFFLE(2, 0, 2, 0));
  const __m128 t0 = _mm_shuffle_ps(r0fff0, v1, _MM_SHUFFLE(1, 0, 3, 0));
  const __m128 t1 = _mm_shuffle_ps(r0fff0, v1, _MM_SHUFFLE(3, 2, 3, 1));
  const Float4x4 ret = {{OZZ_SHUFFLE_PS1(t0, _MM_SHUFFLE(1, 3, 2, 0)),
                         OZZ_SHUFFLE_PS1(t1, _MM_SHUFFLE(1, 3, 0, 2)),
                         _mm_shuffle_ps(v2, r0fff0, _MM_SHUFFLE(3, 2, 1, 0)),
                         w_axis}};
  return ret;
}
// Builds a rotation matrix from a unit quaternion, computing the standard
// 1 - 2(qj^2 + qk^2) diagonal and 2(qi*qj +/- qk*qw) off-diagonal expansion
// 4-wide, then scattering the terms into columns with shuffles.
inline Float4x4 Float4x4::FromQuaternion(_SimdFloat4 _quaternion) {
  assert(AreAllTrue1(IsNormalizedEst4(_quaternion)));
  // Register-built constants: fff0 masks the w lane, c1110 = {1, 1, 1, 0},
  // w_axis = {0, 0, 0, 1}; ione holds the bit pattern of 1.f in every lane.
  const __m128i zero = _mm_setzero_si128();
  const __m128i ffff = _mm_cmpeq_epi32(zero, zero);
  const __m128i ione = _mm_srli_epi32(_mm_slli_epi32(ffff, 25), 2);
  const __m128 fff0 = _mm_castsi128_ps(_mm_srli_si128(ffff, 4));
  const __m128 c1110 = _mm_castsi128_ps(_mm_srli_si128(ione, 4));
  const __m128 w_axis = _mm_castsi128_ps(_mm_slli_si128(ione, 12));
  // vsum = 2q, vms = 2q*q (doubled component squares).
  const __m128 vsum = _mm_add_ps(_quaternion, _quaternion);
  const __m128 vms = _mm_mul_ps(_quaternion, vsum);
  // r0 = diagonal terms 1 - 2(qj^2 + qk^2); the w lane stays 0 (fff0 mask
  // and c1110's zero w lane).
  const __m128 r0 = _mm_sub_ps(
      _mm_sub_ps(
          c1110,
          _mm_and_ps(OZZ_SHUFFLE_PS1(vms, _MM_SHUFFLE(3, 0, 0, 1)), fff0)),
      _mm_and_ps(OZZ_SHUFFLE_PS1(vms, _MM_SHUFFLE(3, 1, 2, 2)), fff0));
  // v0 / v1: the 2*qi*qj and 2*qw*qk products used by off-diagonal terms.
  const __m128 v0 =
      _mm_mul_ps(OZZ_SHUFFLE_PS1(_quaternion, _MM_SHUFFLE(3, 1, 0, 0)),
                 OZZ_SHUFFLE_PS1(vsum, _MM_SHUFFLE(3, 2, 1, 2)));
  const __m128 v1 =
      _mm_mul_ps(OZZ_SHUFFLE_PS1(_quaternion, _MM_SHUFFLE(3, 3, 3, 3)),
                 OZZ_SHUFFLE_PS1(vsum, _MM_SHUFFLE(3, 0, 2, 1)));
  const __m128 r1 = _mm_add_ps(v0, v1);
  const __m128 r2 = _mm_sub_ps(v0, v1);
  // Scatters diagonal (r0) and off-diagonal (r1/r2) terms into columns.
  const __m128 r1r21021 = _mm_shuffle_ps(r1, r2, _MM_SHUFFLE(1, 0, 2, 1));
  const __m128 v2 = OZZ_SHUFFLE_PS1(r1r21021, _MM_SHUFFLE(1, 3, 2, 0));
  const __m128 r1r22200 = _mm_shuffle_ps(r1, r2, _MM_SHUFFLE(2, 2, 0, 0));
  const __m128 v3 = OZZ_SHUFFLE_PS1(r1r22200, _MM_SHUFFLE(2, 0, 2, 0));
  const __m128 q0 = _mm_shuffle_ps(r0, v2, _MM_SHUFFLE(1, 0, 3, 0));
  const __m128 q1 = _mm_shuffle_ps(r0, v2, _MM_SHUFFLE(3, 2, 3, 1));
  const Float4x4 ret = {{OZZ_SHUFFLE_PS1(q0, _MM_SHUFFLE(1, 3, 2, 0)),
                         OZZ_SHUFFLE_PS1(q1, _MM_SHUFFLE(1, 3, 0, 2)),
                         _mm_shuffle_ps(v3, r0, _MM_SHUFFLE(3, 2, 1, 0)),
                         w_axis}};
  return ret;
}
// Builds an affine matrix from a translation, a unit quaternion rotation and
// a per-axis scale. The rotation columns are computed exactly as in
// FromQuaternion, then each is multiplied by its scale component; the last
// column is packed as {tx, ty, tz, 1}.
inline Float4x4 Float4x4::FromAffine(_SimdFloat4 _translation,
                                     _SimdFloat4 _quaternion,
                                     _SimdFloat4 _scale) {
  assert(AreAllTrue1(IsNormalizedEst4(_quaternion)));
  // Register-built constants, as in FromQuaternion: fff0 masks the w lane,
  // c1110 = {1, 1, 1, 0}.
  const __m128i zero = _mm_setzero_si128();
  const __m128i ffff = _mm_cmpeq_epi32(zero, zero);
  const __m128i ione = _mm_srli_epi32(_mm_slli_epi32(ffff, 25), 2);
  const __m128 fff0 = _mm_castsi128_ps(_mm_srli_si128(ffff, 4));
  const __m128 c1110 = _mm_castsi128_ps(_mm_srli_si128(ione, 4));
  // Quaternion to rotation terms, see FromQuaternion for the details.
  const __m128 vsum = _mm_add_ps(_quaternion, _quaternion);
  const __m128 vms = _mm_mul_ps(_quaternion, vsum);
  const __m128 r0 = _mm_sub_ps(
      _mm_sub_ps(
          c1110,
          _mm_and_ps(OZZ_SHUFFLE_PS1(vms, _MM_SHUFFLE(3, 0, 0, 1)), fff0)),
      _mm_and_ps(OZZ_SHUFFLE_PS1(vms, _MM_SHUFFLE(3, 1, 2, 2)), fff0));
  const __m128 v0 =
      _mm_mul_ps(OZZ_SHUFFLE_PS1(_quaternion, _MM_SHUFFLE(3, 1, 0, 0)),
                 OZZ_SHUFFLE_PS1(vsum, _MM_SHUFFLE(3, 2, 1, 2)));
  const __m128 v1 =
      _mm_mul_ps(OZZ_SHUFFLE_PS1(_quaternion, _MM_SHUFFLE(3, 3, 3, 3)),
                 OZZ_SHUFFLE_PS1(vsum, _MM_SHUFFLE(3, 0, 2, 1)));
  const __m128 r1 = _mm_add_ps(v0, v1);
  const __m128 r2 = _mm_sub_ps(v0, v1);
  const __m128 r1r21021 = _mm_shuffle_ps(r1, r2, _MM_SHUFFLE(1, 0, 2, 1));
  const __m128 v2 = OZZ_SHUFFLE_PS1(r1r21021, _MM_SHUFFLE(1, 3, 2, 0));
  const __m128 r1r22200 = _mm_shuffle_ps(r1, r2, _MM_SHUFFLE(2, 2, 0, 0));
  const __m128 v3 = OZZ_SHUFFLE_PS1(r1r22200, _MM_SHUFFLE(2, 0, 2, 0));
  const __m128 q0 = _mm_shuffle_ps(r0, v2, _MM_SHUFFLE(1, 0, 3, 0));
  const __m128 q1 = _mm_shuffle_ps(r0, v2, _MM_SHUFFLE(3, 2, 3, 1));
  // Each rotation column is scaled by the matching _scale lane; the last
  // column is {tx, ty, tz, 1}: movelh takes {tx, ty} and unpackhi pairs
  // {tz, 1} from _translation and c1110.
  const Float4x4 ret = {
      {_mm_mul_ps(OZZ_SHUFFLE_PS1(q0, _MM_SHUFFLE(1, 3, 2, 0)),
                  OZZ_SSE_SPLAT_F(_scale, 0)),
       _mm_mul_ps(OZZ_SHUFFLE_PS1(q1, _MM_SHUFFLE(1, 3, 0, 2)),
                  OZZ_SSE_SPLAT_F(_scale, 1)),
       _mm_mul_ps(_mm_shuffle_ps(v3, r0, _MM_SHUFFLE(3, 2, 1, 0)),
                  OZZ_SSE_SPLAT_F(_scale, 2)),
       _mm_movelh_ps(_translation, _mm_unpackhi_ps(_translation, c1110))}};
  return ret;
}
// Transforms point _v by matrix _m, i.e. returns
// _m.cols[0]*x + _m.cols[1]*y + _m.cols[2]*z + _m.cols[3] (w treated as 1).
OZZ_INLINE ozz::math::SimdFloat4 TransformPoint(const ozz::math::Float4x4& _m,
                                                ozz::math::_SimdFloat4 _v) {
  const __m128 zw_part =
      OZZ_MADD(OZZ_SSE_SPLAT_F(_v, 2), _m.cols[2], _m.cols[3]);
  const __m128 x_part = _mm_mul_ps(OZZ_SSE_SPLAT_F(_v, 0), _m.cols[0]);
  const __m128 xy_part = OZZ_MADD(OZZ_SSE_SPLAT_F(_v, 1), _m.cols[1], x_part);
  return _mm_add_ps(xy_part, zw_part);
}
// Transforms direction _v by matrix _m: only the rotation/scale part is
// applied (column 3 is ignored, w treated as 0).
OZZ_INLINE ozz::math::SimdFloat4 TransformVector(const ozz::math::Float4x4& _m,
                                                 ozz::math::_SimdFloat4 _v) {
  const __m128 x_part = _mm_mul_ps(_m.cols[0], OZZ_SSE_SPLAT_F(_v, 0));
  const __m128 y_part = _mm_mul_ps(_m.cols[1], OZZ_SSE_SPLAT_F(_v, 1));
  const __m128 xz_part = OZZ_MADD(_m.cols[2], OZZ_SSE_SPLAT_F(_v, 2), x_part);
  return _mm_add_ps(y_part, xz_part);
}
// Full matrix * vector product, using all four components of _v (unlike
// TransformPoint / TransformVector which hard-wire w).
OZZ_INLINE ozz::math::SimdFloat4 operator*(const ozz::math::Float4x4& _m,
                                           ozz::math::_SimdFloat4 _v) {
  const __m128 x_part = _mm_mul_ps(OZZ_SSE_SPLAT_F(_v, 0), _m.cols[0]);
  const __m128 z_part = _mm_mul_ps(OZZ_SSE_SPLAT_F(_v, 2), _m.cols[2]);
  const __m128 xy_part = OZZ_MADD(OZZ_SSE_SPLAT_F(_v, 1), _m.cols[1], x_part);
  const __m128 zw_part = OZZ_MADD(OZZ_SSE_SPLAT_F(_v, 3), _m.cols[3], z_part);
  return _mm_add_ps(xy_part, zw_part);
}
// Matrix product _a * _b. Each scope computes one output column as the
// linear combination of _a's columns weighted by the components of the
// corresponding _b column (same scheme as the matrix * vector operator).
inline ozz::math::Float4x4 operator*(const ozz::math::Float4x4& _a,
                                     const ozz::math::Float4x4& _b) {
  ozz::math::Float4x4 ret;
  {  // ret.cols[0] = _a * _b.cols[0]
    const __m128 xxxx = _mm_mul_ps(OZZ_SSE_SPLAT_F(_b.cols[0], 0), _a.cols[0]);
    const __m128 zzzz = _mm_mul_ps(OZZ_SSE_SPLAT_F(_b.cols[0], 2), _a.cols[2]);
    const __m128 a01 =
        OZZ_MADD(OZZ_SSE_SPLAT_F(_b.cols[0], 1), _a.cols[1], xxxx);
    const __m128 a23 =
        OZZ_MADD(OZZ_SSE_SPLAT_F(_b.cols[0], 3), _a.cols[3], zzzz);
    ret.cols[0] = _mm_add_ps(a01, a23);
  }
  {  // ret.cols[1] = _a * _b.cols[1]
    const __m128 xxxx = _mm_mul_ps(OZZ_SSE_SPLAT_F(_b.cols[1], 0), _a.cols[0]);
    const __m128 zzzz = _mm_mul_ps(OZZ_SSE_SPLAT_F(_b.cols[1], 2), _a.cols[2]);
    const __m128 a01 =
        OZZ_MADD(OZZ_SSE_SPLAT_F(_b.cols[1], 1), _a.cols[1], xxxx);
    const __m128 a23 =
        OZZ_MADD(OZZ_SSE_SPLAT_F(_b.cols[1], 3), _a.cols[3], zzzz);
    ret.cols[1] = _mm_add_ps(a01, a23);
  }
  {  // ret.cols[2] = _a * _b.cols[2]
    const __m128 xxxx = _mm_mul_ps(OZZ_SSE_SPLAT_F(_b.cols[2], 0), _a.cols[0]);
    const __m128 zzzz = _mm_mul_ps(OZZ_SSE_SPLAT_F(_b.cols[2], 2), _a.cols[2]);
    const __m128 a01 =
        OZZ_MADD(OZZ_SSE_SPLAT_F(_b.cols[2], 1), _a.cols[1], xxxx);
    const __m128 a23 =
        OZZ_MADD(OZZ_SSE_SPLAT_F(_b.cols[2], 3), _a.cols[3], zzzz);
    ret.cols[2] = _mm_add_ps(a01, a23);
  }
  {  // ret.cols[3] = _a * _b.cols[3]
    const __m128 xxxx = _mm_mul_ps(OZZ_SSE_SPLAT_F(_b.cols[3], 0), _a.cols[0]);
    const __m128 zzzz = _mm_mul_ps(OZZ_SSE_SPLAT_F(_b.cols[3], 2), _a.cols[2]);
    const __m128 a01 =
        OZZ_MADD(OZZ_SSE_SPLAT_F(_b.cols[3], 1), _a.cols[1], xxxx);
    const __m128 a23 =
        OZZ_MADD(OZZ_SSE_SPLAT_F(_b.cols[3], 3), _a.cols[3], zzzz);
    ret.cols[3] = _mm_add_ps(a01, a23);
  }
  return ret;
}
// Component-wise sum of two matrices.
OZZ_INLINE ozz::math::Float4x4 operator+(const ozz::math::Float4x4& _a,
                                         const ozz::math::Float4x4& _b) {
  ozz::math::Float4x4 sum;
  sum.cols[0] = _mm_add_ps(_a.cols[0], _b.cols[0]);
  sum.cols[1] = _mm_add_ps(_a.cols[1], _b.cols[1]);
  sum.cols[2] = _mm_add_ps(_a.cols[2], _b.cols[2]);
  sum.cols[3] = _mm_add_ps(_a.cols[3], _b.cols[3]);
  return sum;
}
// Component-wise difference of two matrices.
OZZ_INLINE ozz::math::Float4x4 operator-(const ozz::math::Float4x4& _a,
                                         const ozz::math::Float4x4& _b) {
  ozz::math::Float4x4 diff;
  diff.cols[0] = _mm_sub_ps(_a.cols[0], _b.cols[0]);
  diff.cols[1] = _mm_sub_ps(_a.cols[1], _b.cols[1]);
  diff.cols[2] = _mm_sub_ps(_a.cols[2], _b.cols[2]);
  diff.cols[3] = _mm_sub_ps(_a.cols[3], _b.cols[3]);
  return diff;
}
} // namespace math
} // namespace ozz
#if !defined(OZZ_DISABLE_SSE_NATIVE_OPERATORS)
// Global arithmetic operators on the raw __m128 SimdFloat4 type.
// NOTE(review): the OZZ_DISABLE_SSE_NATIVE_OPERATORS switch presumably exists
// to avoid clashing with other libraries that also overload __m128 operators
// - confirm against ozz documentation before relying on it.
OZZ_INLINE ozz::math::SimdFloat4 operator+(ozz::math::_SimdFloat4 _a,
                                           ozz::math::_SimdFloat4 _b) {
  return _mm_add_ps(_a, _b);
}
OZZ_INLINE ozz::math::SimdFloat4 operator-(ozz::math::_SimdFloat4 _a,
                                           ozz::math::_SimdFloat4 _b) {
  return _mm_sub_ps(_a, _b);
}
// Unary negation, implemented as 0 - _v.
OZZ_INLINE ozz::math::SimdFloat4 operator-(ozz::math::_SimdFloat4 _v) {
  return _mm_sub_ps(_mm_setzero_ps(), _v);
}
OZZ_INLINE ozz::math::SimdFloat4 operator*(ozz::math::_SimdFloat4 _a,
                                           ozz::math::_SimdFloat4 _b) {
  return _mm_mul_ps(_a, _b);
}
OZZ_INLINE ozz::math::SimdFloat4 operator/(ozz::math::_SimdFloat4 _a,
                                           ozz::math::_SimdFloat4 _b) {
  return _mm_div_ps(_a, _b);
}
#endif  // !defined(OZZ_DISABLE_SSE_NATIVE_OPERATORS)
namespace ozz {
namespace math {
// Scalar float -> half conversion: splats the input into a SIMD register and
// reuses the 4-wide FloatToHalf below, keeping the low 16 bits of lane 0.
OZZ_INLINE uint16_t FloatToHalf(float _f) {
  const int h = _mm_cvtsi128_si32(FloatToHalf(_mm_set1_ps(_f)));
  return static_cast<uint16_t>(h);
}
// Scalar half -> float conversion, reusing the 4-wide HalfToFloat below.
OZZ_INLINE float HalfToFloat(uint16_t _h) {
  return _mm_cvtss_f32(HalfToFloat(_mm_set1_epi32(_h)));
}
// Half <-> Float implementation is based on:
// http://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/.
// 4-wide float -> half conversion. Each result lane holds a 16-bit half in
// its low bits. Handles NaN/Inf explicitly; see the article referenced above
// for the derivation of the "magic" rescaling trick.
inline SimdInt4 FloatToHalf(_SimdFloat4 _f) {
  const __m128i mask_sign = _mm_set1_epi32(0x80000000u);
  const __m128i mask_round = _mm_set1_epi32(~0xfffu);
  const __m128i f32infty = _mm_set1_epi32(255 << 23);
  const __m128 magic = _mm_castsi128_ps(_mm_set1_epi32(15 << 23));
  const __m128i nanbit = _mm_set1_epi32(0x200);
  const __m128i infty_as_fp16 = _mm_set1_epi32(0x7c00);
  const __m128 clamp = _mm_castsi128_ps(_mm_set1_epi32((31 << 23) - 0x1000));
  // Splits the sign bit from the absolute value so the rounding path works
  // on non-negative floats only; the sign is re-inserted at the end.
  const __m128 msign = _mm_castsi128_ps(mask_sign);
  const __m128 justsign = _mm_and_ps(msign, _f);
  const __m128 absf = _mm_xor_ps(_f, justsign);
  const __m128 mround = _mm_castsi128_ps(mask_round);
  const __m128i absf_int = _mm_castps_si128(absf);
  // Classification: bit pattern above +inf is NaN, below +inf is "normal"
  // (finite) for the purpose of this conversion.
  const __m128i b_isnan = _mm_cmpgt_epi32(absf_int, f32infty);
  const __m128i b_isnormal = _mm_cmpgt_epi32(f32infty, _mm_castps_si128(absf));
  const __m128i inf_or_nan =
      _mm_or_si128(_mm_and_si128(b_isnan, nanbit), infty_as_fp16);
  // Finite path: drop low mantissa bits, rebias the exponent via the magic
  // multiply, clamp, then shift the result down into half position.
  const __m128 fnosticky = _mm_and_ps(absf, mround);
  const __m128 scaled = _mm_mul_ps(fnosticky, magic);
  // Logically, we want PMINSD on "biased", but this should gen better code
  const __m128 clamped = _mm_min_ps(scaled, clamp);
  const __m128i biased =
      _mm_sub_epi32(_mm_castps_si128(clamped), _mm_castps_si128(mround));
  const __m128i shifted = _mm_srli_epi32(biased, 13);
  // Selects the finite result or the Inf/NaN encoding per lane.
  const __m128i normal = _mm_and_si128(shifted, b_isnormal);
  const __m128i not_normal = _mm_andnot_si128(b_isnormal, inf_or_nan);
  const __m128i joined = _mm_or_si128(normal, not_normal);
  // Moves the float sign bit (bit 31) down to the half sign bit (bit 15).
  const __m128i sign_shift = _mm_srli_epi32(_mm_castps_si128(justsign), 16);
  return _mm_or_si128(joined, sign_shift);
}
// 4-wide half -> float conversion. Each input lane holds a 16-bit half in
// its low bits.
OZZ_INLINE SimdFloat4 HalfToFloat(_SimdInt4 _h) {
  const __m128i mask_nosign = _mm_set1_epi32(0x7fff);
  const __m128 magic = _mm_castsi128_ps(_mm_set1_epi32((254 - 15) << 23));
  const __m128i was_infnan = _mm_set1_epi32(0x7bff);
  const __m128 exp_infnan = _mm_castsi128_ps(_mm_set1_epi32(255 << 23));
  // Moves exponent+mantissa to float position, then fixes the exponent bias
  // (254 - 15) with the "magic" multiply.
  const __m128i expmant = _mm_and_si128(mask_nosign, _h);
  const __m128i shifted = _mm_slli_epi32(expmant, 13);
  const __m128 scaled = _mm_mul_ps(_mm_castsi128_ps(shifted), magic);
  // Half values above 0x7bff were Inf/NaN: force the float exponent field
  // to all ones for those lanes.
  const __m128i b_wasinfnan = _mm_cmpgt_epi32(expmant, was_infnan);
  // Moves the half sign bit up to the float sign position (bit 31).
  const __m128i sign = _mm_slli_epi32(_mm_xor_si128(_h, expmant), 16);
  const __m128 infnanexp =
      _mm_and_ps(_mm_castsi128_ps(b_wasinfnan), exp_infnan);
  const __m128 sign_inf = _mm_or_ps(_mm_castsi128_ps(sign), infnanexp);
  return _mm_or_ps(scaled, sign_inf);
}
} // namespace math
} // namespace ozz
#undef OZZ_SHUFFLE_PS1
#undef OZZ_SSE_SPLAT_F
#undef OZZ_SSE_HADD2_F
#undef OZZ_SSE_HADD3_F
#undef OZZ_SSE_HADD4_F
#undef OZZ_SSE_DOT2_F
#undef OZZ_SSE_DOT3_F
#undef OZZ_SSE_DOT4_F
#undef OZZ_MADD
#undef OZZ_MSUB
#undef OZZ_NMADD
#undef OZZ_NMSUB
#undef OZZ_MADDX
#undef OZZ_MSUBX
#undef OZZ_NMADDX
#undef OZZ_NMSUBX
#undef OZZ_SSE_SELECT_F
#undef OZZ_SSE_SPLAT_I
#undef OZZ_SSE_SELECT_I
#endif // OZZ_OZZ_BASE_MATHS_INTERNAL_SIMD_MATH_SSE_INL_H_
|
import { fire } from 'simulant';
import { onWarn, initModule } from '../../helpers/test-config';
import { test } from 'qunit';
// Registers the touch-events test suite.
export default function() {
  initModule('events/touch-events.js');

  test("touch events safe to include when they don't exist in browser", assert => {
    assert.expect(1);

    // Silence warnings emitted for touch events the browser doesn't support.
    onWarn(() => {});

    const view = new Ractive({
      el: fixture,
      template: `
      <span id="test1" on-touchstart-touchend-touchleave-touchmove-touchcancel="foo"/>
      <span id="test2" on-touchstart-mousedown="foo"/>`
    });

    view.on('foo', () => {
      assert.ok(true);
    });

    // Only the mousedown leg is fired: touch events may not exist at all in
    // the host browser, which is exactly what this test tolerates.
    fire(view.find('#test2'), 'mousedown');
  });
}
|
import React from 'react';
import {
View,
Text,
Image,
Dimensions,
FlatList,
} from 'react-native';
import { TuyaSceneApi } from '../../../sdk';
import HeadView from '../../common/HeadView'
import BaseComponent from '../../common/BaseComponent'
import CenterItem from '../../common/CenterItem'
const { width } = Dimensions.get('window');
export default class DevicesFunctionPage extends BaseComponent {
constructor(props) {
super(props);
const params = this.props.navigation.state.params;
this.state = {
devId: params.devId,
FunctionList: [],
devName: params.devName,
isFromScene: params.isFromScene,
};
}
componentDidMount() {
TuyaSceneApi.getDeviceConditionOperationList({
devId: this.state.devId,
})
.then((data) => {
this.setState({
FunctionList: data,
});
})
}
_renderFunList(data) {
return (
<View
style={{
width,
height: 50,
alignItems: 'center',
justifyContent: 'center',
flexDirection: 'row',
}}
>
<Image source={{ uri: data.item.iconUrl }} />
<Text style={{ fontSize: 16, color: 'black' }}>{data.item.name}</Text>
</View>
);
}
renderHeaderView() {
return <HeadView
leftOnPress={() => this.props.navigation.pop()}
centerText={'Selection function'}
/>
}
renderContent() {
return (
<View
style={{
backgroundColor: '#F8F8F8',
flexDirection: 'column',
alignItems: 'center',
justifyContent: 'flex-start',
flex: 1,
}}
>
<FlatList
data={this.state.FunctionList}
renderItem={
({ item }) => (
<CenterItem
onPress={() => {
if (item.type == 'bool') {
// 进入开关
this.props.navigation.navigate('ActionBoolPage', {
item,
devId: this.state.devId,
devName: this.state.devName,
isFromScene: this.state.isFromScene,
});
} else {
this.showToast('Other types of UI are not yet complete')
}
}}
text={item.name}
/>
)
}
style={{ width }}
/>
</View>
);
}
}
|
# for the purpose of creating visualizations
# changed to starcraft_dream
import numpy as np
import gym
import os
import json
from scipy.misc import imresize as resize
from scipy.misc import toimage as toimage
from gym.spaces.box import Box
from gym.utils import seeding
# Rendered frame geometry: decoded frames are SCREEN_X x SCREEN_Y, upscaled
# by FACTOR for on-screen display.
SCREEN_X = 64
SCREEN_Y = 64
FACTOR = 8
# Pre-computed initial latent statistics (mu, logvar pairs) recorded from the
# real environment; used to seed dream rollouts.
with open(os.path.join('initial_z', 'initial_z.json'), 'r') as f:
  [initial_mu, initial_logvar] = json.load(f)
initial_mu_logvar = [list(elem) for elem in zip(initial_mu, initial_logvar)]
def get_pi_idx(x, pdf):
  """Sample an index from a categorical distribution.

  Args:
    x: uniform random sample in [0, 1).
    pdf: 1-D numpy array of mixture probabilities (should sum to ~1).

  Returns:
    The first index i such that cumsum(pdf)[i] >= x, or -1 when x exceeds
    the total probability mass (numerical error).
  """
  # Vectorized form of the original accumulate-until->=x loop: searchsorted
  # with side='left' returns the first position whose cumulative sum is >= x.
  cdf = np.cumsum(pdf)
  idx = int(np.searchsorted(cdf, x, side='left'))
  if idx >= pdf.size:
    print('error with sampling ensemble')
    return -1
  return idx
class StarCraftDream(gym.Env):
  """Gym environment that runs inside the agent's learned world model:
  observations are latent vectors z predicted by the MDN-RNN instead of real
  game frames ("dreaming"), seeded from the recorded initial (mu, logvar)
  statistics loaded at module import.
  """
  metadata = {
    'render.modes': ['human', 'rgb_array'],
    'video.frames_per_second' : 22.4
  }
  def __init__(self, agent):
    # NOTE(review): shape=(32) is the int 32, not the tuple (32,). Older gym
    # versions tolerate this; confirm before upgrading gym.
    self.observation_space = Box(low=-50., high=50., shape=(32)) # , dtype=np.float32
    self._seed()
    self.agent = agent
    # Shortcuts into the agent's world-model networks.
    self.vae = agent.net.vae
    self.rnn = agent.net.rnn
    self.feature_size = self.rnn.hps.output_seq_width
    self.non_image_feature_size = 20
    # NOTE(review): attribute name carries a typo ("fature"); kept as-is for
    # compatibility with existing references.
    self.latent_image_fature_size = 32
    # NOTE(review): set to the int 10 (action vector width used below), not a
    # gym.spaces object - confirm callers expect this.
    self.action_space = 10
    self.viewer = None
    self.frame_count = None
    self.z = None
    # MDN sampling temperature (lower = less stochastic rollouts).
    self.temperature = 0.7
    self.vae_frame = None
    # Episode length cap, in frames.
    self.max_frame = 300
    self._reset()
  def _seed(self, seed=None):
    # Standard gym seeding helper; self.np_random drives all sampling below.
    self.np_random, seed = seeding.np_random(seed)
    return [seed]
  def _sample_z(self, mu, logvar):
    # Reparameterized Gaussian sample: z = mu + sigma * eps.
    z = mu + np.exp(logvar/2.0) * self.np_random.randn(*logvar.shape)
    return z
  def _reset(self):
    # Draw a random initial latent state from the recorded statistics.
    idx = self.np_random.randint(0, len(initial_mu_logvar))
    init_mu, init_logvar = initial_mu_logvar[idx]
    # NOTE(review): stats appear to be stored scaled by 10000 - confirm
    # against the producer of initial_z.json.
    init_mu = np.array(init_mu)/10000.
    init_logvar = np.array(init_logvar)/10000.
    self.z = self._sample_z(init_mu, init_logvar)
    self.frame_count = 0
    return self.z
  def _sample_next_z(self, action):
    # One MDN-RNN step: feed (z, action), obtain a mixture of Gaussians over
    # the next z, and sample from it at self.temperature. Updates the RNN
    # hidden state stored on the agent as a side effect.
    s_model = self.rnn
    temperature = self.temperature
    sess = s_model.sess
    hps = s_model.hps
    OUTWIDTH = hps.output_seq_width
    prev_x = np.zeros((1, 1, OUTWIDTH))
    prev_x[0][0] = self.z
    # NOTE(review): strokes is never used in this method.
    strokes = np.zeros((1, OUTWIDTH), dtype=np.float32)
    input_x = np.concatenate((prev_x, action.reshape(1, 1, self.action_space)), axis=2)
    feed = {s_model.input_x: input_x, s_model.initial_state:self.agent.state}
    [logmix, mean, logstd, self.agent.state] = sess.run([s_model.out_logmix, s_model.out_mean, s_model.out_logstd, s_model.final_state], feed)
    # adjust temperatures
    logmix2 = np.copy(logmix)/temperature
    logmix2 -= logmix2.max()
    logmix2 = np.exp(logmix2)
    logmix2 /= logmix2.sum(axis=1).reshape(OUTWIDTH, 1)
    # Per latent dimension: pick a mixture component, then sample from it.
    mixture_idx = np.zeros(OUTWIDTH)
    chosen_mean = np.zeros(OUTWIDTH)
    chosen_logstd = np.zeros(OUTWIDTH)
    for j in range(OUTWIDTH):
      idx = get_pi_idx(self.np_random.rand(), logmix2[j])
      mixture_idx[j] = idx
      chosen_mean[j] = mean[j][idx]
      chosen_logstd[j] = logstd[j][idx]
    rand_gaussian = self.np_random.randn(OUTWIDTH)*np.sqrt(temperature)
    next_x = chosen_mean+np.exp(chosen_logstd)*rand_gaussian
    next_z = next_x.reshape(OUTWIDTH)
    return next_z
  def _step(self, action):
    # Dream rollout step: no reward signal; episode ends after max_frame.
    self.frame_count += 1
    next_z = self._sample_next_z(action)
    reward = 0
    done = False
    if self.frame_count > self.max_frame:
      done = True
    self.z = next_z
    return next_z, reward, done, {}
  def decode_obs(self, z):
    # decode the latent vector
    # NOTE(review): the decoded frame has 12 channels - confirm what the
    # planes represent against the VAE training code.
    img = self.vae.decode(z.reshape(1, self.latent_image_fature_size))
    img = img.reshape(64, 64, 12)
    return img
  def _render(self, mode='human', close=False):
    # Decode the current latent state to an image and display or return it.
    # NOTE(review): scipy.misc.imresize/toimage were removed in modern scipy;
    # this module requires an old scipy or a port to PIL/imageio.
    img = self.decode_obs(self.z)
    img = resize(img, (int(np.round(SCREEN_Y*FACTOR)), int(np.round(SCREEN_X*FACTOR))))
    if self.frame_count > 0:
      pass
      #toimage(img, cmin=0, cmax=255).save('output/'+str(self.frame_count)+'.png')
    if close:
      if self.viewer is not None:
        self.viewer.close()
        self.viewer = None
      return
    if mode == 'rgb_array':
      return img
    elif mode == 'human':
      from gym.envs.classic_control import rendering
      if self.viewer is None:
        self.viewer = rendering.SimpleImageViewer()
      self.viewer.imshow(img)
def make_env(env_name, agent, seed=-1, render_mode=False):
  """Create and seed a StarCraftDream environment.

  env_name and render_mode are accepted for factory-API compatibility but
  are not used here. A negative seed requests a random seed.
  """
  dream_env = StarCraftDream(agent)
  effective_seed = seed if seed >= 0 else np.random.randint(2**31-1)
  dream_env.seed(effective_seed)
  return dream_env
|
from django.db.models import fields
from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
    """Serializes a name field for testing our APIView"""
    # Short free-text name; max_length keeps test payloads small.
    name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializes a user profile object"""
    class Meta:
        model = models.UserProfile
        fields = ('id', 'email', 'name', 'password')
        extra_kwargs = {
            # Password is accepted on create/update but never echoed back in
            # responses; the browsable API renders it as a password input.
            'password': {
                'write_only': True,
                'style': {
                    'input_type': 'password'
                }
            }
        }
    def create(self, validated_data):
        """Create and return a new user"""
        # Use the manager's create_user (not create) so the password is
        # hashed instead of being stored as plain text.
        user = models.UserProfile.objects.create_user(
            email=validated_data['email'],
            name=validated_data['name'],
            password=validated_data['password'],
        )
        return user
    def update(self, instance, validated_data):
        """Handle updating user account"""
        # Pop the password so the default ModelSerializer.update() cannot
        # assign the raw value; set_password stores it hashed instead.
        if 'password' in validated_data:
            password = validated_data.pop('password')
            instance.set_password(password)
        return super().update(instance, validated_data)
class ProfileFeedItemSerializer(serializers.ModelSerializer):
    """Serializes profile feed items"""
    class Meta:
        model = models.ProfileFeedItem
        fields = ('id', 'user_profile', 'status_text', 'created_on')
        # user_profile is read-only so clients cannot post items on behalf of
        # other users. NOTE(review): the owning view is expected to set it
        # from the authenticated request - confirm against the view code.
        extra_kwargs = {'user_profile': {'read_only': True}}
|
"""An implementation of the CASTER model."""
from typing import Tuple
import torch
from chemicalx.data import DrugPairBatch
from chemicalx.models import Model
__all__ = [
"CASTER",
]
class CASTER(Model):
    """An implementation of the CASTER model from [huang2020]_.

    .. seealso:: This model was suggested in https://github.com/AstraZeneca/chemicalx/issues/17

    .. [huang2020] Huang, K., *et al.* (2020). `CASTER: Predicting drug interactions
       with chemical substructure representation <https://doi.org/10.1609/aaai.v34i01.5412>`_.
       *AAAI 2020 - 34th AAAI Conference on Artificial Intelligence*, 702–709.
    """
    def __init__(
        self,
        *,
        drug_channels: int,
        encoder_hidden_channels: int = 32,
        encoder_output_channels: int = 32,
        decoder_hidden_channels: int = 32,
        hidden_channels: int = 32,
        out_hidden_channels: int = 32,
        out_channels: int = 1,
        lambda3: float = 1e-5,
        magnifying_factor: int = 100,
    ):
        """Instantiate the CASTER model.

        :param drug_channels: The number of drug features (recognised frequent substructures).
            The original implementation recognised 1722 basis substructures in the BIOSNAP experiment.
        :param encoder_hidden_channels: The number of hidden layer neurons in the encoder module.
        :param encoder_output_channels: The number of output layer neurons in the encoder module.
        :param decoder_hidden_channels: The number of hidden layer neurons in the decoder module.
        :param hidden_channels: The number of hidden layer neurons in the predictor module.
        :param out_hidden_channels: The last hidden layer channels before output.
        :param out_channels: The number of output channels.
        :param lambda3: regularisation coefficient in the dictionary encoder module.
        :param magnifying_factor: The magnifying factor coefficient applied to the predictor module input.
        """
        super().__init__()
        self.lambda3 = lambda3
        self.magnifying_factor = magnifying_factor
        self.drug_channels = drug_channels
        # encoder
        self.encoder = torch.nn.Sequential(
            torch.nn.Linear(self.drug_channels, encoder_hidden_channels),
            torch.nn.ReLU(True),
            torch.nn.Linear(encoder_hidden_channels, encoder_output_channels),
        )
        # decoder
        self.decoder = torch.nn.Sequential(
            torch.nn.Linear(encoder_output_channels, decoder_hidden_channels),
            torch.nn.ReLU(True),
            torch.nn.Linear(decoder_hidden_channels, drug_channels),
        )
        # predictor: eight layer NN
        predictor_layers = []
        predictor_layers.append(torch.nn.Linear(self.drug_channels, hidden_channels))
        predictor_layers.append(torch.nn.ReLU(True))
        # Hidden stack of BatchNorm + Linear (+ ReLU) blocks; the last block
        # (i == 5) maps to out_hidden_channels before the output layer.
        for i in range(1, 6):
            predictor_layers.append(torch.nn.BatchNorm1d(hidden_channels))
            if i < 5:
                predictor_layers.append(torch.nn.Linear(hidden_channels, hidden_channels))
            else:
                predictor_layers.append(torch.nn.Linear(hidden_channels, out_hidden_channels))
            predictor_layers.append(torch.nn.ReLU(True))
        predictor_layers.append(torch.nn.Linear(out_hidden_channels, out_channels))
        predictor_layers.append(torch.nn.Sigmoid())
        self.predictor = torch.nn.Sequential(*predictor_layers)
    def unpack(self, batch: DrugPairBatch) -> Tuple[torch.FloatTensor]:
        """Return the "functional representation" of drug pairs, as defined in the original implementation.

        :param batch: batch of drug pairs
        :return: each pair is represented as a single vector with x^i = 1 if either x_1^i >= 1 or x_2^i >= 1
        """
        pair_representation = (torch.maximum(batch.drug_features_left, batch.drug_features_right) >= 1.0).float()
        return (pair_representation,)
    def dictionary_encoder(
        self, drug_pair_features_latent: torch.FloatTensor, dictionary_features_latent: torch.FloatTensor
    ) -> torch.FloatTensor:
        """Perform a forward pass of the dictionary encoder submodule.

        :param drug_pair_features_latent: encoder output for the input drug_pair_features
            (batch_size x encoder_output_channels)
        :param dictionary_features_latent: projection of the drug_pair_features using the dictionary basis
            (encoder_output_channels x drug_channels)
        :return: sparse code X_o: (batch_size x drug_channels)
        """
        # Closed-form ridge projection: (D D^T + lambda3 * I)^-1 D applied to
        # the latent pair features yields the sparse code per pair.
        dict_feat_squared = torch.matmul(dictionary_features_latent, dictionary_features_latent.transpose(2, 1))
        dict_feat_squared_inv = torch.inverse(dict_feat_squared + self.lambda3 * (torch.eye(self.drug_channels)))
        dict_feat_closed_form = torch.matmul(dict_feat_squared_inv, dictionary_features_latent)
        r = drug_pair_features_latent[:, None, :].matmul(dict_feat_closed_form.transpose(2, 1)).squeeze(1)
        return r
    def forward(self, drug_pair_features: torch.FloatTensor) -> Tuple[torch.FloatTensor, ...]:
        """Run a forward pass of the CASTER model.

        :param drug_pair_features: functional representation of each drug pair (see unpack method)
        :return: (Tuple[torch.FloatTensor): a tuple of tensors including:
            prediction_scores: predicted target scores for each drug pair
            reconstructed: input drug pair vectors reconstructed by the encoder-decoder chain
            dictionary_encoded: drug pair features encoded by the dictionary encoder submodule
            dictionary_features_latent: projection of the encoded drug pair features using the dictionary basis
            drug_pair_features_latent: encoder output for the input drug_pair_features
            drug_pair_features: a copy of the input unpacked drug_pair_features (needed for loss calculation)
        """
        drug_pair_features_latent = self.encoder(drug_pair_features)
        # NOTE(review): torch.eye is created on the default device; on GPU
        # inputs this could cause a device mismatch - confirm upstream usage.
        dictionary_features_latent = self.encoder(torch.eye(self.drug_channels))
        # Mask the dictionary basis by each pair's active substructures.
        dictionary_features_latent = dictionary_features_latent.mul(drug_pair_features[:, :, None])
        drug_pair_features_reconstructed = self.decoder(drug_pair_features_latent)
        reconstructed = torch.sigmoid(drug_pair_features_reconstructed)
        dictionary_encoded = self.dictionary_encoder(drug_pair_features_latent, dictionary_features_latent)
        prediction_scores = self.predictor(self.magnifying_factor * dictionary_encoded)
        return (
            prediction_scores,
            reconstructed,
            dictionary_encoded,
            dictionary_features_latent,
            drug_pair_features_latent,
            drug_pair_features,
        )
|
# Copyright (C) 2020 Onepanel Inc.
#
# SPDX-License-Identifier: MIT
from django.urls import path, include
from . import views, views_overrides
from rest_framework import routers
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# DRF router for viewset-based endpoints. trailing_slash=False keeps these
# URLs consistent with the slash-less path() routes declared below.
router = routers.DefaultRouter(trailing_slash=False)
# NOTE(review): TaskViewSet comes from views_overrides, which suggests it
# replaces the stock task endpoints - confirm against views_overrides.
router.register('tasks', views_overrides.TaskViewSet)
# Endpoints for driving Onepanel training workflows from the annotation tool.
urlpatterns = [
    path('workflow_templates', views.list_workflow_templates),
    path('workflow_templates/<slug:workflow_template_uid>/versions', views.list_workflow_template_versions),
    path('workflow_templates/<slug:workflow_template_uid>/versions/<slug:version>', views.get_workflow_template),
    path('get_node_pool', views.get_node_pool),
    path('get_object_counts/<int:pk>', views.get_object_counts),
    path('get_base_model', views.get_base_model),
    path('execute_workflow/<int:pk>', views.execute_training_workflow),
    path('get_available_dump_formats', views.get_available_dump_formats),
    path('get_output_path/<int:pk>', views.generate_output_path),
    path('get_annotation_path/<int:pk>', views.generate_dataset_path),
    path('api/v1/', include(router.urls))
]
|
import { stringify } from 'qs'
import { emitter } from '../utils'
import { createPopup } from '../../broker/utils'
// Vuex action: ask the user to grant a dapp origin access to the wallet.
// Unlocks the wallet, opens the "/enable" popup, and settles when the UI
// emits the one-shot `origin:<origin>` decision event.
export const requestOriginAccess = async ({ state, dispatch, commit }, { origin, chain }) => {
  const { requestOriginAccessActive } = state.app
  // Guard against concurrent prompts.
  // NOTE(review): when a prompt is already active this action resolves to
  // undefined with no signal to the caller - confirm this is intended.
  if (!requestOriginAccessActive) {
    commit('app/SET_ORIGIN_ACCESS_ACTIVE', { active: true }, { root: true })
    await dispatch('requestUnlockWallet')
    return new Promise((resolve, reject) => {
      // NOTE(review): the `chain` callback parameter shadows the action's
      // `chain` argument, and `accountId` is unused; confirm which values
      // ADD_EXTERNAL_CONNECTION should actually record (see the commented
      // payload line below).
      emitter.$once(`origin:${origin}`, (allowed, accountId, chain) => {
        commit('app/SET_ORIGIN_ACCESS_ACTIVE', { active: false }, { root: true })
        if (allowed) {
          const { activeProxyAddressAddress, activeChainId } = state
          commit('ADD_EXTERNAL_CONNECTION', {
            origin,
            proxyAddressAddress: activeProxyAddressAddress,
            chainId: activeChainId
            // origin, activeWalletId, accountId, chain
          })
          resolve(true)
        } else {
          reject(new Error('User denied'))
        }
      })
      // Open the approval popup; its result arrives via the emitter above.
      const query = stringify({ origin, chain })
      createPopup(`/enable?${query}`)
    })
  }
}
|
/*++
Copyright (c) 2004 - 2014, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials are licensed and made available under
the terms and conditions of the BSD License that accompanies this distribution.
The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
Module Name:
MonoStatusCode.h
Abstract:
Monolithic single PEIM to provide the status code functionality.
The PEIM is a blend of libraries that correspond to the different status code
listeners that a platform installs.
--*/
#ifndef _MONO_STATUS_CODE_H_
#define _MONO_STATUS_CODE_H_
//
// Statements that include other files.
//
#include "PiPei.h"
#include "Pi/PiBootMode.h"
#include "Ppi/StatusCode.h"
#include "Ppi/MemoryDiscovered.h"
#include "Ppi/FvLoadFile.h"
#include "Library/HobLib.h"
#include "Library/DebugLib.h"
#include "Library/IoLib.h"
#include "Library/SerialPortLib.h"
#include "Protocol/StatusCode.h"
//
// Serial status code enabler HOB definitions.
// NOTE(review): this secondary include guard is nested inside
// _MONO_STATUS_CODE_H_ and only yields content in EFI_DEBUG builds; it looks
// like it was inlined from a former StatusCodeEnabler.h - confirm before
// refactoring.
//
#ifndef _STATUS_CODE_ENABLER_H_
#define _STATUS_CODE_ENABLER_H_
#ifdef EFI_DEBUG
#define EFI_STATUS_CODE_ENABLER_HOB_GUID \
  { \
    0x5ffc6cf3, 0x71ad, 0x46f5, 0xbd, 0x8b, 0x7e, 0x8f, 0xfe, 0x19, 0x7, 0xd7 \
  }
extern EFI_GUID gEfiSerialStatusCodeEnablerHobGuid;
//
// HOB payload carrying the status code enable/disable flag.
// NOTE(review): the flag name implies TRUE suppresses reporting - confirm
// against the consumer of this HOB.
//
typedef struct _EFI_STATUS_CODE_INFO {
  BOOLEAN StatusCodeDisable;
} EFI_STATUS_CODE_INFO;
#endif
#endif
//
// Platform specific function Declarations. These must be implemented in a
// subdirectory named PlatformName in a file named PlatformStatusCode.c.
//
//
// This is the platform function to initialize the listeners desired by the
// platform.
//
VOID
PlatformInitializeStatusCode (
IN EFI_FFS_FILE_HEADER *FfsHeader,
IN CONST EFI_PEI_SERVICES **PeiServices
);
//
// This is the platform function that calls all of the listeners desired by the
// platform.
//
EFI_STATUS
EFIAPI
PlatformReportStatusCode (
IN CONST EFI_PEI_SERVICES **PeiServices,
IN EFI_STATUS_CODE_TYPE CodeType,
IN EFI_STATUS_CODE_VALUE Value,
IN UINT32 Instance,
IN CONST EFI_GUID * CallerId,
IN CONST EFI_STATUS_CODE_DATA * Data OPTIONAL
);
//
// Platform independent function Declarations
//
//
// Initialize the status code listeners and publish the status code PPI.
//
VOID
EFIAPI
InitializeMonoStatusCode (
IN EFI_FFS_FILE_HEADER *FfsHeader,
IN const EFI_PEI_SERVICES **PeiServices
);
//
// Convert a DXE status code call into a PEI status code call.
//
EFI_STATUS
EFIAPI
TranslateDxeStatusCodeToPeiStatusCode (
IN EFI_STATUS_CODE_TYPE CodeType,
IN EFI_STATUS_CODE_VALUE Value,
IN UINT32 Instance,
IN EFI_GUID * CallerId,
IN EFI_STATUS_CODE_DATA * Data OPTIONAL
);
//
// Publish a HOB that contains the listener to be used by DXE.
//
EFI_STATUS
EFIAPI
InitializeDxeReportStatusCode (
IN const EFI_PEI_SERVICES **PeiServices
);
#endif
|
import sys
import json
import os
import datetime
def mongodb(rs, hosts, out):
    # Dump all databases of replica set `rs` (addressed as "rs/host1,host2")
    # into directory `out` with mongodump.
    # NOTE(review): all helpers below interpolate values into shell commands
    # without quoting - arguments must come from trusted config only.
    os.system('mongodump --host %s/%s --out="%s"' % (rs, ','.join(hosts), out))
def mariadb(host, out):
    # Dump all MariaDB databases on `host` into the file `out`.
    os.system('mysqldump --host=%s --single-transaction --skip-comments --all-databases > %s' % (host, out))
def archivate(target, out):
    # Compress `target` into <out>.tar.gz and return the archive name.
    os.system('tar -zcvf %s.tar.gz %s' % (out, target))
    return '%s.tar.gz' % out
def rm(target):
    # Recursively remove `target`.
    os.system('rm -r %s' % target)
def mv(target, out):
    # Move `target` into `out`.
    os.system('mv %s %s' % (target, out))
def ln(target, out):
    # Symlink `target` into directory `out`.
    os.system('ln -s %s --target-directory="%s"' % (target, out))
def echo(message):
    # Print via the shell so output interleaves with the commands above.
    os.system('echo "%s"' % message)
# Execution
echo('Dump execution:')
root_dir = os.path.dirname(os.path.abspath(__file__))
# Config keys: per-engine connection info, 'out' (archive destination) and
# optionally 'distribution' (directory receiving a symlink to the dump).
with open(root_dir + '/config.json') as f:
    config = json.load(f)
# The engine to dump is selected by the first CLI argument.
if len(sys.argv) < 2 or not sys.argv[1]:
    echo('Bad request: %s.' % sys.argv)
    exit(1)
task = sys.argv[1]
echo('Task selected: %s.' % task)
if task == 'mongodb' and 'mongodb' in config:
    mongodbConfig = config['mongodb']
    mongodb(mongodbConfig['rs'], mongodbConfig['hosts'], './tmp')
    name = '%s_mongodb' % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    name = archivate('./tmp/*', name)
if task == 'mariadb' and 'mariadb' in config:
    mariadbConfig = config['mariadb']
    # NOTE(review): the SQL dump is written to 'dump.sh' - '.sql' looks
    # intended; also './tmp' must already exist for the redirect to succeed.
    mariadb(mariadbConfig['host'], './tmp/dump.sh')
    name = '%s_mariadb' % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    name = archivate('./tmp/*', name)
# 'name' only exists when one of the task branches above actually ran.
if 'name' in locals():
    rm('./tmp')
    mv(name, config['out'])
    name = '%s/%s' % (config['out'], name)
    echo('Dump created: %s.' % name)
    if 'distribution' in config:
        ln(name, config['distribution'])
else:
    echo('Wrong configuration')
    exit(1)
|