id
stringlengths
1
265
text
stringlengths
6
5.19M
dataset_id
stringclasses
7 values
/Gratheory-0.1.1-py3-none-any.whl/gratheory.py
# gratheory.py -- a small weighted-graph library.
#
# Vertices are identified by arbitrary hashable names and are numbered
# 0..(n-1) internally.  Edges are [start, end, weight] triples (use a
# weight of 1 for unweighted graphs).  On each Vertex, `connectTo` and
# `connectFrom` hold [neighbour_number, weight] pairs for outgoing and
# incoming edges respectively.  `findShortestPath(start, end)` returns a
# (path_as_list_of_names, total_weight) tuple, or (None, None) when the
# endpoints are missing/unreachable and raiseError is False.
from queue import Queue, PriorityQueue
from collections import defaultdict


class inf:
    """Sentinel for an unknown / unreachable distance.

    Compares greater than every value and absorbs arithmetic, so it can
    stand in for "infinity" during relaxation without special-casing.
    """

    def __eq__(self, o):
        return False

    def __lt__(self, o):
        return False

    def __gt__(self, o):
        return True

    def __add__(self, o):
        return self

    def __sub__(self, o):
        return self

    def __rsub__(self, o):
        return self

    def __radd__(self, o):
        return self


class Vertex:
    """A numbered vertex with adjacency lists of [neighbour, weight] pairs."""

    def __init__(self, n):
        self.connectTo = []    # outgoing edges: [target number, weight]
        self.connectFrom = []  # incoming edges: [source number, weight]
        self.number = n


class Graph:
    """A directed (or, with oneWayEdges=False, undirected) weighted graph."""

    def __init__(self, edges, oneWayEdges=True):
        """Build a graph from [start, end, weight] triples.

        Vertices are created on first mention, in order of appearance.
        With oneWayEdges=False every edge is also stored in reverse.
        """
        self.vertices = []
        self.length = 0            # number of vertices
        self.oneWay = oneWayEdges
        self.edges = edges
        self.namesToNumbers = {}
        self.numbersToNames = {}
        for edge in edges:
            if edge[0] not in self.namesToNumbers:
                self.addVertex(edge[0])
            if edge[1] not in self.namesToNumbers:
                self.addVertex(edge[1])
            self.addEdge(edge)

    def findShortestPath(self, start, end, searchType='Ford', raiseError=False):
        """Return (path, weight) for the shortest route from start to end.

        `path` is a list of vertex names (start first), `weight` the summed
        edge weights.  searchType 'Ford' runs Bellman-Ford (negative edge
        weights allowed); 'Djikstra' runs Dijkstra (non-negative weights
        only).  When an endpoint is missing or end is unreachable, returns
        (None, None), or raises if raiseError is True.  Bellman-Ford raises
        on a negative-weight cycle (the original looped forever).
        """
        if not (start in self.namesToNumbers and end in self.namesToNumbers):
            if raiseError:
                raise Exception("Start or end not in graph")
            return None, None
        if end not in self.BFS(start):
            if raiseError:
                raise Exception('Path does not exist')
            return None, None
        if searchType == 'Ford':
            return self._bellmanFord(self.namesToNumbers[start],
                                     self.namesToNumbers[end])
        if searchType == 'Djikstra':
            return self._dijkstra(self.namesToNumbers[start],
                                  self.namesToNumbers[end])

    def _bellmanFord(self, start, end):
        """Bellman-Ford between two vertex numbers -> (path_names, weight)."""
        dist = [inf() for _ in range(self.length)]
        dist[start] = 0
        # A shortest path uses at most length-1 edges, so `length` full
        # relaxation passes always suffice; if the final pass still relaxes
        # something the graph must contain a negative-weight cycle.
        for _ in range(self.length):
            changes = 0
            for v in range(self.length):
                for u, w in self.vertices[v].connectFrom:
                    if not isinstance(dist[u], inf) and dist[v] > dist[u] + w:
                        dist[v] = dist[u] + w
                        changes += 1
            if changes == 0:
                break
        else:
            raise Exception("Graph contains a negative-weight cycle")
        return self._tracePath(start, end, dist), dist[end]

    def _dijkstra(self, start, end):
        """Dijkstra between two vertex numbers -> (path_names, weight).

        Fix over the original: a vertex's tentative distance is re-relaxed
        whenever a shorter route is found before the vertex is finalised
        (the original froze the first enqueued distance, which could return
        non-optimal paths).  Stale queue entries are skipped lazily.
        """
        dist = {start: 0}
        done = set()
        pending = PriorityQueue()
        pending.put((0, start))
        while not pending.empty():
            d, node = pending.get()
            if node in done:
                continue  # stale entry: node was finalised with a smaller d
            done.add(node)
            if node == end:
                break
            for nbr, w in self.vertices[node].connectTo:
                candidate = d + w
                if nbr not in dist or candidate < dist[nbr]:
                    dist[nbr] = candidate
                    pending.put((candidate, nbr))
        # _tracePath expects a dense table indexed by vertex number.
        table = [dist.get(i, inf()) for i in range(self.length)]
        return self._tracePath(start, end, table), dist[end]

    def _tracePath(self, start, end, dist):
        """Rebuild the shortest path from a distance table.

        An incoming edge (u, w) of v lies on a shortest path exactly when
        dist[v] == dist[u] + w; walk such edges backwards from `end`.
        Bounded by `length` steps so a malformed table cannot hang.
        """
        path = [end]
        node = end
        count = 0  # a simple path visits at most `length` vertices
        while node != start and count <= self.length:
            for u, w in self.vertices[node].connectFrom:
                if not isinstance(dist[u], inf) and dist[node] - dist[u] == w:
                    path.append(u)
                    node = u
                    break
            count += 1
        return [self.numbersToNames[i] for i in path[::-1]]

    def DFS(self, vertex, reverseEdges=False):
        """Depth-first traversal from `vertex`; returns visited names in order.

        With reverseEdges=True, incoming edges are walked instead of outgoing.
        """
        stack = [self.namesToNumbers[vertex]]
        visited = []
        while stack:
            node = stack.pop()
            if node not in visited:
                visited.append(node)
                if reverseEdges:
                    connects = self.vertices[node].connectFrom
                else:
                    connects = self.vertices[node].connectTo
                for nbr, _ in connects:
                    stack.append(nbr)
        return [self.numbersToNames[i] for i in visited]

    def BFS(self, vertex, reverseEdges=False):
        """Breadth-first traversal from `vertex`; returns visited names in order.

        With reverseEdges=True, incoming edges are walked instead of outgoing.
        """
        pending = Queue()
        pending.put(self.namesToNumbers[vertex])
        visited = []
        while not pending.empty():
            node = pending.get()
            if node not in visited:
                visited.append(node)
                if reverseEdges:
                    connects = self.vertices[node].connectFrom
                else:
                    connects = self.vertices[node].connectTo
                for nbr, _ in connects:
                    pending.put(nbr)
        return [self.numbersToNames[i] for i in visited]

    def addVertex(self, v, raiseError=True):
        """Add a vertex named `v` and return its number.

        If `v` already exists: raise when raiseError is True, otherwise
        return the existing number.
        """
        if v in self.namesToNumbers:
            if raiseError:
                raise Exception("Vertex already exists")
            return self.namesToNumbers[v]
        self.vertices.append(Vertex(self.length))
        self.namesToNumbers[v] = self.length
        self.numbersToNames[self.length] = v
        self.length += 1
        return self.length - 1

    def addEdge(self, e, oneWay=True, raiseError=True):
        """Add the edge e = [start, end, weight].

        Missing endpoints raise when raiseError is True, otherwise they are
        created on the fly.  A graph built with oneWayEdges=False always
        stores the reverse edge as well, regardless of `oneWay`.
        """
        if e[0] not in self.namesToNumbers:
            if raiseError:
                raise Exception("First node not in graph")
            self.addVertex(e[0])
        if e[1] not in self.namesToNumbers:
            if raiseError:
                raise Exception("Second node not in graph")
            self.addVertex(e[1])
        if oneWay and not self.oneWay:
            oneWay = self.oneWay  # two-way graphs force two-way edges
        a = self.namesToNumbers[e[0]]
        b = self.namesToNumbers[e[1]]
        self.vertices[a].connectTo.append([b, e[2]])
        self.vertices[b].connectFrom.append([a, e[2]])
        if not oneWay:
            self.vertices[b].connectTo.append([a, e[2]])
            self.vertices[a].connectFrom.append([b, e[2]])
PypiClean
/Envelopes-0.4.tar.gz/Envelopes-0.4/docs/api/connstack.rst
Connection stack ================ The connection stack allows you to use Envelopes' SMTP connection wrapper in threaded apps. Consult the example Flask app to see it in action. Code of this module has been adapted from `RQ <http://python-rq.org/>`_ by `Vincent Driessen <http://nvie.com/about/>`_. .. autofunction:: envelopes.connstack.get_current_connection .. autofunction:: envelopes.connstack.pop_connection .. autofunction:: envelopes.connstack.push_connection .. autofunction:: envelopes.connstack.resolve_connection .. autofunction:: envelopes.connstack.use_connection
PypiClean
/node_managment_application-0.0.1.tar.gz/node_managment_application-0.0.1/nms_app/static/rest_framework/js/coreapi-0.1.1.js
(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.coreapi = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){ 'use strict'; var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var BasicAuthentication = function () { function BasicAuthentication() { var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {}; _classCallCheck(this, BasicAuthentication); var username = options.username; var password = options.password; var hash = window.btoa(username + ':' + password); this.auth = 'Basic ' + hash; } _createClass(BasicAuthentication, [{ key: 'authenticate', value: function authenticate(options) { options.headers['Authorization'] = this.auth; return options; } }]); return BasicAuthentication; }(); module.exports = { BasicAuthentication: BasicAuthentication }; },{}],2:[function(require,module,exports){ 'use strict'; var basic = require('./basic'); var session = require('./session'); var token = require('./token'); module.exports = { BasicAuthentication: basic.BasicAuthentication, SessionAuthentication: session.SessionAuthentication, TokenAuthentication: token.TokenAuthentication }; },{"./basic":1,"./session":3,"./token":4}],3:[function(require,module,exports){ 'use strict'; var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var utils = require('../utils'); function trim(str) { return str.replace(/^\s\s*/, '').replace(/\s\s*$/, ''); } function getCookie(cookieName, cookieString) { cookieString = cookieString || window.document.cookie; if (cookieString && cookieString !== '') { var cookies = cookieString.split(';'); for (var i = 0; i < cookies.length; i++) { var cookie = trim(cookies[i]); // Does this 
cookie string begin with the name we want? if (cookie.substring(0, cookieName.length + 1) === cookieName + '=') { return decodeURIComponent(cookie.substring(cookieName.length + 1)); } } } return null; } var SessionAuthentication = function () { function SessionAuthentication() { var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {}; _classCallCheck(this, SessionAuthentication); this.csrfToken = getCookie(options.csrfCookieName, options.cookieString); this.csrfHeaderName = options.csrfHeaderName; } _createClass(SessionAuthentication, [{ key: 'authenticate', value: function authenticate(options) { options.credentials = 'same-origin'; if (this.csrfToken && !utils.csrfSafeMethod(options.method)) { options.headers[this.csrfHeaderName] = this.csrfToken; } return options; } }]); return SessionAuthentication; }(); module.exports = { SessionAuthentication: SessionAuthentication }; },{"../utils":15}],4:[function(require,module,exports){ 'use strict'; var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var TokenAuthentication = function () { function TokenAuthentication() { var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {}; _classCallCheck(this, TokenAuthentication); this.token = options.token; this.scheme = options.scheme || 'Bearer'; } _createClass(TokenAuthentication, [{ key: 'authenticate', value: function authenticate(options) { options.headers['Authorization'] = this.scheme + ' ' + this.token; return options; } }]); return TokenAuthentication; }(); module.exports = { TokenAuthentication: TokenAuthentication }; },{}],5:[function(require,module,exports){ 'use strict'; var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var document = require('./document'); var codecs = require('./codecs'); var errors = require('./errors'); var transports = require('./transports'); var utils = require('./utils'); function lookupLink(node, keys) { var _iteratorNormalCompletion = true; var _didIteratorError = false; var _iteratorError = undefined; try { for (var _iterator = keys[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) { var key = _step.value; if (node instanceof document.Document) { node = node.content[key]; } else { node = node[key]; } if (node === undefined) { throw new errors.LinkLookupError('Invalid link lookup: ' + JSON.stringify(keys)); } } } catch (err) { _didIteratorError = true; _iteratorError = err; } finally { try { if 
(!_iteratorNormalCompletion && _iterator.return) { _iterator.return(); } } finally { if (_didIteratorError) { throw _iteratorError; } } } if (!(node instanceof document.Link)) { throw new errors.LinkLookupError('Invalid link lookup: ' + JSON.stringify(keys)); } return node; } var Client = function () { function Client() { var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {}; _classCallCheck(this, Client); var transportOptions = { auth: options.auth || null, headers: options.headers || {}, requestCallback: options.requestCallback, responseCallback: options.responseCallback }; this.decoders = options.decoders || [new codecs.CoreJSONCodec(), new codecs.JSONCodec(), new codecs.TextCodec()]; this.transports = options.transports || [new transports.HTTPTransport(transportOptions)]; } _createClass(Client, [{ key: 'action', value: function action(document, keys) { var params = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {}; var link = lookupLink(document, keys); var transport = utils.determineTransport(this.transports, link.url); return transport.action(link, this.decoders, params); } }, { key: 'get', value: function get(url) { var link = new document.Link(url, 'get'); var transport = utils.determineTransport(this.transports, url); return transport.action(link, this.decoders); } }]); return Client; }(); module.exports = { Client: Client }; },{"./codecs":7,"./document":10,"./errors":11,"./transports":14,"./utils":15}],6:[function(require,module,exports){ 'use strict'; var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) 
defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var document = require('../document'); var URL = require('url-parse'); function unescapeKey(key) { if (key.match(/__(type|meta)$/)) { return key.substring(1); } return key; } function getString(obj, key) { var value = obj[key]; if (typeof value === 'string') { return value; } return ''; } function getBoolean(obj, key) { var value = obj[key]; if (typeof value === 'boolean') { return value; } return false; } function getObject(obj, key) { var value = obj[key]; if ((typeof value === 'undefined' ? 'undefined' : _typeof(value)) === 'object') { return value; } return {}; } function getArray(obj, key) { var value = obj[key]; if (value instanceof Array) { return value; } return []; } function getContent(data, baseUrl) { var excluded = ['_type', '_meta']; var content = {}; for (var property in data) { if (data.hasOwnProperty(property) && !excluded.includes(property)) { var key = unescapeKey(property); var value = primitiveToNode(data[property], baseUrl); content[key] = value; } } return content; } function primitiveToNode(data, baseUrl) { var isObject = data instanceof Object && !(data instanceof Array); if (isObject && data._type === 'document') { // Document var meta = getObject(data, '_meta'); var relativeUrl = getString(meta, 'url'); var url = relativeUrl ? 
URL(relativeUrl, baseUrl).toString() : ''; var title = getString(meta, 'title'); var description = getString(meta, 'description'); var content = getContent(data, url); return new document.Document(url, title, description, content); } else if (isObject && data._type === 'link') { // Link var _relativeUrl = getString(data, 'url'); var _url = _relativeUrl ? URL(_relativeUrl, baseUrl).toString() : ''; var method = getString(data, 'action') || 'get'; var _title = getString(data, 'title'); var _description = getString(data, 'description'); var fieldsData = getArray(data, 'fields'); var fields = []; for (var idx = 0, len = fieldsData.length; idx < len; idx++) { var value = fieldsData[idx]; var name = getString(value, 'name'); var required = getBoolean(value, 'required'); var location = getString(value, 'location'); var fieldDescription = getString(value, 'fieldDescription'); var field = new document.Field(name, required, location, fieldDescription); fields.push(field); } return new document.Link(_url, method, 'application/json', fields, _title, _description); } else if (isObject) { // Object var _content = {}; for (var key in data) { if (data.hasOwnProperty(key)) { _content[key] = primitiveToNode(data[key], baseUrl); } } return _content; } else if (data instanceof Array) { // Object var _content2 = []; for (var _idx = 0, _len = data.length; _idx < _len; _idx++) { _content2.push(primitiveToNode(data[_idx], baseUrl)); } return _content2; } // Primitive return data; } var CoreJSONCodec = function () { function CoreJSONCodec() { _classCallCheck(this, CoreJSONCodec); this.mediaType = 'application/coreapi+json'; } _createClass(CoreJSONCodec, [{ key: 'decode', value: function decode(text) { var options = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; var data = text; if (options.preloaded === undefined || !options.preloaded) { data = JSON.parse(text); } return primitiveToNode(data, options.url); } }]); return CoreJSONCodec; }(); module.exports = { CoreJSONCodec: CoreJSONCodec }; },{"../document":10,"url-parse":19}],7:[function(require,module,exports){ 'use strict'; var corejson = require('./corejson'); var json = require('./json'); var text = require('./text'); module.exports = { CoreJSONCodec: corejson.CoreJSONCodec, JSONCodec: json.JSONCodec, TextCodec: text.TextCodec }; },{"./corejson":6,"./json":8,"./text":9}],8:[function(require,module,exports){ 'use strict'; var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var JSONCodec = function () { function JSONCodec() { _classCallCheck(this, JSONCodec); this.mediaType = 'application/json'; } _createClass(JSONCodec, [{ key: 'decode', value: function decode(text) { var options = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; return JSON.parse(text); } }]); return JSONCodec; }(); module.exports = { JSONCodec: JSONCodec }; },{}],9:[function(require,module,exports){ 'use strict'; var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var TextCodec = function () { function TextCodec() { _classCallCheck(this, TextCodec); this.mediaType = 'text/*'; } _createClass(TextCodec, [{ key: 'decode', value: function decode(text) { var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; return text; } }]); return TextCodec; }(); module.exports = { TextCodec: TextCodec }; },{}],10:[function(require,module,exports){ 'use strict'; function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var Document = function Document() { var url = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ''; var title = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : ''; var description = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : ''; var content = arguments.length > 3 && arguments[3] !== undefined ? 
arguments[3] : {}; _classCallCheck(this, Document); this.url = url; this.title = title; this.description = description; this.content = content; }; var Link = function Link(url, method) { var encoding = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : 'application/json'; var fields = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : []; var title = arguments.length > 4 && arguments[4] !== undefined ? arguments[4] : ''; var description = arguments.length > 5 && arguments[5] !== undefined ? arguments[5] : ''; _classCallCheck(this, Link); if (url === undefined) { throw new Error('url argument is required'); } if (method === undefined) { throw new Error('method argument is required'); } this.url = url; this.method = method; this.encoding = encoding; this.fields = fields; this.title = title; this.description = description; }; var Field = function Field(name) { var required = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : false; var location = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : ''; var description = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : ''; _classCallCheck(this, Field); if (name === undefined) { throw new Error('name argument is required'); } this.name = name; this.required = required; this.location = location; this.description = description; }; module.exports = { Document: Document, Link: Link, Field: Field }; },{}],11:[function(require,module,exports){ 'use strict'; function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? 
call : self; } function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } var ParameterError = function (_Error) { _inherits(ParameterError, _Error); function ParameterError(message) { _classCallCheck(this, ParameterError); var _this = _possibleConstructorReturn(this, (ParameterError.__proto__ || Object.getPrototypeOf(ParameterError)).call(this, message)); _this.message = message; _this.name = 'ParameterError'; return _this; } return ParameterError; }(Error); var LinkLookupError = function (_Error2) { _inherits(LinkLookupError, _Error2); function LinkLookupError(message) { _classCallCheck(this, LinkLookupError); var _this2 = _possibleConstructorReturn(this, (LinkLookupError.__proto__ || Object.getPrototypeOf(LinkLookupError)).call(this, message)); _this2.message = message; _this2.name = 'LinkLookupError'; return _this2; } return LinkLookupError; }(Error); var ErrorMessage = function (_Error3) { _inherits(ErrorMessage, _Error3); function ErrorMessage(message, content) { _classCallCheck(this, ErrorMessage); var _this3 = _possibleConstructorReturn(this, (ErrorMessage.__proto__ || Object.getPrototypeOf(ErrorMessage)).call(this, message)); _this3.message = message; _this3.content = content; _this3.name = 'ErrorMessage'; return _this3; } return ErrorMessage; }(Error); module.exports = { ParameterError: ParameterError, LinkLookupError: LinkLookupError, ErrorMessage: ErrorMessage }; },{}],12:[function(require,module,exports){ 'use strict'; var auth = require('./auth'); var client = require('./client'); var codecs = require('./codecs'); var 
document = require('./document'); var errors = require('./errors'); var transports = require('./transports'); var utils = require('./utils'); var coreapi = { Client: client.Client, Document: document.Document, Link: document.Link, auth: auth, codecs: codecs, errors: errors, transports: transports, utils: utils }; module.exports = coreapi; },{"./auth":2,"./client":5,"./codecs":7,"./document":10,"./errors":11,"./transports":14,"./utils":15}],13:[function(require,module,exports){ 'use strict'; var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var fetch = require('isomorphic-fetch'); var errors = require('../errors'); var utils = require('../utils'); var URL = require('url-parse'); var urlTemplate = require('url-template'); var parseResponse = function parseResponse(response, decoders, responseCallback) { return response.text().then(function (text) { if (responseCallback) { responseCallback(response, text); } var contentType = response.headers.get('Content-Type'); var decoder = utils.negotiateDecoder(decoders, contentType); var options = { url: response.url }; return decoder.decode(text, options); }); }; var HTTPTransport = function () { function HTTPTransport() { var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {}; _classCallCheck(this, HTTPTransport); this.schemes = ['http', 'https']; this.auth = options.auth || null; this.headers = options.headers || {}; this.fetch = options.fetch || fetch; this.FormData = options.FormData || window.FormData; this.requestCallback = options.requestCallback; this.responseCallback = options.responseCallback; } _createClass(HTTPTransport, [{ key: 'buildRequest', value: function buildRequest(link, decoders) { var params = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {}; var fields = link.fields; var method = link.method.toUpperCase(); var queryParams = {}; var pathParams = {}; var formParams = {}; var fieldNames = []; var hasBody = false; for (var idx = 0, len = fields.length; idx < len; idx++) { var field = fields[idx]; // Ensure any required fields are included if (!params.hasOwnProperty(field.name)) { if (field.required) { throw new errors.ParameterError('Missing required field: "' + field.name + '"'); } else { continue; } } fieldNames.push(field.name); if (field.location === 'query') { queryParams[field.name] = params[field.name]; } else if (field.location === 'path') { pathParams[field.name] = params[field.name]; } else if (field.location === 'form') { formParams[field.name] = params[field.name]; hasBody = true; } else if (field.location === 'body') { formParams = params[field.name]; hasBody = true; } } // Check for any parameters that did not have a matching field for (var property in params) { if (params.hasOwnProperty(property) && !fieldNames.includes(property)) { throw new errors.ParameterError('Unknown parameter: "' + property + '"'); } } var requestOptions = { method: method, headers: {} }; Object.assign(requestOptions.headers, this.headers); if (hasBody) { if (link.encoding === 'application/json') { requestOptions.body = JSON.stringify(formParams); requestOptions.headers['Content-Type'] = 'application/json'; } else if (link.encoding === 'multipart/form-data') { var form = new this.FormData(); 
for (var paramKey in formParams) { form.append(paramKey, formParams[paramKey]); } requestOptions.body = form; } else if (link.encoding === 'application/x-www-form-urlencoded') { var formBody = []; for (var _paramKey in formParams) { var encodedKey = encodeURIComponent(_paramKey); var encodedValue = encodeURIComponent(formParams[_paramKey]); formBody.push(encodedKey + '=' + encodedValue); } formBody = formBody.join('&'); requestOptions.body = formBody; requestOptions.headers['Content-Type'] = 'application/x-www-form-urlencoded'; } } if (this.auth) { requestOptions = this.auth.authenticate(requestOptions); } var parsedUrl = urlTemplate.parse(link.url); parsedUrl = parsedUrl.expand(pathParams); parsedUrl = new URL(parsedUrl); parsedUrl.set('query', queryParams); return { url: parsedUrl.toString(), options: requestOptions }; } }, { key: 'action', value: function action(link, decoders) { var params = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {}; var responseCallback = this.responseCallback; var request = this.buildRequest(link, decoders, params); if (this.requestCallback) { this.requestCallback(request); } return this.fetch(request.url, request.options).then(function (response) { if (response.status === 204) { return; } return parseResponse(response, decoders, responseCallback).then(function (data) { if (response.ok) { return data; } else { var title = response.status + ' ' + response.statusText; var error = new errors.ErrorMessage(title, data); return Promise.reject(error); } }); }); } }]); return HTTPTransport; }(); module.exports = { HTTPTransport: HTTPTransport }; },{"../errors":11,"../utils":15,"isomorphic-fetch":16,"url-parse":19,"url-template":21}],14:[function(require,module,exports){ 'use strict'; var http = require('./http'); module.exports = { HTTPTransport: http.HTTPTransport }; },{"./http":13}],15:[function(require,module,exports){ 'use strict'; var URL = require('url-parse'); var determineTransport = function 
determineTransport(transports, url) { var parsedUrl = new URL(url); var scheme = parsedUrl.protocol.replace(':', ''); var _iteratorNormalCompletion = true; var _didIteratorError = false; var _iteratorError = undefined; try { for (var _iterator = transports[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) { var transport = _step.value; if (transport.schemes.includes(scheme)) { return transport; } } } catch (err) { _didIteratorError = true; _iteratorError = err; } finally { try { if (!_iteratorNormalCompletion && _iterator.return) { _iterator.return(); } } finally { if (_didIteratorError) { throw _iteratorError; } } } throw Error('Unsupported scheme in URL: ' + url); }; var negotiateDecoder = function negotiateDecoder(decoders, contentType) { if (contentType === undefined || contentType === null) { return decoders[0]; } var fullType = contentType.toLowerCase().split(';')[0].trim(); var mainType = fullType.split('/')[0] + '/*'; var wildcardType = '*/*'; var acceptableTypes = [fullType, mainType, wildcardType]; var _iteratorNormalCompletion2 = true; var _didIteratorError2 = false; var _iteratorError2 = undefined; try { for (var _iterator2 = decoders[Symbol.iterator](), _step2; !(_iteratorNormalCompletion2 = (_step2 = _iterator2.next()).done); _iteratorNormalCompletion2 = true) { var decoder = _step2.value; if (acceptableTypes.includes(decoder.mediaType)) { return decoder; } } } catch (err) { _didIteratorError2 = true; _iteratorError2 = err; } finally { try { if (!_iteratorNormalCompletion2 && _iterator2.return) { _iterator2.return(); } } finally { if (_didIteratorError2) { throw _iteratorError2; } } } throw Error('Unsupported media in Content-Type header: ' + contentType); }; var csrfSafeMethod = function csrfSafeMethod(method) { // these HTTP methods do not require CSRF protection return (/^(GET|HEAD|OPTIONS|TRACE)$/.test(method) ); }; module.exports = { determineTransport: determineTransport, 
negotiateDecoder: negotiateDecoder, csrfSafeMethod: csrfSafeMethod }; },{"url-parse":19}],16:[function(require,module,exports){ // the whatwg-fetch polyfill installs the fetch() function // on the global object (window or self) // // Return that as the export for use in Webpack, Browserify etc. require('whatwg-fetch'); module.exports = self.fetch.bind(self); },{"whatwg-fetch":22}],17:[function(require,module,exports){ 'use strict'; var has = Object.prototype.hasOwnProperty; /** * Simple query string parser. * * @param {String} query The query string that needs to be parsed. * @returns {Object} * @api public */ function querystring(query) { var parser = /([^=?&]+)=?([^&]*)/g , result = {} , part; // // Little nifty parsing hack, leverage the fact that RegExp.exec increments // the lastIndex property so we can continue executing this loop until we've // parsed all results. // for (; part = parser.exec(query); result[decodeURIComponent(part[1])] = decodeURIComponent(part[2]) ); return result; } /** * Transform a query string to an object. * * @param {Object} obj Object that should be transformed. * @param {String} prefix Optional prefix. * @returns {String} * @api public */ function querystringify(obj, prefix) { prefix = prefix || ''; var pairs = []; // // Optionally prefix with a '?' if needed // if ('string' !== typeof prefix) prefix = '?'; for (var key in obj) { if (has.call(obj, key)) { pairs.push(encodeURIComponent(key) +'='+ encodeURIComponent(obj[key])); } } return pairs.length ? prefix + pairs.join('&') : ''; } // // Expose the module. // exports.stringify = querystringify; exports.parse = querystring; },{}],18:[function(require,module,exports){ 'use strict'; /** * Check if we're required to add a port number. * * @see https://url.spec.whatwg.org/#default-port * @param {Number|String} port Port number we need to check * @param {String} protocol Protocol we need to check against. 
* @returns {Boolean} Is it a default port for the given protocol * @api private */ module.exports = function required(port, protocol) { protocol = protocol.split(':')[0]; port = +port; if (!port) return false; switch (protocol) { case 'http': case 'ws': return port !== 80; case 'https': case 'wss': return port !== 443; case 'ftp': return port !== 21; case 'gopher': return port !== 70; case 'file': return false; } return port !== 0; }; },{}],19:[function(require,module,exports){ 'use strict'; var required = require('requires-port') , lolcation = require('./lolcation') , qs = require('querystringify') , protocolre = /^([a-z][a-z0-9.+-]*:)?(\/\/)?([\S\s]*)/i; /** * These are the parse rules for the URL parser, it informs the parser * about: * * 0. The char it Needs to parse, if it's a string it should be done using * indexOf, RegExp using exec and NaN means set as current value. * 1. The property we should set when parsing this value. * 2. Indication if it's backwards or forward parsing, when set as number it's * the value of extra chars that should be split off. * 3. Inherit from location if non existing in the parser. * 4. `toLowerCase` the resulting value. */ var rules = [ ['#', 'hash'], // Extract from the back. ['?', 'query'], // Extract from the back. ['/', 'pathname'], // Extract from the back. ['@', 'auth', 1], // Extract from the front. [NaN, 'host', undefined, 1, 1], // Set left over value. [/:(\d+)$/, 'port', undefined, 1], // RegExp the back. [NaN, 'hostname', undefined, 1, 1] // Set left over. ]; /** * @typedef ProtocolExtract * @type Object * @property {String} protocol Protocol matched in the URL, in lowercase. * @property {Boolean} slashes `true` if protocol is followed by "//", else `false`. * @property {String} rest Rest of the URL that is not part of the protocol. */ /** * Extract protocol information from a URL with/without double slash ("//"). * * @param {String} address URL we want to extract from. 
* @return {ProtocolExtract} Extracted information. * @api private */ function extractProtocol(address) { var match = protocolre.exec(address); return { protocol: match[1] ? match[1].toLowerCase() : '', slashes: !!match[2], rest: match[3] }; } /** * Resolve a relative URL pathname against a base URL pathname. * * @param {String} relative Pathname of the relative URL. * @param {String} base Pathname of the base URL. * @return {String} Resolved pathname. * @api private */ function resolve(relative, base) { var path = (base || '/').split('/').slice(0, -1).concat(relative.split('/')) , i = path.length , last = path[i - 1] , unshift = false , up = 0; while (i--) { if (path[i] === '.') { path.splice(i, 1); } else if (path[i] === '..') { path.splice(i, 1); up++; } else if (up) { if (i === 0) unshift = true; path.splice(i, 1); up--; } } if (unshift) path.unshift(''); if (last === '.' || last === '..') path.push(''); return path.join('/'); } /** * The actual URL instance. Instead of returning an object we've opted-in to * create an actual constructor as it's much more memory efficient and * faster and it pleases my OCD. * * @constructor * @param {String} address URL we want to parse. * @param {Object|String} location Location defaults for relative paths. * @param {Boolean|Function} parser Parser for the query string. * @api public */ function URL(address, location, parser) { if (!(this instanceof URL)) { return new URL(address, location, parser); } var relative, extracted, parse, instruction, index, key , instructions = rules.slice() , type = typeof location , url = this , i = 0; // // The following if statements allows this module two have compatibility with // 2 different API: // // 1. Node.js's `url.parse` api which accepts a URL, boolean as arguments // where the boolean indicates that the query string should also be parsed. // // 2. The `URL` interface of the browser which accepts a URL, object as // arguments. 
The supplied object will be used as default values / fall-back // for relative paths. // if ('object' !== type && 'string' !== type) { parser = location; location = null; } if (parser && 'function' !== typeof parser) parser = qs.parse; location = lolcation(location); // // Extract protocol information before running the instructions. // extracted = extractProtocol(address || ''); relative = !extracted.protocol && !extracted.slashes; url.slashes = extracted.slashes || relative && location.slashes; url.protocol = extracted.protocol || location.protocol || ''; address = extracted.rest; // // When the authority component is absent the URL starts with a path // component. // if (!extracted.slashes) instructions[2] = [/(.*)/, 'pathname']; for (; i < instructions.length; i++) { instruction = instructions[i]; parse = instruction[0]; key = instruction[1]; if (parse !== parse) { url[key] = address; } else if ('string' === typeof parse) { if (~(index = address.indexOf(parse))) { if ('number' === typeof instruction[2]) { url[key] = address.slice(0, index); address = address.slice(index + instruction[2]); } else { url[key] = address.slice(index); address = address.slice(0, index); } } } else if (index = parse.exec(address)) { url[key] = index[1]; address = address.slice(0, index.index); } url[key] = url[key] || ( relative && instruction[3] ? location[key] || '' : '' ); // // Hostname, host and protocol should be lowercased so they can be used to // create a proper `origin`. // if (instruction[4]) url[key] = url[key].toLowerCase(); } // // Also parse the supplied query string in to an object. If we're supplied // with a custom parser as function use that instead of the default build-in // parser. // if (parser) url.query = parser(url.query); // // If the URL is relative, resolve the pathname against the base URL. 
// if ( relative && location.slashes && url.pathname.charAt(0) !== '/' && (url.pathname !== '' || location.pathname !== '') ) { url.pathname = resolve(url.pathname, location.pathname); } // // We should not add port numbers if they are already the default port number // for a given protocol. As the host also contains the port number we're going // override it with the hostname which contains no port number. // if (!required(url.port, url.protocol)) { url.host = url.hostname; url.port = ''; } // // Parse down the `auth` for the username and password. // url.username = url.password = ''; if (url.auth) { instruction = url.auth.split(':'); url.username = instruction[0] || ''; url.password = instruction[1] || ''; } url.origin = url.protocol && url.host && url.protocol !== 'file:' ? url.protocol +'//'+ url.host : 'null'; // // The href is just the compiled result. // url.href = url.toString(); } /** * This is convenience method for changing properties in the URL instance to * insure that they all propagate correctly. * * @param {String} part Property we need to adjust. * @param {Mixed} value The newly assigned value. * @param {Boolean|Function} fn When setting the query, it will be the function * used to parse the query. * When setting the protocol, double slash will be * removed from the final url if it is true. 
* @returns {URL} * @api public */ URL.prototype.set = function set(part, value, fn) { var url = this; switch (part) { case 'query': if ('string' === typeof value && value.length) { value = (fn || qs.parse)(value); } url[part] = value; break; case 'port': url[part] = value; if (!required(value, url.protocol)) { url.host = url.hostname; url[part] = ''; } else if (value) { url.host = url.hostname +':'+ value; } break; case 'hostname': url[part] = value; if (url.port) value += ':'+ url.port; url.host = value; break; case 'host': url[part] = value; if (/:\d+$/.test(value)) { value = value.split(':'); url.port = value.pop(); url.hostname = value.join(':'); } else { url.hostname = value; url.port = ''; } break; case 'protocol': url.protocol = value.toLowerCase(); url.slashes = !fn; break; case 'pathname': url.pathname = value.length && value.charAt(0) !== '/' ? '/' + value : value; break; default: url[part] = value; } for (var i = 0; i < rules.length; i++) { var ins = rules[i]; if (ins[4]) url[ins[1]] = url[ins[1]].toLowerCase(); } url.origin = url.protocol && url.host && url.protocol !== 'file:' ? url.protocol +'//'+ url.host : 'null'; url.href = url.toString(); return url; }; /** * Transform the properties back in to a valid and full URL string. * * @param {Function} stringify Optional query stringify function. * @returns {String} * @api public */ URL.prototype.toString = function toString(stringify) { if (!stringify || 'function' !== typeof stringify) stringify = qs.stringify; var query , url = this , protocol = url.protocol; if (protocol && protocol.charAt(protocol.length - 1) !== ':') protocol += ':'; var result = protocol + (url.slashes ? '//' : ''); if (url.username) { result += url.username; if (url.password) result += ':'+ url.password; result += '@'; } result += url.host + url.pathname; query = 'object' === typeof url.query ? stringify(url.query) : url.query; if (query) result += '?' !== query.charAt(0) ? 
'?'+ query : query; if (url.hash) result += url.hash; return result; }; // // Expose the URL parser and some additional properties that might be useful for // others or testing. // URL.extractProtocol = extractProtocol; URL.location = lolcation; URL.qs = qs; module.exports = URL; },{"./lolcation":20,"querystringify":17,"requires-port":18}],20:[function(require,module,exports){ (function (global){ 'use strict'; var slashes = /^[A-Za-z][A-Za-z0-9+-.]*:\/\//; /** * These properties should not be copied or inherited from. This is only needed * for all non blob URL's as a blob URL does not include a hash, only the * origin. * * @type {Object} * @private */ var ignore = { hash: 1, query: 1 } , URL; /** * The location object differs when your code is loaded through a normal page, * Worker or through a worker using a blob. And with the blobble begins the * trouble as the location object will contain the URL of the blob, not the * location of the page where our code is loaded in. The actual origin is * encoded in the `pathname` so we can thankfully generate a good "default" * location from it so we can generate proper relative URL's again. * * @param {Object|String} loc Optional default location object. * @returns {Object} lolcation object. * @api public */ module.exports = function lolcation(loc) { loc = loc || global.location || {}; URL = URL || require('./'); var finaldestination = {} , type = typeof loc , key; if ('blob:' === loc.protocol) { finaldestination = new URL(unescape(loc.pathname), {}); } else if ('string' === type) { finaldestination = new URL(loc, {}); for (key in ignore) delete finaldestination[key]; } else if ('object' === type) { for (key in loc) { if (key in ignore) continue; finaldestination[key] = loc[key]; } if (finaldestination.slashes === undefined) { finaldestination.slashes = slashes.test(loc.href); } } return finaldestination; }; }).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? 
self : typeof window !== "undefined" ? window : {}) },{"./":19}],21:[function(require,module,exports){ (function (root, factory) { if (typeof exports === 'object') { module.exports = factory(); } else if (typeof define === 'function' && define.amd) { define([], factory); } else { root.urltemplate = factory(); } }(this, function () { /** * @constructor */ function UrlTemplate() { } /** * @private * @param {string} str * @return {string} */ UrlTemplate.prototype.encodeReserved = function (str) { return str.split(/(%[0-9A-Fa-f]{2})/g).map(function (part) { if (!/%[0-9A-Fa-f]/.test(part)) { part = encodeURI(part).replace(/%5B/g, '[').replace(/%5D/g, ']'); } return part; }).join(''); }; /** * @private * @param {string} str * @return {string} */ UrlTemplate.prototype.encodeUnreserved = function (str) { return encodeURIComponent(str).replace(/[!'()*]/g, function (c) { return '%' + c.charCodeAt(0).toString(16).toUpperCase(); }); } /** * @private * @param {string} operator * @param {string} value * @param {string} key * @return {string} */ UrlTemplate.prototype.encodeValue = function (operator, value, key) { value = (operator === '+' || operator === '#') ? 
this.encodeReserved(value) : this.encodeUnreserved(value); if (key) { return this.encodeUnreserved(key) + '=' + value; } else { return value; } }; /** * @private * @param {*} value * @return {boolean} */ UrlTemplate.prototype.isDefined = function (value) { return value !== undefined && value !== null; }; /** * @private * @param {string} * @return {boolean} */ UrlTemplate.prototype.isKeyOperator = function (operator) { return operator === ';' || operator === '&' || operator === '?'; }; /** * @private * @param {Object} context * @param {string} operator * @param {string} key * @param {string} modifier */ UrlTemplate.prototype.getValues = function (context, operator, key, modifier) { var value = context[key], result = []; if (this.isDefined(value) && value !== '') { if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') { value = value.toString(); if (modifier && modifier !== '*') { value = value.substring(0, parseInt(modifier, 10)); } result.push(this.encodeValue(operator, value, this.isKeyOperator(operator) ? key : null)); } else { if (modifier === '*') { if (Array.isArray(value)) { value.filter(this.isDefined).forEach(function (value) { result.push(this.encodeValue(operator, value, this.isKeyOperator(operator) ? 
key : null)); }, this); } else { Object.keys(value).forEach(function (k) { if (this.isDefined(value[k])) { result.push(this.encodeValue(operator, value[k], k)); } }, this); } } else { var tmp = []; if (Array.isArray(value)) { value.filter(this.isDefined).forEach(function (value) { tmp.push(this.encodeValue(operator, value)); }, this); } else { Object.keys(value).forEach(function (k) { if (this.isDefined(value[k])) { tmp.push(this.encodeUnreserved(k)); tmp.push(this.encodeValue(operator, value[k].toString())); } }, this); } if (this.isKeyOperator(operator)) { result.push(this.encodeUnreserved(key) + '=' + tmp.join(',')); } else if (tmp.length !== 0) { result.push(tmp.join(',')); } } } } else { if (operator === ';') { if (this.isDefined(value)) { result.push(this.encodeUnreserved(key)); } } else if (value === '' && (operator === '&' || operator === '?')) { result.push(this.encodeUnreserved(key) + '='); } else if (value === '') { result.push(''); } } return result; }; /** * @param {string} template * @return {function(Object):string} */ UrlTemplate.prototype.parse = function (template) { var that = this; var operators = ['+', '#', '.', '/', ';', '?', '&']; return { expand: function (context) { return template.replace(/\{([^\{\}]+)\}|([^\{\}]+)/g, function (_, expression, literal) { if (expression) { var operator = null, values = []; if (operators.indexOf(expression.charAt(0)) !== -1) { operator = expression.charAt(0); expression = expression.substr(1); } expression.split(/,/g).forEach(function (variable) { var tmp = /([^:\*]*)(?::(\d+)|(\*))?/.exec(variable); values.push.apply(values, that.getValues(context, operator, tmp[1], tmp[2] || tmp[3])); }); if (operator && operator !== '+') { var separator = ','; if (operator === '?') { separator = '&'; } else if (operator !== '#') { separator = operator; } return (values.length !== 0 ? 
operator : '') + values.join(separator); } else { return values.join(','); } } else { return that.encodeReserved(literal); } }); } }; }; return new UrlTemplate(); })); },{}],22:[function(require,module,exports){ (function(self) { 'use strict'; if (self.fetch) { return } var support = { searchParams: 'URLSearchParams' in self, iterable: 'Symbol' in self && 'iterator' in Symbol, blob: 'FileReader' in self && 'Blob' in self && (function() { try { new Blob() return true } catch(e) { return false } })(), formData: 'FormData' in self, arrayBuffer: 'ArrayBuffer' in self } if (support.arrayBuffer) { var viewClasses = [ '[object Int8Array]', '[object Uint8Array]', '[object Uint8ClampedArray]', '[object Int16Array]', '[object Uint16Array]', '[object Int32Array]', '[object Uint32Array]', '[object Float32Array]', '[object Float64Array]' ] var isDataView = function(obj) { return obj && DataView.prototype.isPrototypeOf(obj) } var isArrayBufferView = ArrayBuffer.isView || function(obj) { return obj && viewClasses.indexOf(Object.prototype.toString.call(obj)) > -1 } } function normalizeName(name) { if (typeof name !== 'string') { name = String(name) } if (/[^a-z0-9\-#$%&'*+.\^_`|~]/i.test(name)) { throw new TypeError('Invalid character in header field name') } return name.toLowerCase() } function normalizeValue(value) { if (typeof value !== 'string') { value = String(value) } return value } // Build a destructive iterator for the value list function iteratorFor(items) { var iterator = { next: function() { var value = items.shift() return {done: value === undefined, value: value} } } if (support.iterable) { iterator[Symbol.iterator] = function() { return iterator } } return iterator } function Headers(headers) { this.map = {} if (headers instanceof Headers) { headers.forEach(function(value, name) { this.append(name, value) }, this) } else if (headers) { Object.getOwnPropertyNames(headers).forEach(function(name) { this.append(name, headers[name]) }, this) } } Headers.prototype.append 
= function(name, value) { name = normalizeName(name) value = normalizeValue(value) var oldValue = this.map[name] this.map[name] = oldValue ? oldValue+','+value : value } Headers.prototype['delete'] = function(name) { delete this.map[normalizeName(name)] } Headers.prototype.get = function(name) { name = normalizeName(name) return this.has(name) ? this.map[name] : null } Headers.prototype.has = function(name) { return this.map.hasOwnProperty(normalizeName(name)) } Headers.prototype.set = function(name, value) { this.map[normalizeName(name)] = normalizeValue(value) } Headers.prototype.forEach = function(callback, thisArg) { for (var name in this.map) { if (this.map.hasOwnProperty(name)) { callback.call(thisArg, this.map[name], name, this) } } } Headers.prototype.keys = function() { var items = [] this.forEach(function(value, name) { items.push(name) }) return iteratorFor(items) } Headers.prototype.values = function() { var items = [] this.forEach(function(value) { items.push(value) }) return iteratorFor(items) } Headers.prototype.entries = function() { var items = [] this.forEach(function(value, name) { items.push([name, value]) }) return iteratorFor(items) } if (support.iterable) { Headers.prototype[Symbol.iterator] = Headers.prototype.entries } function consumed(body) { if (body.bodyUsed) { return Promise.reject(new TypeError('Already read')) } body.bodyUsed = true } function fileReaderReady(reader) { return new Promise(function(resolve, reject) { reader.onload = function() { resolve(reader.result) } reader.onerror = function() { reject(reader.error) } }) } function readBlobAsArrayBuffer(blob) { var reader = new FileReader() var promise = fileReaderReady(reader) reader.readAsArrayBuffer(blob) return promise } function readBlobAsText(blob) { var reader = new FileReader() var promise = fileReaderReady(reader) reader.readAsText(blob) return promise } function bufferClone(buf) { if (buf.slice) { return buf.slice(0) } else { var view = new Uint8Array(buf.byteLength) 
view.set(new Uint8Array(buf)) return view.buffer } } function Body() { this.bodyUsed = false this._initBody = function(body) { this._bodyInit = body if (!body) { this._bodyText = '' } else if (typeof body === 'string') { this._bodyText = body } else if (support.blob && Blob.prototype.isPrototypeOf(body)) { this._bodyBlob = body } else if (support.formData && FormData.prototype.isPrototypeOf(body)) { this._bodyFormData = body } else if (support.searchParams && URLSearchParams.prototype.isPrototypeOf(body)) { this._bodyText = body.toString() } else if (support.arrayBuffer && support.blob && isDataView(body)) { this._bodyArrayBuffer = bufferClone(body.buffer) // IE 10-11 can't handle a DataView body. this._bodyInit = new Blob([this._bodyArrayBuffer]) } else if (support.arrayBuffer && (ArrayBuffer.prototype.isPrototypeOf(body) || isArrayBufferView(body))) { this._bodyArrayBuffer = bufferClone(body) } else { throw new Error('unsupported BodyInit type') } if (!this.headers.get('content-type')) { if (typeof body === 'string') { this.headers.set('content-type', 'text/plain;charset=UTF-8') } else if (this._bodyBlob && this._bodyBlob.type) { this.headers.set('content-type', this._bodyBlob.type) } else if (support.searchParams && URLSearchParams.prototype.isPrototypeOf(body)) { this.headers.set('content-type', 'application/x-www-form-urlencoded;charset=UTF-8') } } } if (support.blob) { this.blob = function() { var rejected = consumed(this) if (rejected) { return rejected } if (this._bodyBlob) { return Promise.resolve(this._bodyBlob) } else if (this._bodyArrayBuffer) { return Promise.resolve(new Blob([this._bodyArrayBuffer])) } else if (this._bodyFormData) { throw new Error('could not read FormData body as blob') } else { return Promise.resolve(new Blob([this._bodyText])) } } } this.text = function() { var rejected = consumed(this) if (rejected) { return rejected } if (this._bodyBlob) { return readBlobAsText(this._bodyBlob) } else if (this._bodyArrayBuffer) { var view = new 
Uint8Array(this._bodyArrayBuffer) var str = String.fromCharCode.apply(null, view) return Promise.resolve(str) } else if (this._bodyFormData) { throw new Error('could not read FormData body as text') } else { return Promise.resolve(this._bodyText) } } if (support.arrayBuffer) { this.arrayBuffer = function() { if (this._bodyArrayBuffer) { return consumed(this) || Promise.resolve(this._bodyArrayBuffer) } else { return this.blob().then(readBlobAsArrayBuffer) } } } if (support.formData) { this.formData = function() { return this.text().then(decode) } } this.json = function() { return this.text().then(JSON.parse) } return this } // HTTP methods whose capitalization should be normalized var methods = ['DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT'] function normalizeMethod(method) { var upcased = method.toUpperCase() return (methods.indexOf(upcased) > -1) ? upcased : method } function Request(input, options) { options = options || {} var body = options.body if (typeof input === 'string') { this.url = input } else { if (input.bodyUsed) { throw new TypeError('Already read') } this.url = input.url this.credentials = input.credentials if (!options.headers) { this.headers = new Headers(input.headers) } this.method = input.method this.mode = input.mode if (!body && input._bodyInit != null) { body = input._bodyInit input.bodyUsed = true } } this.credentials = options.credentials || this.credentials || 'omit' if (options.headers || !this.headers) { this.headers = new Headers(options.headers) } this.method = normalizeMethod(options.method || this.method || 'GET') this.mode = options.mode || this.mode || null this.referrer = null if ((this.method === 'GET' || this.method === 'HEAD') && body) { throw new TypeError('Body not allowed for GET or HEAD requests') } this._initBody(body) } Request.prototype.clone = function() { return new Request(this, { body: this._bodyInit }) } function decode(body) { var form = new FormData() body.trim().split('&').forEach(function(bytes) { if 
(bytes) { var split = bytes.split('=') var name = split.shift().replace(/\+/g, ' ') var value = split.join('=').replace(/\+/g, ' ') form.append(decodeURIComponent(name), decodeURIComponent(value)) } }) return form } function parseHeaders(rawHeaders) { var headers = new Headers() rawHeaders.split('\r\n').forEach(function(line) { var parts = line.split(':') var key = parts.shift().trim() if (key) { var value = parts.join(':').trim() headers.append(key, value) } }) return headers } Body.call(Request.prototype) function Response(bodyInit, options) { if (!options) { options = {} } this.type = 'default' this.status = 'status' in options ? options.status : 200 this.ok = this.status >= 200 && this.status < 300 this.statusText = 'statusText' in options ? options.statusText : 'OK' this.headers = new Headers(options.headers) this.url = options.url || '' this._initBody(bodyInit) } Body.call(Response.prototype) Response.prototype.clone = function() { return new Response(this._bodyInit, { status: this.status, statusText: this.statusText, headers: new Headers(this.headers), url: this.url }) } Response.error = function() { var response = new Response(null, {status: 0, statusText: ''}) response.type = 'error' return response } var redirectStatuses = [301, 302, 303, 307, 308] Response.redirect = function(url, status) { if (redirectStatuses.indexOf(status) === -1) { throw new RangeError('Invalid status code') } return new Response(null, {status: status, headers: {location: url}}) } self.Headers = Headers self.Request = Request self.Response = Response self.fetch = function(input, init) { return new Promise(function(resolve, reject) { var request = new Request(input, init) var xhr = new XMLHttpRequest() xhr.onload = function() { var options = { status: xhr.status, statusText: xhr.statusText, headers: parseHeaders(xhr.getAllResponseHeaders() || '') } options.url = 'responseURL' in xhr ? xhr.responseURL : options.headers.get('X-Request-URL') var body = 'response' in xhr ? 
xhr.response : xhr.responseText resolve(new Response(body, options)) } xhr.onerror = function() { reject(new TypeError('Network request failed')) } xhr.ontimeout = function() { reject(new TypeError('Network request failed')) } xhr.open(request.method, request.url, true) if (request.credentials === 'include') { xhr.withCredentials = true } if ('responseType' in xhr && support.blob) { xhr.responseType = 'blob' } request.headers.forEach(function(value, name) { xhr.setRequestHeader(name, value) }) xhr.send(typeof request._bodyInit === 'undefined' ? null : request._bodyInit) }) } self.fetch.polyfill = true })(typeof self !== 'undefined' ? self : this); },{}]},{},[12])(12) }); //# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"sources":["node_modules/browser-pack/_prelude.js","lib/auth/basic.js","lib/auth/index.js","lib/auth/session.js","lib/auth/token.js","lib/client.js","lib/codecs/corejson.js","lib/codecs/index.js","lib/codecs/json.js","lib/codecs/text.js","lib/document.js","lib/errors.js","lib/index.js","lib/transports/http.js","lib/transports/index.js","lib/utils.js","node_modules/isomorphic-fetch/fetch-npm-browserify.js","node_modules/querystringify/index.js","node_modules/requires-port/index.js","node_modules/url-parse/index.js","node_modules/url-parse/lolcation.js","node_modules/url-template/lib/url-template.js","node_modules/whatwg-fetch/fetch.js"],"names":[],"mappings":"AAAA;;;;;;;ICAM,mB;AACJ,iCAA2B;AAAA,QAAd,OAAc,uEAAJ,EAAI;;AAAA;;AACzB,QAAM,WAAW,QAAQ,QAAzB;AACA,QAAM,WAAW,QAAQ,QAAzB;AACA,QAAM,OAAO,OAAO,IAAP,CAAY,WAAW,GAAX,GAAiB,QAA7B,CAAb;AACA,SAAK,IAAL,GAAY,WAAW,IAAvB;AACD;;;;iCAEa,O,EAAS;AACrB,cAAQ,OAAR,CAAgB,eAAhB,IAAmC,KAAK,IAAxC;AACA,aAAO,OAAP;AACD;;;;;;AAGH,OAAO,OAAP,GAAiB;AACf,uBAAqB;AADN,CAAjB;;;;;ACdA,IAAM,QAAQ,QAAQ,SAAR,CAAd;AACA,IAAM,UAAU,QAAQ,WAAR,CAAhB;AACA,IAAM,QAAQ,QAAQ,SAAR,CAAd;;AAEA,OAAO,OAAP,GAAiB;AACf,uBAAqB,MAAM,mBADZ;AAEf,yBAAuB,QAAQ,qBAFhB;AAGf,uBAAqB,MAAM;AAHZ,CAAjB;;;;;;;;;ACJA,IAAM,QAAQ,QAAQ,UAAR,CAAd;;AAEA,
SAAS,IAAT,CAAe,GAAf,EAAoB;AAClB,SAAO,IAAI,OAAJ,CAAY,QAAZ,EAAsB,EAAtB,EAA0B,OAA1B,CAAkC,QAAlC,EAA4C,EAA5C,CAAP;AACD;;AAED,SAAS,SAAT,CAAoB,UAApB,EAAgC,YAAhC,EAA8C;AAC5C,iBAAe,gBAAgB,OAAO,QAAP,CAAgB,MAA/C;AACA,MAAI,gBAAgB,iBAAiB,EAArC,EAAyC;AACvC,QAAM,UAAU,aAAa,KAAb,CAAmB,GAAnB,CAAhB;AACA,SAAK,IAAI,IAAI,CAAb,EAAgB,IAAI,QAAQ,MAA5B,EAAoC,GAApC,EAAyC;AACvC,UAAM,SAAS,KAAK,QAAQ,CAAR,CAAL,CAAf;AACA;AACA,UAAI,OAAO,SAAP,CAAiB,CAAjB,EAAoB,WAAW,MAAX,GAAoB,CAAxC,MAAgD,aAAa,GAAjE,EAAuE;AACrE,eAAO,mBAAmB,OAAO,SAAP,CAAiB,WAAW,MAAX,GAAoB,CAArC,CAAnB,CAAP;AACD;AACF;AACF;AACD,SAAO,IAAP;AACD;;IAEK,qB;AACJ,mCAA2B;AAAA,QAAd,OAAc,uEAAJ,EAAI;;AAAA;;AACzB,SAAK,SAAL,GAAiB,UAAU,QAAQ,cAAlB,EAAkC,QAAQ,YAA1C,CAAjB;AACA,SAAK,cAAL,GAAsB,QAAQ,cAA9B;AACD;;;;iCAEa,O,EAAS;AACrB,cAAQ,WAAR,GAAsB,aAAtB;AACA,UAAI,KAAK,SAAL,IAAkB,CAAC,MAAM,cAAN,CAAqB,QAAQ,MAA7B,CAAvB,EAA6D;AAC3D,gBAAQ,OAAR,CAAgB,KAAK,cAArB,IAAuC,KAAK,SAA5C;AACD;AACD,aAAO,OAAP;AACD;;;;;;AAGH,OAAO,OAAP,GAAiB;AACf,yBAAuB;AADR,CAAjB;;;;;;;;;ICpCM,mB;AACJ,iCAA2B;AAAA,QAAd,OAAc,uEAAJ,EAAI;;AAAA;;AACzB,SAAK,KAAL,GAAa,QAAQ,KAArB;AACA,SAAK,MAAL,GAAc,QAAQ,MAAR,IAAkB,QAAhC;AACD;;;;iCAEa,O,EAAS;AACrB,cAAQ,OAAR,CAAgB,eAAhB,IAAmC,KAAK,MAAL,GAAc,GAAd,GAAoB,KAAK,KAA5D;AACA,aAAO,OAAP;AACD;;;;;;AAGH,OAAO,OAAP,GAAiB;AACf,uBAAqB;AADN,CAAjB;;;;;;;;;ACZA,IAAM,WAAW,QAAQ,YAAR,CAAjB;AACA,IAAM,SAAS,QAAQ,UAAR,CAAf;AACA,IAAM,SAAS,QAAQ,UAAR,CAAf;AACA,IAAM,aAAa,QAAQ,cAAR,CAAnB;AACA,IAAM,QAAQ,QAAQ,SAAR,CAAd;;AAEA,SAAS,UAAT,CAAqB,IAArB,EAA2B,IAA3B,EAAiC;AAAA;AAAA;AAAA;;AAAA;AAC/B,yBAAgB,IAAhB,8HAAsB;AAAA,UAAb,GAAa;;AACpB,UAAI,gBAAgB,SAAS,QAA7B,EAAuC;AACrC,eAAO,KAAK,OAAL,CAAa,GAAb,CAAP;AACD,OAFD,MAEO;AACL,eAAO,KAAK,GAAL,CAAP;AACD;AACD,UAAI,SAAS,SAAb,EAAwB;AACtB,cAAM,IAAI,OAAO,eAAX,2BAAmD,KAAK,SAAL,CAAe,IAAf,CAAnD,CAAN;AACD;AACF;AAV8B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;AAW/B,MAAI,EAAE,gBAAgB,SAAS,IAA3B,CAAJ,EAAsC;AACpC,UAAM,IAAI,OAAO,eAAX,2BAAmD,KAAK,SAAL,CAAe,IAAf,CAAnD,CAAN;AACD;AACD,SAAO,IAAP;AACD;;IAEK,M;AACJ,oBAA2B;AAAA,QAAd,OAAc,uEAAJ,EA
AI;;AAAA;;AACzB,QAAM,mBAAmB;AACvB,YAAM,QAAQ,IAAR,IAAgB,IADC;AAEvB,eAAS,QAAQ,OAAR,IAAmB,EAFL;AAGvB,uBAAiB,QAAQ,eAHF;AAIvB,wBAAkB,QAAQ;AAJH,KAAzB;;AAOA,SAAK,QAAL,GAAgB,QAAQ,QAAR,IAAoB,CAAC,IAAI,OAAO,aAAX,EAAD,EAA6B,IAAI,OAAO,SAAX,EAA7B,EAAqD,IAAI,OAAO,SAAX,EAArD,CAApC;AACA,SAAK,UAAL,GAAkB,QAAQ,UAAR,IAAsB,CAAC,IAAI,WAAW,aAAf,CAA6B,gBAA7B,CAAD,CAAxC;AACD;;;;2BAEO,Q,EAAU,I,EAAmB;AAAA,UAAb,MAAa,uEAAJ,EAAI;;AACnC,UAAM,OAAO,WAAW,QAAX,EAAqB,IAArB,CAAb;AACA,UAAM,YAAY,MAAM,kBAAN,CAAyB,KAAK,UAA9B,EAA0C,KAAK,GAA/C,CAAlB;AACA,aAAO,UAAU,MAAV,CAAiB,IAAjB,EAAuB,KAAK,QAA5B,EAAsC,MAAtC,CAAP;AACD;;;wBAEI,G,EAAK;AACR,UAAM,OAAO,IAAI,SAAS,IAAb,CAAkB,GAAlB,EAAuB,KAAvB,CAAb;AACA,UAAM,YAAY,MAAM,kBAAN,CAAyB,KAAK,UAA9B,EAA0C,GAA1C,CAAlB;AACA,aAAO,UAAU,MAAV,CAAiB,IAAjB,EAAuB,KAAK,QAA5B,CAAP;AACD;;;;;;AAGH,OAAO,OAAP,GAAiB;AACf,UAAQ;AADO,CAAjB;;;;;;;;;;;ACjDA,IAAM,WAAW,QAAQ,aAAR,CAAjB;AACA,IAAM,MAAM,QAAQ,WAAR,CAAZ;;AAEA,SAAS,WAAT,CAAsB,GAAtB,EAA2B;AACzB,MAAI,IAAI,KAAJ,CAAU,gBAAV,CAAJ,EAAiC;AAC/B,WAAO,IAAI,SAAJ,CAAc,CAAd,CAAP;AACD;AACD,SAAO,GAAP;AACD;;AAED,SAAS,SAAT,CAAoB,GAApB,EAAyB,GAAzB,EAA8B;AAC5B,MAAM,QAAQ,IAAI,GAAJ,CAAd;AACA,MAAI,OAAQ,KAAR,KAAmB,QAAvB,EAAiC;AAC/B,WAAO,KAAP;AACD;AACD,SAAO,EAAP;AACD;;AAED,SAAS,UAAT,CAAqB,GAArB,EAA0B,GAA1B,EAA+B;AAC7B,MAAM,QAAQ,IAAI,GAAJ,CAAd;AACA,MAAI,OAAQ,KAAR,KAAmB,SAAvB,EAAkC;AAChC,WAAO,KAAP;AACD;AACD,SAAO,KAAP;AACD;;AAED,SAAS,SAAT,CAAoB,GAApB,EAAyB,GAAzB,EAA8B;AAC5B,MAAM,QAAQ,IAAI,GAAJ,CAAd;AACA,MAAI,QAAQ,KAAR,yCAAQ,KAAR,OAAmB,QAAvB,EAAiC;AAC/B,WAAO,KAAP;AACD;AACD,SAAO,EAAP;AACD;;AAED,SAAS,QAAT,CAAmB,GAAnB,EAAwB,GAAxB,EAA6B;AAC3B,MAAM,QAAQ,IAAI,GAAJ,CAAd;AACA,MAAI,iBAAiB,KAArB,EAA4B;AAC1B,WAAO,KAAP;AACD;AACD,SAAO,EAAP;AACD;;AAED,SAAS,UAAT,CAAqB,IAArB,EAA2B,OAA3B,EAAoC;AAClC,MAAM,WAAW,CAAC,OAAD,EAAU,OAAV,CAAjB;AACA,MAAI,UAAU,EAAd;AACA,OAAK,IAAI,QAAT,IAAqB,IAArB,EAA2B;AACzB,QAAI,KAAK,cAAL,CAAoB,QAApB,KAAiC,CAAC,SAAS,QAAT,CAAkB,QAAlB,CAAtC,EAAmE;AACjE,UAAM,MAAM,YAAY,QAAZ,CAAZ;AACA,UAAM,QAAQ,gBAAgB,KAAK,QAAL,CAAhB,EAAgC,OAAhC,CAAd;AACA,cAAQ,GAAR,IAAe,KAAf;AACD
;AACF;AACD,SAAO,OAAP;AACD;;AAED,SAAS,eAAT,CAA0B,IAA1B,EAAgC,OAAhC,EAAyC;AACvC,MAAM,WAAW,gBAAgB,MAAhB,IAA0B,EAAE,gBAAgB,KAAlB,CAA3C;;AAEA,MAAI,YAAY,KAAK,KAAL,KAAe,UAA/B,EAA2C;AACzC;AACA,QAAM,OAAO,UAAU,IAAV,EAAgB,OAAhB,CAAb;AACA,QAAM,cAAc,UAAU,IAAV,EAAgB,KAAhB,CAApB;AACA,QAAM,MAAM,cAAc,IAAI,WAAJ,EAAiB,OAAjB,EAA0B,QAA1B,EAAd,GAAqD,EAAjE;AACA,QAAM,QAAQ,UAAU,IAAV,EAAgB,OAAhB,CAAd;AACA,QAAM,cAAc,UAAU,IAAV,EAAgB,aAAhB,CAApB;AACA,QAAM,UAAU,WAAW,IAAX,EAAiB,GAAjB,CAAhB;AACA,WAAO,IAAI,SAAS,QAAb,CAAsB,GAAtB,EAA2B,KAA3B,EAAkC,WAAlC,EAA+C,OAA/C,CAAP;AACD,GATD,MASO,IAAI,YAAY,KAAK,KAAL,KAAe,MAA/B,EAAuC;AAC5C;AACA,QAAM,eAAc,UAAU,IAAV,EAAgB,KAAhB,CAApB;AACA,QAAM,OAAM,eAAc,IAAI,YAAJ,EAAiB,OAAjB,EAA0B,QAA1B,EAAd,GAAqD,EAAjE;AACA,QAAM,SAAS,UAAU,IAAV,EAAgB,QAAhB,KAA6B,KAA5C;AACA,QAAM,SAAQ,UAAU,IAAV,EAAgB,OAAhB,CAAd;AACA,QAAM,eAAc,UAAU,IAAV,EAAgB,aAAhB,CAApB;AACA,QAAM,aAAa,SAAS,IAAT,EAAe,QAAf,CAAnB;AACA,QAAI,SAAS,EAAb;AACA,SAAK,IAAI,MAAM,CAAV,EAAa,MAAM,WAAW,MAAnC,EAA2C,MAAM,GAAjD,EAAsD,KAAtD,EAA6D;AAC3D,UAAI,QAAQ,WAAW,GAAX,CAAZ;AACA,UAAI,OAAO,UAAU,KAAV,EAAiB,MAAjB,CAAX;AACA,UAAI,WAAW,WAAW,KAAX,EAAkB,UAAlB,CAAf;AACA,UAAI,WAAW,UAAU,KAAV,EAAiB,UAAjB,CAAf;AACA,UAAI,mBAAmB,UAAU,KAAV,EAAiB,kBAAjB,CAAvB;AACA,UAAI,QAAQ,IAAI,SAAS,KAAb,CAAmB,IAAnB,EAAyB,QAAzB,EAAmC,QAAnC,EAA6C,gBAA7C,CAAZ;AACA,aAAO,IAAP,CAAY,KAAZ;AACD;AACD,WAAO,IAAI,SAAS,IAAb,CAAkB,IAAlB,EAAuB,MAAvB,EAA+B,kBAA/B,EAAmD,MAAnD,EAA2D,MAA3D,EAAkE,YAAlE,CAAP;AACD,GAnBM,MAmBA,IAAI,QAAJ,EAAc;AACnB;AACA,QAAI,WAAU,EAAd;AACA,SAAK,IAAI,GAAT,IAAgB,IAAhB,EAAsB;AACpB,UAAI,KAAK,cAAL,CAAoB,GAApB,CAAJ,EAA8B;AAC5B,iBAAQ,GAAR,IAAe,gBAAgB,KAAK,GAAL,CAAhB,EAA2B,OAA3B,CAAf;AACD;AACF;AACD,WAAO,QAAP;AACD,GATM,MASA,IAAI,gBAAgB,KAApB,EAA2B;AAChC;AACA,QAAI,YAAU,EAAd;AACA,SAAK,IAAI,OAAM,CAAV,EAAa,OAAM,KAAK,MAA7B,EAAqC,OAAM,IAA3C,EAAgD,MAAhD,EAAuD;AACrD,gBAAQ,IAAR,CAAa,gBAAgB,KAAK,IAAL,CAAhB,EAA2B,OAA3B,CAAb;AACD;AACD,WAAO,SAAP;AACD;AACD;AACA,SAAO,IAAP;AACD;;IAEK,a;AACJ,2BAAe;AAAA;;AACb,SAAK,SAAL,GAAiB,0BAAjB;AACD;;;;2BAEO,I,EAAoB;AAAA,UAAd,OAAc,uEAAJ,EAAI;;AAC
1B,UAAI,OAAO,IAAX;AACA,UAAI,QAAQ,SAAR,KAAsB,SAAtB,IAAmC,CAAC,QAAQ,SAAhD,EAA2D;AACzD,eAAO,KAAK,KAAL,CAAW,IAAX,CAAP;AACD;AACD,aAAO,gBAAgB,IAAhB,EAAsB,QAAQ,GAA9B,CAAP;AACD;;;;;;AAGH,OAAO,OAAP,GAAiB;AACf,iBAAe;AADA,CAAjB;;;;;ACzHA,IAAM,WAAW,QAAQ,YAAR,CAAjB;AACA,IAAM,OAAO,QAAQ,QAAR,CAAb;AACA,IAAM,OAAO,QAAQ,QAAR,CAAb;;AAEA,OAAO,OAAP,GAAiB;AACf,iBAAe,SAAS,aADT;AAEf,aAAW,KAAK,SAFD;AAGf,aAAW,KAAK;AAHD,CAAjB;;;;;;;;;ICJM,S;AACJ,uBAAe;AAAA;;AACb,SAAK,SAAL,GAAiB,kBAAjB;AACD;;;;2BAEO,I,EAAoB;AAAA,UAAd,OAAc,uEAAJ,EAAI;;AAC1B,aAAO,KAAK,KAAL,CAAW,IAAX,CAAP;AACD;;;;;;AAGH,OAAO,OAAP,GAAiB;AACf,aAAW;AADI,CAAjB;;;;;;;;;ICVM,S;AACJ,uBAAe;AAAA;;AACb,SAAK,SAAL,GAAiB,QAAjB;AACD;;;;2BAEO,I,EAAoB;AAAA,UAAd,OAAc,uEAAJ,EAAI;;AAC1B,aAAO,IAAP;AACD;;;;;;AAGH,OAAO,OAAP,GAAiB;AACf,aAAW;AADI,CAAjB;;;;;;;ICVM,Q,GACJ,oBAAmE;AAAA,MAAtD,GAAsD,uEAAhD,EAAgD;AAAA,MAA5C,KAA4C,uEAApC,EAAoC;AAAA,MAAhC,WAAgC,uEAAlB,EAAkB;AAAA,MAAd,OAAc,uEAAJ,EAAI;;AAAA;;AACjE,OAAK,GAAL,GAAW,GAAX;AACA,OAAK,KAAL,GAAa,KAAb;AACA,OAAK,WAAL,GAAmB,WAAnB;AACA,OAAK,OAAL,GAAe,OAAf;AACD,C;;IAGG,I,GACJ,cAAa,GAAb,EAAkB,MAAlB,EAAoG;AAAA,MAA1E,QAA0E,uEAA/D,kBAA+D;AAAA,MAA3C,MAA2C,uEAAlC,EAAkC;AAAA,MAA9B,KAA8B,uEAAtB,EAAsB;AAAA,MAAlB,WAAkB,uEAAJ,EAAI;;AAAA;;AAClG,MAAI,QAAQ,SAAZ,EAAuB;AACrB,UAAM,IAAI,KAAJ,CAAU,0BAAV,CAAN;AACD;;AAED,MAAI,WAAW,SAAf,EAA0B;AACxB,UAAM,IAAI,KAAJ,CAAU,6BAAV,CAAN;AACD;;AAED,OAAK,GAAL,GAAW,GAAX;AACA,OAAK,MAAL,GAAc,MAAd;AACA,OAAK,QAAL,GAAgB,QAAhB;AACA,OAAK,MAAL,GAAc,MAAd;AACA,OAAK,KAAL,GAAa,KAAb;AACA,OAAK,WAAL,GAAmB,WAAnB;AACD,C;;IAGG,K,GACJ,eAAa,IAAb,EAAsE;AAAA,MAAnD,QAAmD,uEAAxC,KAAwC;AAAA,MAAjC,QAAiC,uEAAtB,EAAsB;AAAA,MAAlB,WAAkB,uEAAJ,EAAI;;AAAA;;AACpE,MAAI,SAAS,SAAb,EAAwB;AACtB,UAAM,IAAI,KAAJ,CAAU,2BAAV,CAAN;AACD;;AAED,OAAK,IAAL,GAAY,IAAZ;AACA,OAAK,QAAL,GAAgB,QAAhB;AACA,OAAK,QAAL,GAAgB,QAAhB;AACA,OAAK,WAAL,GAAmB,WAAnB;AACD,C;;AAGH,OAAO,OAAP,GAAiB;AACf,YAAU,QADK;AAEf,QAAM,IAFS;AAGf,SAAO;AAHQ,CAAjB;;;;;;;;;;;ICzCM,c;;;AACJ,0BAAa,OAAb,EAAsB;AAAA;;AAAA,gIACd,OADc;;AAEpB,UAAK,OAAL,GAAe,OAAf;AACA,UAAK,IAAL,GAAY,gBAAZ;AAH
oB;AAIrB;;;EAL0B,K;;IAQvB,e;;;AACJ,2BAAa,OAAb,EAAsB;AAAA;;AAAA,mIACd,OADc;;AAEpB,WAAK,OAAL,GAAe,OAAf;AACA,WAAK,IAAL,GAAY,iBAAZ;AAHoB;AAIrB;;;EAL2B,K;;IAQxB,Y;;;AACJ,wBAAa,OAAb,EAAsB,OAAtB,EAA+B;AAAA;;AAAA,6HACvB,OADuB;;AAE7B,WAAK,OAAL,GAAe,OAAf;AACA,WAAK,OAAL,GAAe,OAAf;AACA,WAAK,IAAL,GAAY,cAAZ;AAJ6B;AAK9B;;;EANwB,K;;AAS3B,OAAO,OAAP,GAAiB;AACf,kBAAgB,cADD;AAEf,mBAAiB,eAFF;AAGf,gBAAc;AAHC,CAAjB;;;;;ACzBA,IAAM,OAAO,QAAQ,QAAR,CAAb;AACA,IAAM,SAAS,QAAQ,UAAR,CAAf;AACA,IAAM,SAAS,QAAQ,UAAR,CAAf;AACA,IAAM,WAAW,QAAQ,YAAR,CAAjB;AACA,IAAM,SAAS,QAAQ,UAAR,CAAf;AACA,IAAM,aAAa,QAAQ,cAAR,CAAnB;AACA,IAAM,QAAQ,QAAQ,SAAR,CAAd;;AAEA,IAAM,UAAU;AACd,UAAQ,OAAO,MADD;AAEd,YAAU,SAAS,QAFL;AAGd,QAAM,SAAS,IAHD;AAId,QAAM,IAJQ;AAKd,UAAQ,MALM;AAMd,UAAQ,MANM;AAOd,cAAY,UAPE;AAQd,SAAO;AARO,CAAhB;;AAWA,OAAO,OAAP,GAAiB,OAAjB;;;;;;;;;ACnBA,IAAM,QAAQ,QAAQ,kBAAR,CAAd;AACA,IAAM,SAAS,QAAQ,WAAR,CAAf;AACA,IAAM,QAAQ,QAAQ,UAAR,CAAd;AACA,IAAM,MAAM,QAAQ,WAAR,CAAZ;AACA,IAAM,cAAc,QAAQ,cAAR,CAApB;;AAEA,IAAM,gBAAgB,SAAhB,aAAgB,CAAC,QAAD,EAAW,QAAX,EAAqB,gBAArB,EAA0C;AAC9D,SAAO,SAAS,IAAT,GAAgB,IAAhB,CAAqB,gBAAQ;AAClC,QAAI,gBAAJ,EAAsB;AACpB,uBAAiB,QAAjB,EAA2B,IAA3B;AACD;AACD,QAAM,cAAc,SAAS,OAAT,CAAiB,GAAjB,CAAqB,cAArB,CAApB;AACA,QAAM,UAAU,MAAM,gBAAN,CAAuB,QAAvB,EAAiC,WAAjC,CAAhB;AACA,QAAM,UAAU,EAAC,KAAK,SAAS,GAAf,EAAhB;AACA,WAAO,QAAQ,MAAR,CAAe,IAAf,EAAqB,OAArB,CAAP;AACD,GARM,CAAP;AASD,CAVD;;IAYM,a;AACJ,2BAA2B;AAAA,QAAd,OAAc,uEAAJ,EAAI;;AAAA;;AACzB,SAAK,OAAL,GAAe,CAAC,MAAD,EAAS,OAAT,CAAf;AACA,SAAK,IAAL,GAAY,QAAQ,IAAR,IAAgB,IAA5B;AACA,SAAK,OAAL,GAAe,QAAQ,OAAR,IAAmB,EAAlC;AACA,SAAK,KAAL,GAAa,QAAQ,KAAR,IAAiB,KAA9B;AACA,SAAK,QAAL,GAAgB,QAAQ,QAAR,IAAoB,OAAO,QAA3C;AACA,SAAK,eAAL,GAAuB,QAAQ,eAA/B;AACA,SAAK,gBAAL,GAAwB,QAAQ,gBAAhC;AACD;;;;iCAEa,I,EAAM,Q,EAAuB;AAAA,UAAb,MAAa,uEAAJ,EAAI;;AACzC,UAAM,SAAS,KAAK,MAApB;AACA,UAAM,SAAS,KAAK,MAAL,CAAY,WAAZ,EAAf;AACA,UAAI,cAAc,EAAlB;AACA,UAAI,aAAa,EAAjB;AACA,UAAI,aAAa,EAAjB;AACA,UAAI,aAAa,EAAjB;AACA,UAAI,UAAU,KAAd;;AAEA,WAAK,IAAI,MAAM,CAAV,EAAa,MAAM,OAAO,MAA/B,EAAuC,MAAM,GAA7C,EAAkD,KAAlD,EA
AyD;AACvD,YAAM,QAAQ,OAAO,GAAP,CAAd;;AAEA;AACA,YAAI,CAAC,OAAO,cAAP,CAAsB,MAAM,IAA5B,CAAL,EAAwC;AACtC,cAAI,MAAM,QAAV,EAAoB;AAClB,kBAAM,IAAI,OAAO,cAAX,+BAAsD,MAAM,IAA5D,OAAN;AACD,WAFD,MAEO;AACL;AACD;AACF;;AAED,mBAAW,IAAX,CAAgB,MAAM,IAAtB;AACA,YAAI,MAAM,QAAN,KAAmB,OAAvB,EAAgC;AAC9B,sBAAY,MAAM,IAAlB,IAA0B,OAAO,MAAM,IAAb,CAA1B;AACD,SAFD,MAEO,IAAI,MAAM,QAAN,KAAmB,MAAvB,EAA+B;AACpC,qBAAW,MAAM,IAAjB,IAAyB,OAAO,MAAM,IAAb,CAAzB;AACD,SAFM,MAEA,IAAI,MAAM,QAAN,KAAmB,MAAvB,EAA+B;AACpC,qBAAW,MAAM,IAAjB,IAAyB,OAAO,MAAM,IAAb,CAAzB;AACA,oBAAU,IAAV;AACD,SAHM,MAGA,IAAI,MAAM,QAAN,KAAmB,MAAvB,EAA+B;AACpC,uBAAa,OAAO,MAAM,IAAb,CAAb;AACA,oBAAU,IAAV;AACD;AACF;;AAED;AACA,WAAK,IAAI,QAAT,IAAqB,MAArB,EAA6B;AAC3B,YAAI,OAAO,cAAP,CAAsB,QAAtB,KAAmC,CAAC,WAAW,QAAX,CAAoB,QAApB,CAAxC,EAAuE;AACrE,gBAAM,IAAI,OAAO,cAAX,0BAAiD,QAAjD,OAAN;AACD;AACF;;AAED,UAAI,iBAAiB,EAAC,QAAQ,MAAT,EAAiB,SAAS,EAA1B,EAArB;;AAEA,aAAO,MAAP,CAAc,eAAe,OAA7B,EAAsC,KAAK,OAA3C;;AAEA,UAAI,OAAJ,EAAa;AACX,YAAI,KAAK,QAAL,KAAkB,kBAAtB,EAA0C;AACxC,yBAAe,IAAf,GAAsB,KAAK,SAAL,CAAe,UAAf,CAAtB;AACA,yBAAe,OAAf,CAAuB,cAAvB,IAAyC,kBAAzC;AACD,SAHD,MAGO,IAAI,KAAK,QAAL,KAAkB,qBAAtB,EAA6C;AAClD,cAAI,OAAO,IAAI,KAAK,QAAT,EAAX;;AAEA,eAAK,IAAI,QAAT,IAAqB,UAArB,EAAiC;AAC/B,iBAAK,MAAL,CAAY,QAAZ,EAAsB,WAAW,QAAX,CAAtB;AACD;AACD,yBAAe,IAAf,GAAsB,IAAtB;AACD,SAPM,MAOA,IAAI,KAAK,QAAL,KAAkB,mCAAtB,EAA2D;AAChE,cAAI,WAAW,EAAf;AACA,eAAK,IAAI,SAAT,IAAqB,UAArB,EAAiC;AAC/B,gBAAM,aAAa,mBAAmB,SAAnB,CAAnB;AACA,gBAAM,eAAe,mBAAmB,WAAW,SAAX,CAAnB,CAArB;AACA,qBAAS,IAAT,CAAc,aAAa,GAAb,GAAmB,YAAjC;AACD;AACD,qBAAW,SAAS,IAAT,CAAc,GAAd,CAAX;;AAEA,yBAAe,IAAf,GAAsB,QAAtB;AACA,yBAAe,OAAf,CAAuB,cAAvB,IAAyC,mCAAzC;AACD;AACF;;AAED,UAAI,KAAK,IAAT,EAAe;AACb,yBAAiB,KAAK,IAAL,CAAU,YAAV,CAAuB,cAAvB,CAAjB;AACD;;AAED,UAAI,YAAY,YAAY,KAAZ,CAAkB,KAAK,GAAvB,CAAhB;AACA,kBAAY,UAAU,MAAV,CAAiB,UAAjB,CAAZ;AACA,kBAAY,IAAI,GAAJ,CAAQ,SAAR,CAAZ;AACA,gBAAU,GAAV,CAAc,OAAd,EAAuB,WAAvB;;AAEA,aAAO;AACL,aAAK,UAAU,QAAV,EADA;AAEL,iBAAS;AAFJ,OAAP;AAID;;;2BAEO,I,EAAM,Q,EAAuB;AAAA,UAAb,MAAa,uEAAJ,EAAI;;AACnC,UAAM,mBA
AmB,KAAK,gBAA9B;AACA,UAAM,UAAU,KAAK,YAAL,CAAkB,IAAlB,EAAwB,QAAxB,EAAkC,MAAlC,CAAhB;;AAEA,UAAI,KAAK,eAAT,EAA0B;AACxB,aAAK,eAAL,CAAqB,OAArB;AACD;;AAED,aAAO,KAAK,KAAL,CAAW,QAAQ,GAAnB,EAAwB,QAAQ,OAAhC,EACJ,IADI,CACC,UAAU,QAAV,EAAoB;AACxB,YAAI,SAAS,MAAT,KAAoB,GAAxB,EAA6B;AAC3B;AACD;AACD,eAAO,cAAc,QAAd,EAAwB,QAAxB,EAAkC,gBAAlC,EACJ,IADI,CACC,UAAU,IAAV,EAAgB;AACpB,cAAI,SAAS,EAAb,EAAiB;AACf,mBAAO,IAAP;AACD,WAFD,MAEO;AACL,gBAAM,QAAQ,SAAS,MAAT,GAAkB,GAAlB,GAAwB,SAAS,UAA/C;AACA,gBAAM,QAAQ,IAAI,OAAO,YAAX,CAAwB,KAAxB,EAA+B,IAA/B,CAAd;AACA,mBAAO,QAAQ,MAAR,CAAe,KAAf,CAAP;AACD;AACF,SATI,CAAP;AAUD,OAfI,CAAP;AAgBD;;;;;;AAGH,OAAO,OAAP,GAAiB;AACf,iBAAe;AADA,CAAjB;;;;;AC9IA,IAAM,OAAO,QAAQ,QAAR,CAAb;;AAEA,OAAO,OAAP,GAAiB;AACf,iBAAe,KAAK;AADL,CAAjB;;;;;ACFA,IAAM,MAAM,QAAQ,WAAR,CAAZ;;AAEA,IAAM,qBAAqB,SAArB,kBAAqB,CAAU,UAAV,EAAsB,GAAtB,EAA2B;AACpD,MAAM,YAAY,IAAI,GAAJ,CAAQ,GAAR,CAAlB;AACA,MAAM,SAAS,UAAU,QAAV,CAAmB,OAAnB,CAA2B,GAA3B,EAAgC,EAAhC,CAAf;;AAFoD;AAAA;AAAA;;AAAA;AAIpD,yBAAsB,UAAtB,8HAAkC;AAAA,UAAzB,SAAyB;;AAChC,UAAI,UAAU,OAAV,CAAkB,QAAlB,CAA2B,MAA3B,CAAJ,EAAwC;AACtC,eAAO,SAAP;AACD;AACF;AARmD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;AAUpD,QAAM,sCAAoC,GAApC,CAAN;AACD,CAXD;;AAaA,IAAM,mBAAmB,SAAnB,gBAAmB,CAAU,QAAV,EAAoB,WAApB,EAAiC;AACxD,MAAI,gBAAgB,SAAhB,IAA6B,gBAAgB,IAAjD,EAAuD;AACrD,WAAO,SAAS,CAAT,CAAP;AACD;;AAED,MAAM,WAAW,YAAY,WAAZ,GAA0B,KAA1B,CAAgC,GAAhC,EAAqC,CAArC,EAAwC,IAAxC,EAAjB;AACA,MAAM,WAAW,SAAS,KAAT,CAAe,GAAf,EAAoB,CAApB,IAAyB,IAA1C;AACA,MAAM,eAAe,KAArB;AACA,MAAM,kBAAkB,CAAC,QAAD,EAAW,QAAX,EAAqB,YAArB,CAAxB;;AARwD;AAAA;AAAA;;AAAA;AAUxD,0BAAoB,QAApB,mIAA8B;AAAA,UAArB,OAAqB;;AAC5B,UAAI,gBAAgB,QAAhB,CAAyB,QAAQ,SAAjC,CAAJ,EAAiD;AAC/C,eAAO,OAAP;AACD;AACF;AAduD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;AAgBxD,QAAM,qDAAmD,WAAnD,CAAN;AACD,CAjBD;;AAmBA,IAAM,iBAAiB,SAAjB,cAAiB,CAAU,MAAV,EAAkB;AACvC;AACA,SAAQ,8BAA6B,IAA7B,CAAkC,MAAlC;AAAR;AACD,CAHD;;AAKA,OAAO,OAAP,GAAiB;AACf,sBAAoB,kBADL;AAEf,oBAAkB,gBAFH;AAGf,kBAAgB;AAHD,CAAjB;;;ACvCA
;AACA;AACA;AACA;AACA;AACA;AACA;;ACNA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;AC7DA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;ACtCA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA
;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;;ACrWA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;;;ACrDA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;AChMA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;
AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA","file":"generated.js","sourceRoot":"","sourcesContent":["(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof 
require==\"function\"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error(\"Cannot find module '\"+o+\"'\");throw f.code=\"MODULE_NOT_FOUND\",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require==\"function\"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})","class BasicAuthentication {\n  constructor (options = {}) {\n    const username = options.username\n    const password = options.password\n    const hash = window.btoa(username + ':' + password)\n    this.auth = 'Basic ' + hash\n  }\n\n  authenticate (options) {\n    options.headers['Authorization'] = this.auth\n    return options\n  }\n}\n\nmodule.exports = {\n  BasicAuthentication: BasicAuthentication\n}\n","const basic = require('./basic')\nconst session = require('./session')\nconst token = require('./token')\n\nmodule.exports = {\n  BasicAuthentication: basic.BasicAuthentication,\n  SessionAuthentication: session.SessionAuthentication,\n  TokenAuthentication: token.TokenAuthentication\n}\n","const utils = require('../utils')\n\nfunction trim (str) {\n  return str.replace(/^\\s\\s*/, '').replace(/\\s\\s*$/, '')\n}\n\nfunction getCookie (cookieName, cookieString) {\n  cookieString = cookieString || window.document.cookie\n  if (cookieString && cookieString !== '') {\n    const cookies = cookieString.split(';')\n    for (var i = 0; i < cookies.length; i++) {\n      const cookie = trim(cookies[i])\n      // Does this cookie string begin with the name we want?\n      if (cookie.substring(0, cookieName.length + 1) === (cookieName + '=')) {\n        return decodeURIComponent(cookie.substring(cookieName.length + 1))\n      }\n    }\n  }\n  return null\n}\n\nclass SessionAuthentication {\n  constructor (options = {}) {\n    this.csrfToken = getCookie(options.csrfCookieName, options.cookieString)\n    this.csrfHeaderName = options.csrfHeaderName\n  }\n\n  authenticate (options) {\n    
options.credentials = 'same-origin'\n    if (this.csrfToken && !utils.csrfSafeMethod(options.method)) {\n      options.headers[this.csrfHeaderName] = this.csrfToken\n    }\n    return options\n  }\n}\n\nmodule.exports = {\n  SessionAuthentication: SessionAuthentication\n}\n","class TokenAuthentication {\n  constructor (options = {}) {\n    this.token = options.token\n    this.scheme = options.scheme || 'Bearer'\n  }\n\n  authenticate (options) {\n    options.headers['Authorization'] = this.scheme + ' ' + this.token\n    return options\n  }\n}\n\nmodule.exports = {\n  TokenAuthentication: TokenAuthentication\n}\n","const document = require('./document')\nconst codecs = require('./codecs')\nconst errors = require('./errors')\nconst transports = require('./transports')\nconst utils = require('./utils')\n\nfunction lookupLink (node, keys) {\n  for (let key of keys) {\n    if (node instanceof document.Document) {\n      node = node.content[key]\n    } else {\n      node = node[key]\n    }\n    if (node === undefined) {\n      throw new errors.LinkLookupError(`Invalid link lookup: ${JSON.stringify(keys)}`)\n    }\n  }\n  if (!(node instanceof document.Link)) {\n    throw new errors.LinkLookupError(`Invalid link lookup: ${JSON.stringify(keys)}`)\n  }\n  return node\n}\n\nclass Client {\n  constructor (options = {}) {\n    const transportOptions = {\n      auth: options.auth || null,\n      headers: options.headers || {},\n      requestCallback: options.requestCallback,\n      responseCallback: options.responseCallback\n    }\n\n    this.decoders = options.decoders || [new codecs.CoreJSONCodec(), new codecs.JSONCodec(), new codecs.TextCodec()]\n    this.transports = options.transports || [new transports.HTTPTransport(transportOptions)]\n  }\n\n  action (document, keys, params = {}) {\n    const link = lookupLink(document, keys)\n    const transport = utils.determineTransport(this.transports, link.url)\n    return transport.action(link, this.decoders, params)\n  }\n\n  get 
(url) {\n    const link = new document.Link(url, 'get')\n    const transport = utils.determineTransport(this.transports, url)\n    return transport.action(link, this.decoders)\n  }\n}\n\nmodule.exports = {\n  Client: Client\n}\n","const document = require('../document')\nconst URL = require('url-parse')\n\nfunction unescapeKey (key) {\n  if (key.match(/__(type|meta)$/)) {\n    return key.substring(1)\n  }\n  return key\n}\n\nfunction getString (obj, key) {\n  const value = obj[key]\n  if (typeof (value) === 'string') {\n    return value\n  }\n  return ''\n}\n\nfunction getBoolean (obj, key) {\n  const value = obj[key]\n  if (typeof (value) === 'boolean') {\n    return value\n  }\n  return false\n}\n\nfunction getObject (obj, key) {\n  const value = obj[key]\n  if (typeof (value) === 'object') {\n    return value\n  }\n  return {}\n}\n\nfunction getArray (obj, key) {\n  const value = obj[key]\n  if (value instanceof Array) {\n    return value\n  }\n  return []\n}\n\nfunction getContent (data, baseUrl) {\n  const excluded = ['_type', '_meta']\n  var content = {}\n  for (var property in data) {\n    if (data.hasOwnProperty(property) && !excluded.includes(property)) {\n      const key = unescapeKey(property)\n      const value = primitiveToNode(data[property], baseUrl)\n      content[key] = value\n    }\n  }\n  return content\n}\n\nfunction primitiveToNode (data, baseUrl) {\n  const isObject = data instanceof Object && !(data instanceof Array)\n\n  if (isObject && data._type === 'document') {\n    // Document\n    const meta = getObject(data, '_meta')\n    const relativeUrl = getString(meta, 'url')\n    const url = relativeUrl ? 
URL(relativeUrl, baseUrl).toString() : ''\n    const title = getString(meta, 'title')\n    const description = getString(meta, 'description')\n    const content = getContent(data, url)\n    return new document.Document(url, title, description, content)\n  } else if (isObject && data._type === 'link') {\n    // Link\n    const relativeUrl = getString(data, 'url')\n    const url = relativeUrl ? URL(relativeUrl, baseUrl).toString() : ''\n    const method = getString(data, 'action') || 'get'\n    const title = getString(data, 'title')\n    const description = getString(data, 'description')\n    const fieldsData = getArray(data, 'fields')\n    var fields = []\n    for (let idx = 0, len = fieldsData.length; idx < len; idx++) {\n      let value = fieldsData[idx]\n      let name = getString(value, 'name')\n      let required = getBoolean(value, 'required')\n      let location = getString(value, 'location')\n      let fieldDescription = getString(value, 'fieldDescription')\n      let field = new document.Field(name, required, location, fieldDescription)\n      fields.push(field)\n    }\n    return new document.Link(url, method, 'application/json', fields, title, description)\n  } else if (isObject) {\n    // Object\n    let content = {}\n    for (let key in data) {\n      if (data.hasOwnProperty(key)) {\n        content[key] = primitiveToNode(data[key], baseUrl)\n      }\n    }\n    return content\n  } else if (data instanceof Array) {\n    // Object\n    let content = []\n    for (let idx = 0, len = data.length; idx < len; idx++) {\n      content.push(primitiveToNode(data[idx], baseUrl))\n    }\n    return content\n  }\n  // Primitive\n  return data\n}\n\nclass CoreJSONCodec {\n  constructor () {\n    this.mediaType = 'application/coreapi+json'\n  }\n\n  decode (text, options = {}) {\n    let data = text\n    if (options.preloaded === undefined || !options.preloaded) {\n      data = JSON.parse(text)\n    }\n    return primitiveToNode(data, options.url)\n  
}\n}\n\nmodule.exports = {\n  CoreJSONCodec: CoreJSONCodec\n}\n","const corejson = require('./corejson')\nconst json = require('./json')\nconst text = require('./text')\n\nmodule.exports = {\n  CoreJSONCodec: corejson.CoreJSONCodec,\n  JSONCodec: json.JSONCodec,\n  TextCodec: text.TextCodec\n}\n","class JSONCodec {\n  constructor () {\n    this.mediaType = 'application/json'\n  }\n\n  decode (text, options = {}) {\n    return JSON.parse(text)\n  }\n}\n\nmodule.exports = {\n  JSONCodec: JSONCodec\n}\n","class TextCodec {\n  constructor () {\n    this.mediaType = 'text/*'\n  }\n\n  decode (text, options = {}) {\n    return text\n  }\n}\n\nmodule.exports = {\n  TextCodec: TextCodec\n}\n","class Document {\n  constructor (url = '', title = '', description = '', content = {}) {\n    this.url = url\n    this.title = title\n    this.description = description\n    this.content = content\n  }\n}\n\nclass Link {\n  constructor (url, method, encoding = 'application/json', fields = [], title = '', description = '') {\n    if (url === undefined) {\n      throw new Error('url argument is required')\n    }\n\n    if (method === undefined) {\n      throw new Error('method argument is required')\n    }\n\n    this.url = url\n    this.method = method\n    this.encoding = encoding\n    this.fields = fields\n    this.title = title\n    this.description = description\n  }\n}\n\nclass Field {\n  constructor (name, required = false, location = '', description = '') {\n    if (name === undefined) {\n      throw new Error('name argument is required')\n    }\n\n    this.name = name\n    this.required = required\n    this.location = location\n    this.description = description\n  }\n}\n\nmodule.exports = {\n  Document: Document,\n  Link: Link,\n  Field: Field\n}\n","class ParameterError extends Error {\n  constructor (message) {\n    super(message)\n    this.message = message\n    this.name = 'ParameterError'\n  }\n}\n\nclass LinkLookupError extends Error {\n  constructor (message) {\n    
super(message)\n    this.message = message\n    this.name = 'LinkLookupError'\n  }\n}\n\nclass ErrorMessage extends Error {\n  constructor (message, content) {\n    super(message)\n    this.message = message\n    this.content = content\n    this.name = 'ErrorMessage'\n  }\n}\n\nmodule.exports = {\n  ParameterError: ParameterError,\n  LinkLookupError: LinkLookupError,\n  ErrorMessage: ErrorMessage\n}\n","const auth = require('./auth')\nconst client = require('./client')\nconst codecs = require('./codecs')\nconst document = require('./document')\nconst errors = require('./errors')\nconst transports = require('./transports')\nconst utils = require('./utils')\n\nconst coreapi = {\n  Client: client.Client,\n  Document: document.Document,\n  Link: document.Link,\n  auth: auth,\n  codecs: codecs,\n  errors: errors,\n  transports: transports,\n  utils: utils\n}\n\nmodule.exports = coreapi\n","const fetch = require('isomorphic-fetch')\nconst errors = require('../errors')\nconst utils = require('../utils')\nconst URL = require('url-parse')\nconst urlTemplate = require('url-template')\n\nconst parseResponse = (response, decoders, responseCallback) => {\n  return response.text().then(text => {\n    if (responseCallback) {\n      responseCallback(response, text)\n    }\n    const contentType = response.headers.get('Content-Type')\n    const decoder = utils.negotiateDecoder(decoders, contentType)\n    const options = {url: response.url}\n    return decoder.decode(text, options)\n  })\n}\n\nclass HTTPTransport {\n  constructor (options = {}) {\n    this.schemes = ['http', 'https']\n    this.auth = options.auth || null\n    this.headers = options.headers || {}\n    this.fetch = options.fetch || fetch\n    this.FormData = options.FormData || window.FormData\n    this.requestCallback = options.requestCallback\n    this.responseCallback = options.responseCallback\n  }\n\n  buildRequest (link, decoders, params = {}) {\n    const fields = link.fields\n    const method = 
link.method.toUpperCase()\n    let queryParams = {}\n    let pathParams = {}\n    let formParams = {}\n    let fieldNames = []\n    let hasBody = false\n\n    for (let idx = 0, len = fields.length; idx < len; idx++) {\n      const field = fields[idx]\n\n      // Ensure any required fields are included\n      if (!params.hasOwnProperty(field.name)) {\n        if (field.required) {\n          throw new errors.ParameterError(`Missing required field: \"${field.name}\"`)\n        } else {\n          continue\n        }\n      }\n\n      fieldNames.push(field.name)\n      if (field.location === 'query') {\n        queryParams[field.name] = params[field.name]\n      } else if (field.location === 'path') {\n        pathParams[field.name] = params[field.name]\n      } else if (field.location === 'form') {\n        formParams[field.name] = params[field.name]\n        hasBody = true\n      } else if (field.location === 'body') {\n        formParams = params[field.name]\n        hasBody = true\n      }\n    }\n\n    // Check for any parameters that did not have a matching field\n    for (var property in params) {\n      if (params.hasOwnProperty(property) && !fieldNames.includes(property)) {\n        throw new errors.ParameterError(`Unknown parameter: \"${property}\"`)\n      }\n    }\n\n    let requestOptions = {method: method, headers: {}}\n\n    Object.assign(requestOptions.headers, this.headers)\n\n    if (hasBody) {\n      if (link.encoding === 'application/json') {\n        requestOptions.body = JSON.stringify(formParams)\n        requestOptions.headers['Content-Type'] = 'application/json'\n      } else if (link.encoding === 'multipart/form-data') {\n        let form = new this.FormData()\n\n        for (let paramKey in formParams) {\n          form.append(paramKey, formParams[paramKey])\n        }\n        requestOptions.body = form\n      } else if (link.encoding === 'application/x-www-form-urlencoded') {\n        let formBody = []\n        for (let paramKey in 
formParams) {\n          const encodedKey = encodeURIComponent(paramKey)\n          const encodedValue = encodeURIComponent(formParams[paramKey])\n          formBody.push(encodedKey + '=' + encodedValue)\n        }\n        formBody = formBody.join('&')\n\n        requestOptions.body = formBody\n        requestOptions.headers['Content-Type'] = 'application/x-www-form-urlencoded'\n      }\n    }\n\n    if (this.auth) {\n      requestOptions = this.auth.authenticate(requestOptions)\n    }\n\n    let parsedUrl = urlTemplate.parse(link.url)\n    parsedUrl = parsedUrl.expand(pathParams)\n    parsedUrl = new URL(parsedUrl)\n    parsedUrl.set('query', queryParams)\n\n    return {\n      url: parsedUrl.toString(),\n      options: requestOptions\n    }\n  }\n\n  action (link, decoders, params = {}) {\n    const responseCallback = this.responseCallback\n    const request = this.buildRequest(link, decoders, params)\n\n    if (this.requestCallback) {\n      this.requestCallback(request)\n    }\n\n    return this.fetch(request.url, request.options)\n      .then(function (response) {\n        if (response.status === 204) {\n          return\n        }\n        return parseResponse(response, decoders, responseCallback)\n          .then(function (data) {\n            if (response.ok) {\n              return data\n            } else {\n              const title = response.status + ' ' + response.statusText\n              const error = new errors.ErrorMessage(title, data)\n              return Promise.reject(error)\n            }\n          })\n      })\n  }\n}\n\nmodule.exports = {\n  HTTPTransport: HTTPTransport\n}\n","const http = require('./http')\n\nmodule.exports = {\n  HTTPTransport: http.HTTPTransport\n}\n","const URL = require('url-parse')\n\nconst determineTransport = function (transports, url) {\n  const parsedUrl = new URL(url)\n  const scheme = parsedUrl.protocol.replace(':', '')\n\n  for (let transport of transports) {\n    if (transport.schemes.includes(scheme)) {\n   
   return transport\n    }\n  }\n\n  throw Error(`Unsupported scheme in URL: ${url}`)\n}\n\nconst negotiateDecoder = function (decoders, contentType) {\n  if (contentType === undefined || contentType === null) {\n    return decoders[0]\n  }\n\n  const fullType = contentType.toLowerCase().split(';')[0].trim()\n  const mainType = fullType.split('/')[0] + '/*'\n  const wildcardType = '*/*'\n  const acceptableTypes = [fullType, mainType, wildcardType]\n\n  for (let decoder of decoders) {\n    if (acceptableTypes.includes(decoder.mediaType)) {\n      return decoder\n    }\n  }\n\n  throw Error(`Unsupported media in Content-Type header: ${contentType}`)\n}\n\nconst csrfSafeMethod = function (method) {\n  // these HTTP methods do not require CSRF protection\n  return (/^(GET|HEAD|OPTIONS|TRACE)$/.test(method))\n}\n\nmodule.exports = {\n  determineTransport: determineTransport,\n  negotiateDecoder: negotiateDecoder,\n  csrfSafeMethod: csrfSafeMethod\n}\n","// the whatwg-fetch polyfill installs the fetch() function\n// on the global object (window or self)\n//\n// Return that as the export for use in Webpack, Browserify etc.\nrequire('whatwg-fetch');\nmodule.exports = self.fetch.bind(self);\n","'use strict';\n\nvar has = Object.prototype.hasOwnProperty;\n\n/**\n * Simple query string parser.\n *\n * @param {String} query The query string that needs to be parsed.\n * @returns {Object}\n * @api public\n */\nfunction querystring(query) {\n  var parser = /([^=?&]+)=?([^&]*)/g\n    , result = {}\n    , part;\n\n  //\n  // Little nifty parsing hack, leverage the fact that RegExp.exec increments\n  // the lastIndex property so we can continue executing this loop until we've\n  // parsed all results.\n  //\n  for (;\n    part = parser.exec(query);\n    result[decodeURIComponent(part[1])] = decodeURIComponent(part[2])\n  );\n\n  return result;\n}\n\n/**\n * Transform a query string to an object.\n *\n * @param {Object} obj Object that should be transformed.\n * @param {String} 
prefix Optional prefix.\n * @returns {String}\n * @api public\n */\nfunction querystringify(obj, prefix) {\n  prefix = prefix || '';\n\n  var pairs = [];\n\n  //\n  // Optionally prefix with a '?' if needed\n  //\n  if ('string' !== typeof prefix) prefix = '?';\n\n  for (var key in obj) {\n    if (has.call(obj, key)) {\n      pairs.push(encodeURIComponent(key) +'='+ encodeURIComponent(obj[key]));\n    }\n  }\n\n  return pairs.length ? prefix + pairs.join('&') : '';\n}\n\n//\n// Expose the module.\n//\nexports.stringify = querystringify;\nexports.parse = querystring;\n","'use strict';\n\n/**\n * Check if we're required to add a port number.\n *\n * @see https://url.spec.whatwg.org/#default-port\n * @param {Number|String} port Port number we need to check\n * @param {String} protocol Protocol we need to check against.\n * @returns {Boolean} Is it a default port for the given protocol\n * @api private\n */\nmodule.exports = function required(port, protocol) {\n  protocol = protocol.split(':')[0];\n  port = +port;\n\n  if (!port) return false;\n\n  switch (protocol) {\n    case 'http':\n    case 'ws':\n    return port !== 80;\n\n    case 'https':\n    case 'wss':\n    return port !== 443;\n\n    case 'ftp':\n    return port !== 21;\n\n    case 'gopher':\n    return port !== 70;\n\n    case 'file':\n    return false;\n  }\n\n  return port !== 0;\n};\n","'use strict';\n\nvar required = require('requires-port')\n  , lolcation = require('./lolcation')\n  , qs = require('querystringify')\n  , protocolre = /^([a-z][a-z0-9.+-]*:)?(\\/\\/)?([\\S\\s]*)/i;\n\n/**\n * These are the parse rules for the URL parser, it informs the parser\n * about:\n *\n * 0. The char it Needs to parse, if it's a string it should be done using\n *    indexOf, RegExp using exec and NaN means set as current value.\n * 1. The property we should set when parsing this value.\n * 2. 
Indication if it's backwards or forward parsing, when set as number it's\n *    the value of extra chars that should be split off.\n * 3. Inherit from location if non existing in the parser.\n * 4. `toLowerCase` the resulting value.\n */\nvar rules = [\n  ['#', 'hash'],                        // Extract from the back.\n  ['?', 'query'],                       // Extract from the back.\n  ['/', 'pathname'],                    // Extract from the back.\n  ['@', 'auth', 1],                     // Extract from the front.\n  [NaN, 'host', undefined, 1, 1],       // Set left over value.\n  [/:(\\d+)$/, 'port', undefined, 1],    // RegExp the back.\n  [NaN, 'hostname', undefined, 1, 1]    // Set left over.\n];\n\n/**\n * @typedef ProtocolExtract\n * @type Object\n * @property {String} protocol Protocol matched in the URL, in lowercase.\n * @property {Boolean} slashes `true` if protocol is followed by \"//\", else `false`.\n * @property {String} rest Rest of the URL that is not part of the protocol.\n */\n\n/**\n * Extract protocol information from a URL with/without double slash (\"//\").\n *\n * @param {String} address URL we want to extract from.\n * @return {ProtocolExtract} Extracted information.\n * @api private\n */\nfunction extractProtocol(address) {\n  var match = protocolre.exec(address);\n\n  return {\n    protocol: match[1] ? 
match[1].toLowerCase() : '',\n    slashes: !!match[2],\n    rest: match[3]\n  };\n}\n\n/**\n * Resolve a relative URL pathname against a base URL pathname.\n *\n * @param {String} relative Pathname of the relative URL.\n * @param {String} base Pathname of the base URL.\n * @return {String} Resolved pathname.\n * @api private\n */\nfunction resolve(relative, base) {\n  var path = (base || '/').split('/').slice(0, -1).concat(relative.split('/'))\n    , i = path.length\n    , last = path[i - 1]\n    , unshift = false\n    , up = 0;\n\n  while (i--) {\n    if (path[i] === '.') {\n      path.splice(i, 1);\n    } else if (path[i] === '..') {\n      path.splice(i, 1);\n      up++;\n    } else if (up) {\n      if (i === 0) unshift = true;\n      path.splice(i, 1);\n      up--;\n    }\n  }\n\n  if (unshift) path.unshift('');\n  if (last === '.' || last === '..') path.push('');\n\n  return path.join('/');\n}\n\n/**\n * The actual URL instance. Instead of returning an object we've opted-in to\n * create an actual constructor as it's much more memory efficient and\n * faster and it pleases my OCD.\n *\n * @constructor\n * @param {String} address URL we want to parse.\n * @param {Object|String} location Location defaults for relative paths.\n * @param {Boolean|Function} parser Parser for the query string.\n * @api public\n */\nfunction URL(address, location, parser) {\n  if (!(this instanceof URL)) {\n    return new URL(address, location, parser);\n  }\n\n  var relative, extracted, parse, instruction, index, key\n    , instructions = rules.slice()\n    , type = typeof location\n    , url = this\n    , i = 0;\n\n  //\n  // The following if statements allows this module two have compatibility with\n  // 2 different API:\n  //\n  // 1. Node.js's `url.parse` api which accepts a URL, boolean as arguments\n  //    where the boolean indicates that the query string should also be parsed.\n  //\n  // 2. 
The `URL` interface of the browser which accepts a URL, object as\n  //    arguments. The supplied object will be used as default values / fall-back\n  //    for relative paths.\n  //\n  if ('object' !== type && 'string' !== type) {\n    parser = location;\n    location = null;\n  }\n\n  if (parser && 'function' !== typeof parser) parser = qs.parse;\n\n  location = lolcation(location);\n\n  //\n  // Extract protocol information before running the instructions.\n  //\n  extracted = extractProtocol(address || '');\n  relative = !extracted.protocol && !extracted.slashes;\n  url.slashes = extracted.slashes || relative && location.slashes;\n  url.protocol = extracted.protocol || location.protocol || '';\n  address = extracted.rest;\n\n  //\n  // When the authority component is absent the URL starts with a path\n  // component.\n  //\n  if (!extracted.slashes) instructions[2] = [/(.*)/, 'pathname'];\n\n  for (; i < instructions.length; i++) {\n    instruction = instructions[i];\n    parse = instruction[0];\n    key = instruction[1];\n\n    if (parse !== parse) {\n      url[key] = address;\n    } else if ('string' === typeof parse) {\n      if (~(index = address.indexOf(parse))) {\n        if ('number' === typeof instruction[2]) {\n          url[key] = address.slice(0, index);\n          address = address.slice(index + instruction[2]);\n        } else {\n          url[key] = address.slice(index);\n          address = address.slice(0, index);\n        }\n      }\n    } else if (index = parse.exec(address)) {\n      url[key] = index[1];\n      address = address.slice(0, index.index);\n    }\n\n    url[key] = url[key] || (\n      relative && instruction[3] ? location[key] || '' : ''\n    );\n\n    //\n    // Hostname, host and protocol should be lowercased so they can be used to\n    // create a proper `origin`.\n    //\n    if (instruction[4]) url[key] = url[key].toLowerCase();\n  }\n\n  //\n  // Also parse the supplied query string in to an object. 
If we're supplied\n  // with a custom parser as function use that instead of the default build-in\n  // parser.\n  //\n  if (parser) url.query = parser(url.query);\n\n  //\n  // If the URL is relative, resolve the pathname against the base URL.\n  //\n  if (\n      relative\n    && location.slashes\n    && url.pathname.charAt(0) !== '/'\n    && (url.pathname !== '' || location.pathname !== '')\n  ) {\n    url.pathname = resolve(url.pathname, location.pathname);\n  }\n\n  //\n  // We should not add port numbers if they are already the default port number\n  // for a given protocol. As the host also contains the port number we're going\n  // override it with the hostname which contains no port number.\n  //\n  if (!required(url.port, url.protocol)) {\n    url.host = url.hostname;\n    url.port = '';\n  }\n\n  //\n  // Parse down the `auth` for the username and password.\n  //\n  url.username = url.password = '';\n  if (url.auth) {\n    instruction = url.auth.split(':');\n    url.username = instruction[0] || '';\n    url.password = instruction[1] || '';\n  }\n\n  url.origin = url.protocol && url.host && url.protocol !== 'file:'\n    ? 
url.protocol +'//'+ url.host\n    : 'null';\n\n  //\n  // The href is just the compiled result.\n  //\n  url.href = url.toString();\n}\n\n/**\n * This is convenience method for changing properties in the URL instance to\n * insure that they all propagate correctly.\n *\n * @param {String} part          Property we need to adjust.\n * @param {Mixed} value          The newly assigned value.\n * @param {Boolean|Function} fn  When setting the query, it will be the function\n *                               used to parse the query.\n *                               When setting the protocol, double slash will be\n *                               removed from the final url if it is true.\n * @returns {URL}\n * @api public\n */\nURL.prototype.set = function set(part, value, fn) {\n  var url = this;\n\n  switch (part) {\n    case 'query':\n      if ('string' === typeof value && value.length) {\n        value = (fn || qs.parse)(value);\n      }\n\n      url[part] = value;\n      break;\n\n    case 'port':\n      url[part] = value;\n\n      if (!required(value, url.protocol)) {\n        url.host = url.hostname;\n        url[part] = '';\n      } else if (value) {\n        url.host = url.hostname +':'+ value;\n      }\n\n      break;\n\n    case 'hostname':\n      url[part] = value;\n\n      if (url.port) value += ':'+ url.port;\n      url.host = value;\n      break;\n\n    case 'host':\n      url[part] = value;\n\n      if (/:\\d+$/.test(value)) {\n        value = value.split(':');\n        url.port = value.pop();\n        url.hostname = value.join(':');\n      } else {\n        url.hostname = value;\n        url.port = '';\n      }\n\n      break;\n\n    case 'protocol':\n      url.protocol = value.toLowerCase();\n      url.slashes = !fn;\n      break;\n\n    case 'pathname':\n      url.pathname = value.length && value.charAt(0) !== '/' ? 
'/' + value : value;\n\n      break;\n\n    default:\n      url[part] = value;\n  }\n\n  for (var i = 0; i < rules.length; i++) {\n    var ins = rules[i];\n\n    if (ins[4]) url[ins[1]] = url[ins[1]].toLowerCase();\n  }\n\n  url.origin = url.protocol && url.host && url.protocol !== 'file:'\n    ? url.protocol +'//'+ url.host\n    : 'null';\n\n  url.href = url.toString();\n\n  return url;\n};\n\n/**\n * Transform the properties back in to a valid and full URL string.\n *\n * @param {Function} stringify Optional query stringify function.\n * @returns {String}\n * @api public\n */\nURL.prototype.toString = function toString(stringify) {\n  if (!stringify || 'function' !== typeof stringify) stringify = qs.stringify;\n\n  var query\n    , url = this\n    , protocol = url.protocol;\n\n  if (protocol && protocol.charAt(protocol.length - 1) !== ':') protocol += ':';\n\n  var result = protocol + (url.slashes ? '//' : '');\n\n  if (url.username) {\n    result += url.username;\n    if (url.password) result += ':'+ url.password;\n    result += '@';\n  }\n\n  result += url.host + url.pathname;\n\n  query = 'object' === typeof url.query ? stringify(url.query) : url.query;\n  if (query) result += '?' !== query.charAt(0) ? '?'+ query : query;\n\n  if (url.hash) result += url.hash;\n\n  return result;\n};\n\n//\n// Expose the URL parser and some additional properties that might be useful for\n// others or testing.\n//\nURL.extractProtocol = extractProtocol;\nURL.location = lolcation;\nURL.qs = qs;\n\nmodule.exports = URL;\n","'use strict';\n\nvar slashes = /^[A-Za-z][A-Za-z0-9+-.]*:\\/\\//;\n\n/**\n * These properties should not be copied or inherited from. This is only needed\n * for all non blob URL's as a blob URL does not include a hash, only the\n * origin.\n *\n * @type {Object}\n * @private\n */\nvar ignore = { hash: 1, query: 1 }\n  , URL;\n\n/**\n * The location object differs when your code is loaded through a normal page,\n * Worker or through a worker using a blob. 
And with the blobble begins the\n * trouble as the location object will contain the URL of the blob, not the\n * location of the page where our code is loaded in. The actual origin is\n * encoded in the `pathname` so we can thankfully generate a good \"default\"\n * location from it so we can generate proper relative URL's again.\n *\n * @param {Object|String} loc Optional default location object.\n * @returns {Object} lolcation object.\n * @api public\n */\nmodule.exports = function lolcation(loc) {\n  loc = loc || global.location || {};\n  URL = URL || require('./');\n\n  var finaldestination = {}\n    , type = typeof loc\n    , key;\n\n  if ('blob:' === loc.protocol) {\n    finaldestination = new URL(unescape(loc.pathname), {});\n  } else if ('string' === type) {\n    finaldestination = new URL(loc, {});\n    for (key in ignore) delete finaldestination[key];\n  } else if ('object' === type) {\n    for (key in loc) {\n      if (key in ignore) continue;\n      finaldestination[key] = loc[key];\n    }\n\n    if (finaldestination.slashes === undefined) {\n      finaldestination.slashes = slashes.test(loc.href);\n    }\n  }\n\n  return finaldestination;\n};\n","(function (root, factory) {\n    if (typeof exports === 'object') {\n        module.exports = factory();\n    } else if (typeof define === 'function' && define.amd) {\n        define([], factory);\n    } else {\n        root.urltemplate = factory();\n    }\n}(this, function () {\n  /**\n   * @constructor\n   */\n  function UrlTemplate() {\n  }\n\n  /**\n   * @private\n   * @param {string} str\n   * @return {string}\n   */\n  UrlTemplate.prototype.encodeReserved = function (str) {\n    return str.split(/(%[0-9A-Fa-f]{2})/g).map(function (part) {\n      if (!/%[0-9A-Fa-f]/.test(part)) {\n        part = encodeURI(part).replace(/%5B/g, '[').replace(/%5D/g, ']');\n      }\n      return part;\n    }).join('');\n  };\n\n  /**\n   * @private\n   * @param {string} str\n   * @return {string}\n   */\n  
UrlTemplate.prototype.encodeUnreserved = function (str) {\n    return encodeURIComponent(str).replace(/[!'()*]/g, function (c) {\n      return '%' + c.charCodeAt(0).toString(16).toUpperCase();\n    });\n  }\n\n  /**\n   * @private\n   * @param {string} operator\n   * @param {string} value\n   * @param {string} key\n   * @return {string}\n   */\n  UrlTemplate.prototype.encodeValue = function (operator, value, key) {\n    value = (operator === '+' || operator === '#') ? this.encodeReserved(value) : this.encodeUnreserved(value);\n\n    if (key) {\n      return this.encodeUnreserved(key) + '=' + value;\n    } else {\n      return value;\n    }\n  };\n\n  /**\n   * @private\n   * @param {*} value\n   * @return {boolean}\n   */\n  UrlTemplate.prototype.isDefined = function (value) {\n    return value !== undefined && value !== null;\n  };\n\n  /**\n   * @private\n   * @param {string}\n   * @return {boolean}\n   */\n  UrlTemplate.prototype.isKeyOperator = function (operator) {\n    return operator === ';' || operator === '&' || operator === '?';\n  };\n\n  /**\n   * @private\n   * @param {Object} context\n   * @param {string} operator\n   * @param {string} key\n   * @param {string} modifier\n   */\n  UrlTemplate.prototype.getValues = function (context, operator, key, modifier) {\n    var value = context[key],\n        result = [];\n\n    if (this.isDefined(value) && value !== '') {\n      if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') {\n        value = value.toString();\n\n        if (modifier && modifier !== '*') {\n          value = value.substring(0, parseInt(modifier, 10));\n        }\n\n        result.push(this.encodeValue(operator, value, this.isKeyOperator(operator) ? 
key : null));\n      } else {\n        if (modifier === '*') {\n          if (Array.isArray(value)) {\n            value.filter(this.isDefined).forEach(function (value) {\n              result.push(this.encodeValue(operator, value, this.isKeyOperator(operator) ? key : null));\n            }, this);\n          } else {\n            Object.keys(value).forEach(function (k) {\n              if (this.isDefined(value[k])) {\n                result.push(this.encodeValue(operator, value[k], k));\n              }\n            }, this);\n          }\n        } else {\n          var tmp = [];\n\n          if (Array.isArray(value)) {\n            value.filter(this.isDefined).forEach(function (value) {\n              tmp.push(this.encodeValue(operator, value));\n            }, this);\n          } else {\n            Object.keys(value).forEach(function (k) {\n              if (this.isDefined(value[k])) {\n                tmp.push(this.encodeUnreserved(k));\n                tmp.push(this.encodeValue(operator, value[k].toString()));\n              }\n            }, this);\n          }\n\n          if (this.isKeyOperator(operator)) {\n            result.push(this.encodeUnreserved(key) + '=' + tmp.join(','));\n          } else if (tmp.length !== 0) {\n            result.push(tmp.join(','));\n          }\n        }\n      }\n    } else {\n      if (operator === ';') {\n        if (this.isDefined(value)) {\n          result.push(this.encodeUnreserved(key));\n        }\n      } else if (value === '' && (operator === '&' || operator === '?')) {\n        result.push(this.encodeUnreserved(key) + '=');\n      } else if (value === '') {\n        result.push('');\n      }\n    }\n    return result;\n  };\n\n  /**\n   * @param {string} template\n   * @return {function(Object):string}\n   */\n  UrlTemplate.prototype.parse = function (template) {\n    var that = this;\n    var operators = ['+', '#', '.', '/', ';', '?', '&'];\n\n    return {\n      expand: function (context) {\n        return 
template.replace(/\\{([^\\{\\}]+)\\}|([^\\{\\}]+)/g, function (_, expression, literal) {\n          if (expression) {\n            var operator = null,\n                values = [];\n\n            if (operators.indexOf(expression.charAt(0)) !== -1) {\n              operator = expression.charAt(0);\n              expression = expression.substr(1);\n            }\n\n            expression.split(/,/g).forEach(function (variable) {\n              var tmp = /([^:\\*]*)(?::(\\d+)|(\\*))?/.exec(variable);\n              values.push.apply(values, that.getValues(context, operator, tmp[1], tmp[2] || tmp[3]));\n            });\n\n            if (operator && operator !== '+') {\n              var separator = ',';\n\n              if (operator === '?') {\n                separator = '&';\n              } else if (operator !== '#') {\n                separator = operator;\n              }\n              return (values.length !== 0 ? operator : '') + values.join(separator);\n            } else {\n              return values.join(',');\n            }\n          } else {\n            return that.encodeReserved(literal);\n          }\n        });\n      }\n    };\n  };\n\n  return new UrlTemplate();\n}));\n","(function(self) {\n  'use strict';\n\n  if (self.fetch) {\n    return\n  }\n\n  var support = {\n    searchParams: 'URLSearchParams' in self,\n    iterable: 'Symbol' in self && 'iterator' in Symbol,\n    blob: 'FileReader' in self && 'Blob' in self && (function() {\n      try {\n        new Blob()\n        return true\n      } catch(e) {\n        return false\n      }\n    })(),\n    formData: 'FormData' in self,\n    arrayBuffer: 'ArrayBuffer' in self\n  }\n\n  if (support.arrayBuffer) {\n    var viewClasses = [\n      '[object Int8Array]',\n      '[object Uint8Array]',\n      '[object Uint8ClampedArray]',\n      '[object Int16Array]',\n      '[object Uint16Array]',\n      '[object Int32Array]',\n      '[object Uint32Array]',\n      '[object Float32Array]',\n      '[object 
Float64Array]'\n    ]\n\n    var isDataView = function(obj) {\n      return obj && DataView.prototype.isPrototypeOf(obj)\n    }\n\n    var isArrayBufferView = ArrayBuffer.isView || function(obj) {\n      return obj && viewClasses.indexOf(Object.prototype.toString.call(obj)) > -1\n    }\n  }\n\n  function normalizeName(name) {\n    if (typeof name !== 'string') {\n      name = String(name)\n    }\n    if (/[^a-z0-9\\-#$%&'*+.\\^_`|~]/i.test(name)) {\n      throw new TypeError('Invalid character in header field name')\n    }\n    return name.toLowerCase()\n  }\n\n  function normalizeValue(value) {\n    if (typeof value !== 'string') {\n      value = String(value)\n    }\n    return value\n  }\n\n  // Build a destructive iterator for the value list\n  function iteratorFor(items) {\n    var iterator = {\n      next: function() {\n        var value = items.shift()\n        return {done: value === undefined, value: value}\n      }\n    }\n\n    if (support.iterable) {\n      iterator[Symbol.iterator] = function() {\n        return iterator\n      }\n    }\n\n    return iterator\n  }\n\n  function Headers(headers) {\n    this.map = {}\n\n    if (headers instanceof Headers) {\n      headers.forEach(function(value, name) {\n        this.append(name, value)\n      }, this)\n\n    } else if (headers) {\n      Object.getOwnPropertyNames(headers).forEach(function(name) {\n        this.append(name, headers[name])\n      }, this)\n    }\n  }\n\n  Headers.prototype.append = function(name, value) {\n    name = normalizeName(name)\n    value = normalizeValue(value)\n    var oldValue = this.map[name]\n    this.map[name] = oldValue ? oldValue+','+value : value\n  }\n\n  Headers.prototype['delete'] = function(name) {\n    delete this.map[normalizeName(name)]\n  }\n\n  Headers.prototype.get = function(name) {\n    name = normalizeName(name)\n    return this.has(name) ? 
this.map[name] : null\n  }\n\n  Headers.prototype.has = function(name) {\n    return this.map.hasOwnProperty(normalizeName(name))\n  }\n\n  Headers.prototype.set = function(name, value) {\n    this.map[normalizeName(name)] = normalizeValue(value)\n  }\n\n  Headers.prototype.forEach = function(callback, thisArg) {\n    for (var name in this.map) {\n      if (this.map.hasOwnProperty(name)) {\n        callback.call(thisArg, this.map[name], name, this)\n      }\n    }\n  }\n\n  Headers.prototype.keys = function() {\n    var items = []\n    this.forEach(function(value, name) { items.push(name) })\n    return iteratorFor(items)\n  }\n\n  Headers.prototype.values = function() {\n    var items = []\n    this.forEach(function(value) { items.push(value) })\n    return iteratorFor(items)\n  }\n\n  Headers.prototype.entries = function() {\n    var items = []\n    this.forEach(function(value, name) { items.push([name, value]) })\n    return iteratorFor(items)\n  }\n\n  if (support.iterable) {\n    Headers.prototype[Symbol.iterator] = Headers.prototype.entries\n  }\n\n  function consumed(body) {\n    if (body.bodyUsed) {\n      return Promise.reject(new TypeError('Already read'))\n    }\n    body.bodyUsed = true\n  }\n\n  function fileReaderReady(reader) {\n    return new Promise(function(resolve, reject) {\n      reader.onload = function() {\n        resolve(reader.result)\n      }\n      reader.onerror = function() {\n        reject(reader.error)\n      }\n    })\n  }\n\n  function readBlobAsArrayBuffer(blob) {\n    var reader = new FileReader()\n    var promise = fileReaderReady(reader)\n    reader.readAsArrayBuffer(blob)\n    return promise\n  }\n\n  function readBlobAsText(blob) {\n    var reader = new FileReader()\n    var promise = fileReaderReady(reader)\n    reader.readAsText(blob)\n    return promise\n  }\n\n  function bufferClone(buf) {\n    if (buf.slice) {\n      return buf.slice(0)\n    } else {\n      var view = new Uint8Array(buf.byteLength)\n      view.set(new 
Uint8Array(buf))\n      return view.buffer\n    }\n  }\n\n  function Body() {\n    this.bodyUsed = false\n\n    this._initBody = function(body) {\n      this._bodyInit = body\n      if (!body) {\n        this._bodyText = ''\n      } else if (typeof body === 'string') {\n        this._bodyText = body\n      } else if (support.blob && Blob.prototype.isPrototypeOf(body)) {\n        this._bodyBlob = body\n      } else if (support.formData && FormData.prototype.isPrototypeOf(body)) {\n        this._bodyFormData = body\n      } else if (support.searchParams && URLSearchParams.prototype.isPrototypeOf(body)) {\n        this._bodyText = body.toString()\n      } else if (support.arrayBuffer && support.blob && isDataView(body)) {\n        this._bodyArrayBuffer = bufferClone(body.buffer)\n        // IE 10-11 can't handle a DataView body.\n        this._bodyInit = new Blob([this._bodyArrayBuffer])\n      } else if (support.arrayBuffer && (ArrayBuffer.prototype.isPrototypeOf(body) || isArrayBufferView(body))) {\n        this._bodyArrayBuffer = bufferClone(body)\n      } else {\n        throw new Error('unsupported BodyInit type')\n      }\n\n      if (!this.headers.get('content-type')) {\n        if (typeof body === 'string') {\n          this.headers.set('content-type', 'text/plain;charset=UTF-8')\n        } else if (this._bodyBlob && this._bodyBlob.type) {\n          this.headers.set('content-type', this._bodyBlob.type)\n        } else if (support.searchParams && URLSearchParams.prototype.isPrototypeOf(body)) {\n          this.headers.set('content-type', 'application/x-www-form-urlencoded;charset=UTF-8')\n        }\n      }\n    }\n\n    if (support.blob) {\n      this.blob = function() {\n        var rejected = consumed(this)\n        if (rejected) {\n          return rejected\n        }\n\n        if (this._bodyBlob) {\n          return Promise.resolve(this._bodyBlob)\n        } else if (this._bodyArrayBuffer) {\n          return Promise.resolve(new 
Blob([this._bodyArrayBuffer]))\n        } else if (this._bodyFormData) {\n          throw new Error('could not read FormData body as blob')\n        } else {\n          return Promise.resolve(new Blob([this._bodyText]))\n        }\n      }\n    }\n\n    this.text = function() {\n      var rejected = consumed(this)\n      if (rejected) {\n        return rejected\n      }\n\n      if (this._bodyBlob) {\n        return readBlobAsText(this._bodyBlob)\n      } else if (this._bodyArrayBuffer) {\n        var view = new Uint8Array(this._bodyArrayBuffer)\n        var str = String.fromCharCode.apply(null, view)\n        return Promise.resolve(str)\n      } else if (this._bodyFormData) {\n        throw new Error('could not read FormData body as text')\n      } else {\n        return Promise.resolve(this._bodyText)\n      }\n    }\n\n    if (support.arrayBuffer) {\n      this.arrayBuffer = function() {\n        if (this._bodyArrayBuffer) {\n          return consumed(this) || Promise.resolve(this._bodyArrayBuffer)\n        } else {\n          return this.blob().then(readBlobAsArrayBuffer)\n        }\n      }\n    }\n\n    if (support.formData) {\n      this.formData = function() {\n        return this.text().then(decode)\n      }\n    }\n\n    this.json = function() {\n      return this.text().then(JSON.parse)\n    }\n\n    return this\n  }\n\n  // HTTP methods whose capitalization should be normalized\n  var methods = ['DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT']\n\n  function normalizeMethod(method) {\n    var upcased = method.toUpperCase()\n    return (methods.indexOf(upcased) > -1) ? 
upcased : method\n  }\n\n  function Request(input, options) {\n    options = options || {}\n    var body = options.body\n\n    if (typeof input === 'string') {\n      this.url = input\n    } else {\n      if (input.bodyUsed) {\n        throw new TypeError('Already read')\n      }\n      this.url = input.url\n      this.credentials = input.credentials\n      if (!options.headers) {\n        this.headers = new Headers(input.headers)\n      }\n      this.method = input.method\n      this.mode = input.mode\n      if (!body && input._bodyInit != null) {\n        body = input._bodyInit\n        input.bodyUsed = true\n      }\n    }\n\n    this.credentials = options.credentials || this.credentials || 'omit'\n    if (options.headers || !this.headers) {\n      this.headers = new Headers(options.headers)\n    }\n    this.method = normalizeMethod(options.method || this.method || 'GET')\n    this.mode = options.mode || this.mode || null\n    this.referrer = null\n\n    if ((this.method === 'GET' || this.method === 'HEAD') && body) {\n      throw new TypeError('Body not allowed for GET or HEAD requests')\n    }\n    this._initBody(body)\n  }\n\n  Request.prototype.clone = function() {\n    return new Request(this, { body: this._bodyInit })\n  }\n\n  function decode(body) {\n    var form = new FormData()\n    body.trim().split('&').forEach(function(bytes) {\n      if (bytes) {\n        var split = bytes.split('=')\n        var name = split.shift().replace(/\\+/g, ' ')\n        var value = split.join('=').replace(/\\+/g, ' ')\n        form.append(decodeURIComponent(name), decodeURIComponent(value))\n      }\n    })\n    return form\n  }\n\n  function parseHeaders(rawHeaders) {\n    var headers = new Headers()\n    rawHeaders.split('\\r\\n').forEach(function(line) {\n      var parts = line.split(':')\n      var key = parts.shift().trim()\n      if (key) {\n        var value = parts.join(':').trim()\n        headers.append(key, value)\n      }\n    })\n    return headers\n  }\n\n  
Body.call(Request.prototype)\n\n  function Response(bodyInit, options) {\n    if (!options) {\n      options = {}\n    }\n\n    this.type = 'default'\n    this.status = 'status' in options ? options.status : 200\n    this.ok = this.status >= 200 && this.status < 300\n    this.statusText = 'statusText' in options ? options.statusText : 'OK'\n    this.headers = new Headers(options.headers)\n    this.url = options.url || ''\n    this._initBody(bodyInit)\n  }\n\n  Body.call(Response.prototype)\n\n  Response.prototype.clone = function() {\n    return new Response(this._bodyInit, {\n      status: this.status,\n      statusText: this.statusText,\n      headers: new Headers(this.headers),\n      url: this.url\n    })\n  }\n\n  Response.error = function() {\n    var response = new Response(null, {status: 0, statusText: ''})\n    response.type = 'error'\n    return response\n  }\n\n  var redirectStatuses = [301, 302, 303, 307, 308]\n\n  Response.redirect = function(url, status) {\n    if (redirectStatuses.indexOf(status) === -1) {\n      throw new RangeError('Invalid status code')\n    }\n\n    return new Response(null, {status: status, headers: {location: url}})\n  }\n\n  self.Headers = Headers\n  self.Request = Request\n  self.Response = Response\n\n  self.fetch = function(input, init) {\n    return new Promise(function(resolve, reject) {\n      var request = new Request(input, init)\n      var xhr = new XMLHttpRequest()\n\n      xhr.onload = function() {\n        var options = {\n          status: xhr.status,\n          statusText: xhr.statusText,\n          headers: parseHeaders(xhr.getAllResponseHeaders() || '')\n        }\n        options.url = 'responseURL' in xhr ? xhr.responseURL : options.headers.get('X-Request-URL')\n        var body = 'response' in xhr ? 
xhr.response : xhr.responseText\n        resolve(new Response(body, options))\n      }\n\n      xhr.onerror = function() {\n        reject(new TypeError('Network request failed'))\n      }\n\n      xhr.ontimeout = function() {\n        reject(new TypeError('Network request failed'))\n      }\n\n      xhr.open(request.method, request.url, true)\n\n      if (request.credentials === 'include') {\n        xhr.withCredentials = true\n      }\n\n      if ('responseType' in xhr && support.blob) {\n        xhr.responseType = 'blob'\n      }\n\n      request.headers.forEach(function(value, name) {\n        xhr.setRequestHeader(name, value)\n      })\n\n      xhr.send(typeof request._bodyInit === 'undefined' ? null : request._bodyInit)\n    })\n  }\n  self.fetch.polyfill = true\n})(typeof self !== 'undefined' ? self : this);\n"]}
PypiClean
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/pip/_vendor/urllib3/__init__.py
"""
urllib3 - Thread-safe connection pooling and re-using.

This vendored copy only re-exports the public API and installs default
warning filters / a NullHandler for logging.
"""
from __future__ import absolute_import
import warnings

from .connectionpool import (
    HTTPConnectionPool,
    HTTPSConnectionPool,
    connection_from_url
)

from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry


# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler

__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.24.1'

# Public API of the package; everything else is an implementation detail.
__all__ = (
    'HTTPConnectionPool',
    'HTTPSConnectionPool',
    'PoolManager',
    'ProxyManager',
    'HTTPResponse',
    'Retry',
    'Timeout',
    'add_stderr_logger',
    'connection_from_url',
    'disable_warnings',
    'encode_multipart_formdata',
    'get_host',
    'make_headers',
    'proxy_from_url',
)

logging.getLogger(__name__).addHandler(NullHandler())


def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    :param level: logging level for the new handler (default DEBUG).
    :return: the handler after adding it, so callers can remove it later.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(level)
    logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return handler

# ... Clean up.
del NullHandler


# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
                      append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)


def disable_warnings(category=exceptions.HTTPWarning):
    """
    Helper for quickly disabling all urllib3 warnings.

    :param category: warning class to silence; defaults to the root
        HTTPWarning, which silences every urllib3 warning at once.
    """
    warnings.simplefilter('ignore', category)
PypiClean
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/codemirror/addon/hint/html-hint.js
(function(mod) { if (typeof exports == "object" && typeof module == "object") // CommonJS mod(require("../../lib/codemirror"), require("./xml-hint")); else if (typeof define == "function" && define.amd) // AMD define(["../../lib/codemirror", "./xml-hint"], mod); else // Plain browser env mod(CodeMirror); })(function(CodeMirror) { "use strict"; var langs = "ab aa af ak sq am ar an hy as av ae ay az bm ba eu be bn bh bi bs br bg my ca ch ce ny zh cv kw co cr hr cs da dv nl dz en eo et ee fo fj fi fr ff gl ka de el gn gu ht ha he hz hi ho hu ia id ie ga ig ik io is it iu ja jv kl kn kr ks kk km ki rw ky kv kg ko ku kj la lb lg li ln lo lt lu lv gv mk mg ms ml mt mi mr mh mn na nv nb nd ne ng nn no ii nr oc oj cu om or os pa pi fa pl ps pt qu rm rn ro ru sa sc sd se sm sg sr gd sn si sk sl so st es su sw ss sv ta te tg th ti bo tk tl tn to tr ts tt tw ty ug uk ur uz ve vi vo wa cy wo fy xh yi yo za zu".split(" "); var targets = ["_blank", "_self", "_top", "_parent"]; var charsets = ["ascii", "utf-8", "utf-16", "latin1", "latin1"]; var methods = ["get", "post", "put", "delete"]; var encs = ["application/x-www-form-urlencoded", "multipart/form-data", "text/plain"]; var media = ["all", "screen", "print", "embossed", "braille", "handheld", "print", "projection", "screen", "tty", "tv", "speech", "3d-glasses", "resolution [>][<][=] [X]", "device-aspect-ratio: X/Y", "orientation:portrait", "orientation:landscape", "device-height: [X]", "device-width: [X]"]; var s = { attrs: {} }; // Simple tag, reused for a whole lot of tags var data = { a: { attrs: { href: null, ping: null, type: null, media: media, target: targets, hreflang: langs } }, abbr: s, acronym: s, address: s, applet: s, area: { attrs: { alt: null, coords: null, href: null, target: null, ping: null, media: media, hreflang: langs, type: null, shape: ["default", "rect", "circle", "poly"] } }, article: s, aside: s, audio: { attrs: { src: null, mediagroup: null, crossorigin: ["anonymous", "use-credentials"], preload: 
["none", "metadata", "auto"], autoplay: ["", "autoplay"], loop: ["", "loop"], controls: ["", "controls"] } }, b: s, base: { attrs: { href: null, target: targets } }, basefont: s, bdi: s, bdo: s, big: s, blockquote: { attrs: { cite: null } }, body: s, br: s, button: { attrs: { form: null, formaction: null, name: null, value: null, autofocus: ["", "autofocus"], disabled: ["", "autofocus"], formenctype: encs, formmethod: methods, formnovalidate: ["", "novalidate"], formtarget: targets, type: ["submit", "reset", "button"] } }, canvas: { attrs: { width: null, height: null } }, caption: s, center: s, cite: s, code: s, col: { attrs: { span: null } }, colgroup: { attrs: { span: null } }, command: { attrs: { type: ["command", "checkbox", "radio"], label: null, icon: null, radiogroup: null, command: null, title: null, disabled: ["", "disabled"], checked: ["", "checked"] } }, data: { attrs: { value: null } }, datagrid: { attrs: { disabled: ["", "disabled"], multiple: ["", "multiple"] } }, datalist: { attrs: { data: null } }, dd: s, del: { attrs: { cite: null, datetime: null } }, details: { attrs: { open: ["", "open"] } }, dfn: s, dir: s, div: s, dialog: { attrs: { open: null } }, dl: s, dt: s, em: s, embed: { attrs: { src: null, type: null, width: null, height: null } }, eventsource: { attrs: { src: null } }, fieldset: { attrs: { disabled: ["", "disabled"], form: null, name: null } }, figcaption: s, figure: s, font: s, footer: s, form: { attrs: { action: null, name: null, "accept-charset": charsets, autocomplete: ["on", "off"], enctype: encs, method: methods, novalidate: ["", "novalidate"], target: targets } }, frame: s, frameset: s, h1: s, h2: s, h3: s, h4: s, h5: s, h6: s, head: { attrs: {}, children: ["title", "base", "link", "style", "meta", "script", "noscript", "command"] }, header: s, hgroup: s, hr: s, html: { attrs: { manifest: null }, children: ["head", "body"] }, i: s, iframe: { attrs: { src: null, srcdoc: null, name: null, width: null, height: null, sandbox: 
["allow-top-navigation", "allow-same-origin", "allow-forms", "allow-scripts"], seamless: ["", "seamless"] } }, img: { attrs: { alt: null, src: null, ismap: null, usemap: null, width: null, height: null, crossorigin: ["anonymous", "use-credentials"] } }, input: { attrs: { alt: null, dirname: null, form: null, formaction: null, height: null, list: null, max: null, maxlength: null, min: null, name: null, pattern: null, placeholder: null, size: null, src: null, step: null, value: null, width: null, accept: ["audio/*", "video/*", "image/*"], autocomplete: ["on", "off"], autofocus: ["", "autofocus"], checked: ["", "checked"], disabled: ["", "disabled"], formenctype: encs, formmethod: methods, formnovalidate: ["", "novalidate"], formtarget: targets, multiple: ["", "multiple"], readonly: ["", "readonly"], required: ["", "required"], type: ["hidden", "text", "search", "tel", "url", "email", "password", "datetime", "date", "month", "week", "time", "datetime-local", "number", "range", "color", "checkbox", "radio", "file", "submit", "image", "reset", "button"] } }, ins: { attrs: { cite: null, datetime: null } }, kbd: s, keygen: { attrs: { challenge: null, form: null, name: null, autofocus: ["", "autofocus"], disabled: ["", "disabled"], keytype: ["RSA"] } }, label: { attrs: { "for": null, form: null } }, legend: s, li: { attrs: { value: null } }, link: { attrs: { href: null, type: null, hreflang: langs, media: media, sizes: ["all", "16x16", "16x16 32x32", "16x16 32x32 64x64"] } }, map: { attrs: { name: null } }, mark: s, menu: { attrs: { label: null, type: ["list", "context", "toolbar"] } }, meta: { attrs: { content: null, charset: charsets, name: ["viewport", "application-name", "author", "description", "generator", "keywords"], "http-equiv": ["content-language", "content-type", "default-style", "refresh"] } }, meter: { attrs: { value: null, min: null, low: null, high: null, max: null, optimum: null } }, nav: s, noframes: s, noscript: s, object: { attrs: { data: null, type: 
null, name: null, usemap: null, form: null, width: null, height: null, typemustmatch: ["", "typemustmatch"] } }, ol: { attrs: { reversed: ["", "reversed"], start: null, type: ["1", "a", "A", "i", "I"] } }, optgroup: { attrs: { disabled: ["", "disabled"], label: null } }, option: { attrs: { disabled: ["", "disabled"], label: null, selected: ["", "selected"], value: null } }, output: { attrs: { "for": null, form: null, name: null } }, p: s, param: { attrs: { name: null, value: null } }, pre: s, progress: { attrs: { value: null, max: null } }, q: { attrs: { cite: null } }, rp: s, rt: s, ruby: s, s: s, samp: s, script: { attrs: { type: ["text/javascript"], src: null, async: ["", "async"], defer: ["", "defer"], charset: charsets } }, section: s, select: { attrs: { form: null, name: null, size: null, autofocus: ["", "autofocus"], disabled: ["", "disabled"], multiple: ["", "multiple"] } }, small: s, source: { attrs: { src: null, type: null, media: null } }, span: s, strike: s, strong: s, style: { attrs: { type: ["text/css"], media: media, scoped: null } }, sub: s, summary: s, sup: s, table: s, tbody: s, td: { attrs: { colspan: null, rowspan: null, headers: null } }, textarea: { attrs: { dirname: null, form: null, maxlength: null, name: null, placeholder: null, rows: null, cols: null, autofocus: ["", "autofocus"], disabled: ["", "disabled"], readonly: ["", "readonly"], required: ["", "required"], wrap: ["soft", "hard"] } }, tfoot: s, th: { attrs: { colspan: null, rowspan: null, headers: null, scope: ["row", "col", "rowgroup", "colgroup"] } }, thead: s, time: { attrs: { datetime: null } }, title: s, tr: s, track: { attrs: { src: null, label: null, "default": null, kind: ["subtitles", "captions", "descriptions", "chapters", "metadata"], srclang: langs } }, tt: s, u: s, ul: s, "var": s, video: { attrs: { src: null, poster: null, width: null, height: null, crossorigin: ["anonymous", "use-credentials"], preload: ["auto", "metadata", "none"], autoplay: ["", "autoplay"], 
mediagroup: ["movie"], muted: ["", "muted"], controls: ["", "controls"] } }, wbr: s }; var globalAttrs = { accesskey: ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], "class": null, contenteditable: ["true", "false"], contextmenu: null, dir: ["ltr", "rtl", "auto"], draggable: ["true", "false", "auto"], dropzone: ["copy", "move", "link", "string:", "file:"], hidden: ["hidden"], id: null, inert: ["inert"], itemid: null, itemprop: null, itemref: null, itemscope: ["itemscope"], itemtype: null, lang: ["en", "es"], spellcheck: ["true", "false"], autocorrect: ["true", "false"], autocapitalize: ["true", "false"], style: null, tabindex: ["1", "2", "3", "4", "5", "6", "7", "8", "9"], title: null, translate: ["yes", "no"], onclick: null, rel: ["stylesheet", "alternate", "author", "bookmark", "help", "license", "next", "nofollow", "noreferrer", "prefetch", "prev", "search", "tag"] }; function populate(obj) { for (var attr in globalAttrs) if (globalAttrs.hasOwnProperty(attr)) obj.attrs[attr] = globalAttrs[attr]; } populate(s); for (var tag in data) if (data.hasOwnProperty(tag) && data[tag] != s) populate(data[tag]); CodeMirror.htmlSchema = data; function htmlHint(cm, options) { var local = {schemaInfo: data}; if (options) for (var opt in options) local[opt] = options[opt]; return CodeMirror.hint.xml(cm, local); } CodeMirror.registerHelper("hint", "html", htmlHint); });
PypiClean
/FlowMaster-0.7.1.tar.gz/FlowMaster-0.7.1/flowmaster/operators/etl/work.py
from typing import TYPE_CHECKING, Iterator, Optional

from flowmaster.executors import catch_exceptions, ExecutorIterationTask
from flowmaster.models import FlowItem
from flowmaster.operators.base.work import Work, prepare_items_for_order
from flowmaster.service import iter_active_notebook_filenames, get_notebook
from flowmaster.utils.logging_helper import Logger, getLogger
from flowmaster.utils.logging_helper import logger

if TYPE_CHECKING:
    from flowmaster.operators.etl.core import ETLOperator
    from flowmaster.operators.etl.policy import ETLNotebook


class ETLWork(Work):
    """Work policy for ETL operators; extends the base Work with the
    update_stale_data setting taken from the notebook's work section."""

    def __init__(self, notebook: "ETLNotebook", logger: Optional[Logger] = None):
        super(ETLWork, self).__init__(notebook, logger)
        # Range of past periods that should be refreshed on each run.
        self.update_stale_data = notebook.work.update_stale_data
        self.Model = FlowItem
        self.logger = logger or getLogger()

    def iter_items_for_execute(self) -> Iterator[FlowItem]:
        """
        Collects all flow items for execute.
        """
        # TODO: Rename update_stale_data to update_range; this parameter is not
        #  only for ETL, other operators need it too, so remove this override
        #  and wrap everything into a single function.
        return self.Model.get_items_for_execute(
            self.name,
            self.current_worktime,
            self.start_datetime,
            self.interval_timedelta,
            self.keep_sequence,
            self.retries,
            self.retry_delay,
            notebook_hash=self.notebook.hash,
            max_fatal_errors=self.max_fatal_errors,
            update_stale_data=self.update_stale_data,
        )


@catch_exceptions
def ordering_etl_flow_tasks(
    *, dry_run: bool = False
) -> Iterator[ExecutorIterationTask]:
    """Prepare flow function to be sent to the queue and executed"""
    # TODO: get rid of this function; rework so that one function does the ordering
    from flowmaster.operators.etl.core import ETLOperator
    from flowmaster.operators.etl.policy import ETLNotebook

    for name in iter_active_notebook_filenames():
        validate, text, notebook_dict, notebook, error = get_notebook(name)
        notebook: ETLNotebook

        # Dry runs only exercise the synthetic "fakedata" provider.
        if dry_run:
            if notebook.provider != "fakedata":
                continue

        if not validate:
            logger.error("ValidationError: '{}': {}", name, error)
            continue

        # NOTE(review): the base Work is instantiated here rather than ETLWork,
        # so update_stale_data is not taken into account when ordering —
        # confirm this is intended.
        work = Work(notebook)

        for start_period, end_period in work.iter_period_for_execute():
            flow = ETLOperator(notebook)
            etl_flow_task = flow.task(start_period, end_period, dry_run=dry_run)
            # Mark the items as ordered for the duration of the yield.
            with prepare_items_for_order(flow, start_period, end_period):
                logger.info(
                    "Order ETL flow [{}]: {} {}", notebook.name, start_period, end_period
                )
                yield etl_flow_task
PypiClean
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py
# MMDetection config: RetinaNet with a NAS-FPN neck on a ResNet-50 backbone,
# trained with 640x640 random crops for 50 epochs on COCO.
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/default_runtime.py'
]
cudnn_benchmark = True

# model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    type='RetinaNet',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    # NAS-FPN stacks the searched FPN cell 7 times.
    neck=dict(type='NASFPN', stack_times=7, norm_cfg=norm_cfg),
    bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
    # training and testing settings
    train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))

# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    # Scale jitter then fixed-size crop, as in the NAS-FPN paper setup.
    dict(
        type='Resize',
        img_scale=(640, 640),
        ratio_range=(0.8, 1.2),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=(640, 640)),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(640, 640),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=128),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

# optimizer
optimizer = dict(
    type='SGD',
    lr=0.08,
    momentum=0.9,
    weight_decay=0.0001,
    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)

# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.1,
    step=[30, 40])

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
PypiClean
/Lmgeo-1.1.0.tar.gz/Lmgeo-1.1.0/lmgeo/formats/hdf5raster.py
import os.path
from warnings import warn

from .raster import Raster
from .gridenvelope2d import GridEnvelope2D
from .const import Const, constants as const

# collections.Sequence moved to collections.abc in Python 3.3 and was removed
# from the collections namespace in Python 3.10.
try:
    from collections.abc import Sequence
except ImportError:  # very old Python fallback
    from collections import Sequence

try:
    import tables
    from tables.exceptions import NoSuchNodeError
    HAS_TABLES = True
except ImportError:
    # Do not raise at import time: raising here made the HAS_TABLES flag and
    # the guard in Hdf5Raster.open() dead code. Fail when the class is used.
    HAS_TABLES = False
    warn("If one wants to use the module hdf5raster, he / she needs to install Python package tables!")

__author__ = "Steven B. Hoek"


# Class for reading a quick and dirty HDF5 format that can store weather data
# in a raster format in an efficient way, for fast access
class Hdf5Raster(Raster, GridEnvelope2D):
    '''A raster represented by 2 files, with extensions "h5" and "hdr"'''

    # Constants
    DATAFILEXT = "h5"
    HEADEREXT = ""

    # Data attributes - assign some dummy values for the mean time
    name = "dummy.h5"
    folder = os.getcwd()
    cellsize = 1
    nodatavalue = -9999.0
    dataset_name = "dummy"
    group_prefix = "row"    # each HDF5 group represents a raster row
    table_prefix = "col"    # each table within a group represents a cell/column
    index_format = "04i"    # printf-style format for the row/column index in node names
    variables = "temp, rain"
    units = "degrees Celsius, mm/day"

    # Private attributes
    datafile = None  # the open tables.File, or None
    currow = 0       # zero-based cursor of the current row

    def __init__(self, filepath):
        """Prepare the raster; the file is not opened until open() is called."""
        # Initialise
        Raster.__init__(self, filepath)
        GridEnvelope2D.__init__(self, 1, 1, 0.0, 0.0, 0.1, 0.1)

        # Retrieve the name from the filepath and assign - incl. extension
        self.name = os.path.basename(filepath)
        # Also derive the folder
        self.folder = os.path.dirname(filepath)

    def open(self, mode, ncols=1, nrows=1, xll=0., yll=0., cellsize=1., nodatavalue=-9999.0,
             dataset_name="dummy", group_prefix="row", table_prefix="col", index_format="04i",
             variables=None, units=None):
        """
        Open the underlying HDF5 file.

        :param mode: 'w' creates/overwrites the file and writes the header
            attributes; anything else opens an existing file for reading.
        :param variables: list of variable names stored per record (default []).
        :param units: list of units, parallel to variables (default []).
        :return: True/False in read mode depending on whether the file exists;
            None in write mode (as before).
        """
        # Initialise
        super(Hdf5Raster, self).open(mode)
        fpath = os.path.join(self.folder, self.name)

        # Raise again if Python package tables is not installed
        if not HAS_TABLES:
            raise Exception("If one wants to use the module hdf5raster, he / she needs to install Python package tables!")

        # Mutable default arguments are shared between calls; use None sentinels.
        variables = [] if variables is None else variables
        units = [] if units is None else units

        # Now prepare to read from file or to write it
        if mode[0] == 'w':
            # Open the file and assign the data attributes
            self.datafile = tables.open_file(fpath, 'w')
            self.ncols = ncols
            self.nrows = nrows
            self.xll = xll
            self.yll = yll
            self.cellsize = cellsize
            self.nodatavalue = nodatavalue
            self.dataset_name = dataset_name
            self.group_prefix = group_prefix
            self.table_prefix = table_prefix
            self.index_format = index_format
            self.variables = variables
            self.units = units
            self.writeheader()
        else:
            # Only read the file if it exists
            if os.path.exists(fpath):
                # Retrieve the data attributes from the attributes in the file
                self.datafile = tables.open_file(fpath, 'r')
                self.readheader()
                GridEnvelope2D.__init__(self, self.ncols, self.nrows, self.xll, self.yll,
                                        self.cellsize, self.cellsize)
                return True
            else:
                return False

    def readheader(self):
        """Read georeference and layout info from attributes stored in the file."""
        # All information is stored in the file itself, i.e. as attributes.
        # Assume that the file is open.
        f = self.datafile
        try:
            self.ncols = f.root._v_attrs.ncols
            self.nrows = f.root._v_attrs.nrows
            self.xll = f.root._v_attrs.xllcorner
            self.yll = f.root._v_attrs.yllcorner
            self.cellsize = f.root._v_attrs.cellsize
            self.nodatavalue = f.root._v_attrs.nodata_value
            # Bug fix: this was assigned to an otherwise unused attribute
            # root_contains, while writeheader() stores it as dataset_name.
            self.dataset_name = f.root._v_attrs.dataset_name
            self.root_contains = self.dataset_name  # kept for backward compatibility
            self.group_prefix = f.root._v_attrs.group_prefix
            self.table_prefix = f.root._v_attrs.table_prefix
            self.index_format = f.root._v_attrs.index_format
            self.variables = ', '.join(f.get_node(f.root, "variables", classname='Array').read())
            self.units = ', '.join(f.get_node(f.root, "units", classname='Array').read())
        except Exception as e:
            raise IOError("Problem encountered while reading attributes (" + str(e) + ")")

    def getDataFileExt(self):
        """Return the extension of the data file, without the leading dot."""
        # Bug fix: splitext used to be applied to the open file handle (a
        # tables.File object), which raised a TypeError that was then silently
        # swallowed by a return inside a finally clause. Use the file name.
        result = self.DATAFILEXT
        try:
            result = os.path.splitext(self.name)[1].strip('.')
        except Exception:
            pass
        return result

    def writeheader(self):
        """Write georeference and layout info as attributes into the open file."""
        try:
            f = self.datafile
            # Write attributes to file
            f.root._v_attrs.ncols = self.ncols
            f.root._v_attrs.nrows = self.nrows
            f.root._v_attrs.xllcorner = self.xll
            f.root._v_attrs.yllcorner = self.yll
            f.root._v_attrs.cellsize = self.cellsize
            f.root._v_attrs.nodata_value = self.nodatavalue
            f.root._v_attrs.dataset_name = self.dataset_name
            f.root._v_attrs.group_prefix = self.group_prefix
            f.root._v_attrs.table_prefix = self.table_prefix
            f.root._v_attrs.index_format = self.index_format

            # Add arrays to the file - variables and units are already lists.
            # If an array already exists, remove it first so it is replaced.
            try:
                f.get_node(f.root, "variables", classname='Array')
                f.remove_node(f.root, "variables")
            except tables.NoSuchNodeError:
                pass
            f.create_array(f.root, "variables", self.variables)
            try:
                f.get_node(f.root, "units", classname='Array')
                # Bug fix: this used to remove the "variables" node again,
                # leaving any stale "units" array in place.
                f.remove_node(f.root, "units")
            except tables.NoSuchNodeError:
                pass
            f.create_array(f.root, "units", self.units)
            f.flush()
        except Exception as e:
            msg = "Attributes could not be written to file: " + self.datafile.filename + "\n"
            raise IOError(msg + str(e))

    def next(self, parseLine=True):
        """
        Read the next row slice if possible, otherwise raise StopIteration.

        :param parseLine: when False, only advance the cursor without reading.
        :return: a list of ncols arrays with the records of the current row.
        """
        result = None
        try:
            # Bug fix: currow is zero-based, so the last valid row index is
            # nrows - 1; the original '>' test allowed one read past the end
            # (masked by the broad except below).
            if self.currow >= self.nrows:
                raise StopIteration
            if parseLine:
                grp_name = (self.group_prefix + "%" + self.index_format) % self.currow
                grp = self.datafile.get_node(self.datafile.root, grp_name)
                result = [None] * self.ncols
                for k in range(0, self.ncols):
                    tbl_name = (self.table_prefix + "%" + self.index_format) % k
                    tbl = self.datafile.get_node(grp, tbl_name)
                    result[k] = tbl.read()
            self.currow += 1  # row index is zero-based!
            return result
        except Exception:
            raise StopIteration

    def writenext(self, sequence_with_data, recordClass):
        """
        Write the next row if possible, otherwise raise StopIteration.

        We assume that exactly 1 row is included, with for each pixel of the
        current row an array with records or at least a None value.
        """
        try:
            # Check input
            assert isinstance(sequence_with_data, Sequence), "Given input not of the expected type!"
            assert len(sequence_with_data) == self.ncols, "Input array does not have the expected size!"
            msg = "Input class reference does not inherit from tables.IsDescription!"
            assert issubclass(recordClass, tables.IsDescription), msg

            # Assume that the group does not yet exist; note: row index is zero-based!
            if self.currow >= self.nrows:
                raise StopIteration
            filter1 = tables.filters.Filters(complevel=1, complib='blosc', fletcher32=True)
            grp_name = (self.group_prefix + "%" + self.index_format) % self.currow
            f = self.datafile
            grp = f.create_group(f.root, grp_name, 'represents a row')

            # Now loop and add a table for each column
            for k in range(0, self.ncols):
                recs = sequence_with_data[k]
                if (recs is not None) and (type(recs) is list):
                    # Create the table and add the records to it
                    tbl_name = (self.table_prefix + "%" + self.index_format) % k
                    tbl = f.create_table(grp, tbl_name, recordClass,
                                         expectedrows=len(recs), filters=filter1)
                    tbl.append(recs)
                    tbl.flush()
            self.currow += 1
            return True
        except Exception as e:
            print(e)
            raise StopIteration

    def write(self, colIndex, recordList, recordClass):
        """Write the records for one cell of the current row, replacing any
        existing table for that cell."""
        # TODO test this!
        if (recordList is None) or (not type(recordList) is list):
            raise ValueError("Records were not provided in the form of a list.")
        msg = "Input class reference does not inherit from tables.IsDescription!"
        assert issubclass(recordClass, tables.IsDescription), msg

        # It is assumed that the instance has been moved to the intended row already
        f = self.datafile
        grp_name = (self.group_prefix + "%" + self.index_format) % self.currow
        try:
            grp = f.get_node(f.root, grp_name)
        except NoSuchNodeError:
            grp = f.create_group(f.root, grp_name, 'represents a row')

        # If the table already exists, delete it so it can be replaced.
        tbl_name = (self.table_prefix + "%" + self.index_format) % colIndex
        try:
            # Bug fix: del_node_attr removes an *attribute*, not the table
            # node, and does not raise NoSuchNodeError; remove_node is meant.
            f.remove_node(grp, tbl_name)
        except NoSuchNodeError:
            pass
        try:
            # Create the table and add the records to it
            filter1 = tables.filters.Filters(complevel=1, complib='blosc', fletcher32=True)
            tbl = f.create_table(grp, tbl_name, recordClass,
                                 expectedrows=len(recordList), filters=filter1)
            tbl.append(recordList)
            tbl.flush()
        except Exception as e:
            raise IOError(str(e))

    def flush(self):
        """Flush pending writes to disk."""
        self.datafile.flush()

    def close(self):
        """Close the underlying HDF5 file if it is open."""
        if self.datafile:
            self.datafile.close()
            self.datafile = None

    def reset(self):
        """Reset the row cursor to the first row."""
        super(Hdf5Raster, self).reset()

    @GridEnvelope2D.dx.setter
    def dx(self, dx):
        # The format assumes square pixels: dx must (approximately) equal dy.
        if abs(dx - self.dy) > const.epsilon:
            warn("Given the HDF5 file format, class Hdf5Raster must have 1 pixel size for the horizontal and the vertical!")
        GridEnvelope2D.dx.fset(self, dx)

    @GridEnvelope2D.dy.setter
    def dy(self, dy):
        if abs(dy - self.dx) > const.epsilon:
            warn("Given the HDF5 file format, class Hdf5Raster must have 1 pixel size for the horizontal and the vertical!")
        # Bug fix: the original invoked the dx setter here, so assigning dy
        # clobbered dx instead of updating dy.
        GridEnvelope2D.dy.fset(self, dy)
PypiClean
/CartiMorph_nnUNet-1.7.14.tar.gz/CartiMorph_nnUNet-1.7.14/CartiMorph_nnUNet/experiment_planning/DatasetAnalyzer.py
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing import Pool
from CartiMorph_nnUNet.configuration import default_num_threads
from CartiMorph_nnUNet.paths import nnUNet_raw_data, nnUNet_cropped_data
import numpy as np
import pickle
from CartiMorph_nnUNet.preprocessing.cropping import get_patient_identifiers_from_cropped_files
from skimage.morphology import label
from collections import OrderedDict


class DatasetAnalyzer(object):
    """Computes dataset fingerprint statistics (sizes, spacings, classes,
    foreground intensities) from a folder of cropped nnU-Net cases and caches
    them as pickle files next to the data."""

    def __init__(self, folder_with_cropped_data, overwrite=True, num_processes=default_num_threads):
        """
        :param folder_with_cropped_data: folder holding <case>.npz/.pkl files
            and a dataset.json.
        :param overwrite: If True then precomputed values will not be used and instead recomputed from the data.
        False will allow loading of precomputed values. This may be dangerous though if some of the code of this class
        was changed, therefore the default is True.
        """
        self.num_processes = num_processes
        self.overwrite = overwrite
        self.folder_with_cropped_data = folder_with_cropped_data
        self.sizes = self.spacings = None
        self.patient_identifiers = get_patient_identifiers_from_cropped_files(self.folder_with_cropped_data)
        assert isfile(join(self.folder_with_cropped_data, "dataset.json")), \
            "dataset.json needs to be in folder_with_cropped_data"
        # Cache files written into the cropped-data folder.
        self.props_per_case_file = join(self.folder_with_cropped_data, "props_per_case.pkl")
        self.intensityproperties_file = join(self.folder_with_cropped_data, "intensityproperties.pkl")

    def load_properties_of_cropped(self, case_identifier):
        """Load the per-case properties pickle written during cropping."""
        with open(join(self.folder_with_cropped_data, "%s.pkl" % case_identifier), 'rb') as f:
            properties = pickle.load(f)
        return properties

    @staticmethod
    def _check_if_all_in_one_region(seg, regions):
        """For each class-tuple in `regions`, report whether its voxels form a
        single connected component in `seg`."""
        res = OrderedDict()
        for r in regions:
            new_seg = np.zeros(seg.shape)
            for c in r:
                new_seg[seg == c] = 1
            labelmap, numlabels = label(new_seg, return_num=True)
            if numlabels != 1:
                res[tuple(r)] = False
            else:
                res[tuple(r)] = True
        return res

    @staticmethod
    def _collect_class_and_region_sizes(seg, all_classes, vol_per_voxel):
        """Return (total volume per class, list of per-component volumes per
        class), both in physical units (voxel count * vol_per_voxel)."""
        volume_per_class = OrderedDict()
        region_volume_per_class = OrderedDict()
        for c in all_classes:
            region_volume_per_class[c] = []
            volume_per_class[c] = np.sum(seg == c) * vol_per_voxel
            labelmap, numregions = label(seg == c, return_num=True)
            for l in range(1, numregions + 1):
                region_volume_per_class[c].append(np.sum(labelmap == l) * vol_per_voxel)
        return volume_per_class, region_volume_per_class

    def _get_unique_labels(self, patient_identifier):
        # Segmentation is stored as the last channel of the npz 'data' array.
        seg = np.load(join(self.folder_with_cropped_data, patient_identifier) + ".npz")['data'][-1]
        unique_classes = np.unique(seg)
        return unique_classes

    def _load_seg_analyze_classes(self, patient_identifier, all_classes):
        """
        1) what class is in this training case?
        2) what is the size distribution for each class?
        3) what is the region size of each class?
        4) check if all in one region
        :return:
        """
        seg = np.load(join(self.folder_with_cropped_data, patient_identifier) + ".npz")['data'][-1]
        pkl = load_pickle(join(self.folder_with_cropped_data, patient_identifier) + ".pkl")
        vol_per_voxel = np.prod(pkl['itk_spacing'])

        # ad 1)
        unique_classes = np.unique(seg)

        # 4) check if all in one region
        regions = list()
        regions.append(list(all_classes))
        for c in all_classes:
            regions.append((c, ))
        all_in_one_region = self._check_if_all_in_one_region(seg, regions)

        # 2 & 3) region sizes
        volume_per_class, region_sizes = self._collect_class_and_region_sizes(seg, all_classes, vol_per_voxel)

        return unique_classes, all_in_one_region, volume_per_class, region_sizes

    def get_classes(self):
        """Return the label dict from dataset.json (label index -> name)."""
        datasetjson = load_json(join(self.folder_with_cropped_data, "dataset.json"))
        return datasetjson['labels']

    def analyse_segmentations(self):
        """Determine which classes occur in each case (parallelized); results
        are cached in props_per_case.pkl unless overwrite is set."""
        class_dct = self.get_classes()

        if self.overwrite or not isfile(self.props_per_case_file):
            p = Pool(self.num_processes)
            res = p.map(self._get_unique_labels, self.patient_identifiers)
            p.close()
            p.join()

            props_per_patient = OrderedDict()
            # NOTE(review): the loop variable shadows the Pool `p` above;
            # harmless here because the pool is already joined.
            for p, unique_classes in \
                    zip(self.patient_identifiers, res):
                props = dict()
                props['has_classes'] = unique_classes
                props_per_patient[p] = props

            save_pickle(props_per_patient, self.props_per_case_file)
        else:
            props_per_patient = load_pickle(self.props_per_case_file)
        return class_dct, props_per_patient

    def get_sizes_and_spacings_after_cropping(self):
        """Collect per-case cropped sizes and original spacings."""
        sizes = []
        spacings = []
        # for c in case_identifiers:
        for c in self.patient_identifiers:
            properties = self.load_properties_of_cropped(c)
            sizes.append(properties["size_after_cropping"])
            spacings.append(properties["original_spacing"])
        return sizes, spacings

    def get_modalities(self):
        """Return {modality index (int): modality name} from dataset.json."""
        datasetjson = load_json(join(self.folder_with_cropped_data, "dataset.json"))
        modalities = datasetjson["modality"]
        modalities = {int(k): modalities[k] for k in modalities.keys()}
        return modalities

    def get_size_reduction_by_cropping(self):
        """Return {case id: cropped volume / original volume} per case."""
        size_reduction = OrderedDict()
        for p in self.patient_identifiers:
            props = self.load_properties_of_cropped(p)
            shape_before_crop = props["original_size_of_raw_data"]
            shape_after_crop = props['size_after_cropping']
            size_red = np.prod(shape_after_crop) / np.prod(shape_before_crop)
            size_reduction[p] = size_red
        return size_reduction

    def _get_voxels_in_foreground(self, patient_identifier, modality_id):
        """Sample intensity values of one modality where the segmentation is
        foreground (> 0); subsampled by 10 to bound memory."""
        all_data = np.load(join(self.folder_with_cropped_data, patient_identifier) + ".npz")['data']
        modality = all_data[modality_id]
        mask = all_data[-1] > 0
        voxels = list(modality[mask][::10])  # no need to take every voxel
        return voxels

    @staticmethod
    def _compute_stats(voxels):
        """Return (median, mean, sd, min, max, 99.5th pct, 0.5th pct) of the
        voxel list; all NaN if the list is empty."""
        if len(voxels) == 0:
            return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
        median = np.median(voxels)
        mean = np.mean(voxels)
        sd = np.std(voxels)
        mn = np.min(voxels)
        mx = np.max(voxels)
        percentile_99_5 = np.percentile(voxels, 99.5)
        percentile_00_5 = np.percentile(voxels, 00.5)
        return median, mean, sd, mn, mx, percentile_99_5, percentile_00_5

    def collect_intensity_properties(self, num_modalities):
        """Compute global and per-case foreground intensity statistics for
        each modality; cached in intensityproperties.pkl unless overwrite."""
        if self.overwrite or not isfile(self.intensityproperties_file):
            p = Pool(self.num_processes)

            results = OrderedDict()
            for mod_id in range(num_modalities):
                results[mod_id] = OrderedDict()
                v = p.starmap(self._get_voxels_in_foreground, zip(self.patient_identifiers,
                                                                  [mod_id] * len(self.patient_identifiers)))

                # Pool all cases' voxels for the global statistics.
                w = []
                for iv in v:
                    w += iv

                median, mean, sd, mn, mx, percentile_99_5, percentile_00_5 = self._compute_stats(w)
                local_props = p.map(self._compute_stats, v)
                props_per_case = OrderedDict()
                for i, pat in enumerate(self.patient_identifiers):
                    props_per_case[pat] = OrderedDict()
                    props_per_case[pat]['median'] = local_props[i][0]
                    props_per_case[pat]['mean'] = local_props[i][1]
                    props_per_case[pat]['sd'] = local_props[i][2]
                    props_per_case[pat]['mn'] = local_props[i][3]
                    props_per_case[pat]['mx'] = local_props[i][4]
                    props_per_case[pat]['percentile_99_5'] = local_props[i][5]
                    props_per_case[pat]['percentile_00_5'] = local_props[i][6]

                results[mod_id]['local_props'] = props_per_case
                results[mod_id]['median'] = median
                results[mod_id]['mean'] = mean
                results[mod_id]['sd'] = sd
                results[mod_id]['mn'] = mn
                results[mod_id]['mx'] = mx
                results[mod_id]['percentile_99_5'] = percentile_99_5
                results[mod_id]['percentile_00_5'] = percentile_00_5

            p.close()
            p.join()
            save_pickle(results, self.intensityproperties_file)
        else:
            results = load_pickle(self.intensityproperties_file)
        return results

    def analyze_dataset(self, collect_intensityproperties=True):
        """Run the full analysis and write dataset_properties.pkl; returns the
        resulting properties dict."""
        # get all spacings and sizes
        sizes, spacings = self.get_sizes_and_spacings_after_cropping()

        # get all classes and what classes are in what patients
        # class min size
        # region size per class
        classes = self.get_classes()
        all_classes = [int(i) for i in classes.keys() if int(i) > 0]

        # modalities
        modalities = self.get_modalities()

        # collect intensity information
        if collect_intensityproperties:
            intensityproperties = self.collect_intensity_properties(len(modalities))
        else:
            intensityproperties = None

        # size reduction by cropping
        size_reductions = self.get_size_reduction_by_cropping()

        dataset_properties = dict()
        dataset_properties['all_sizes'] = sizes
        dataset_properties['all_spacings'] = spacings
        dataset_properties['all_classes'] = all_classes
        dataset_properties['modalities'] = modalities  # {idx: modality name}
        dataset_properties['intensityproperties'] = intensityproperties
        dataset_properties['size_reductions'] = size_reductions  # {patient_id: size_reduction}

        save_pickle(dataset_properties, join(self.folder_with_cropped_data, "dataset_properties.pkl"))
        return dataset_properties
PypiClean
/Bis-Miner-3.11.1.tar.gz/Bis-Miner-3.11.0/Orange/evaluation/clustering.py
import numpy as np
from sklearn.metrics import silhouette_score, adjusted_mutual_info_score, silhouette_samples

from Orange.data import Table
from Orange.evaluation.testing import Results
from Orange.evaluation.scoring import Score

__all__ = ['ClusteringEvaluation']


class ClusteringResults(Results):
    """Results of clustering runs: predicted labels per method and per fold."""

    def __init__(self, store_data=True, **kwargs):
        # Forward the caller's choice instead of hard-coding True
        # (the original ignored its ``store_data`` argument).
        super().__init__(store_data=store_data, **kwargs)

    def get_fold(self, fold):
        """Return a new :class:`ClusteringResults` restricted to one fold."""
        results = ClusteringResults()
        results.data = self.data

        if self.folds is None:
            raise ValueError("This 'Results' instance does not have folds.")

        if self.models is not None:
            results.models = self.models[fold]

        results.row_indices = self.row_indices
        results.actual = self.actual
        results.predicted = self.predicted[:, fold, :]
        results.domain = self.domain
        return results


class ClusteringScore(Score):
    """Base class for clustering scores.

    Subclasses set ``considers_actual`` to True when the score compares
    predicted labels against known ("actual") labels; otherwise the score is
    computed from the data matrix and predicted labels alone.
    """
    considers_actual = False

    def from_predicted(self, results, score_function):
        if self.considers_actual:
            # Clustering scores from labels
            return np.fromiter(
                (score_function(results.actual.flatten(), predicted.flatten())
                 for predicted in results.predicted),
                dtype=np.float64, count=len(results.predicted))
        else:
            # Clustering scores from data only
            return np.fromiter(
                (score_function(results.data.X, predicted.flatten())
                 for predicted in results.predicted),
                dtype=np.float64, count=len(results.predicted))


class Silhouette(ClusteringScore):
    """Mean silhouette coefficient of the predicted clustering."""
    separate_folds = True

    def compute_score(self, results):
        return self.from_predicted(results, silhouette_score)


class AdjustedMutualInfoScore(ClusteringScore):
    """Adjusted mutual information between predicted and actual labels."""
    separate_folds = True
    considers_actual = True

    def compute_score(self, results):
        return self.from_predicted(results, adjusted_mutual_info_score)


class ClusteringEvaluation(ClusteringResults):
    """
    Clustering evaluation.

    If the constructor is given the data and a list of learning algorithms, it
    runs clustering and returns an instance of `Results` containing the
    predicted clustering labels.

    .. attribute:: k

        The number of runs.
    """
    def __init__(self, data, learners, k=1, store_models=False):
        super().__init__(data=data, nmethods=len(learners), store_data=True,
                         store_models=store_models, predicted=None)
        self.k = k
        self.predicted = np.empty((len(learners), self.k, len(data)))
        self.folds = range(k)
        self.row_indices = np.arange(len(data))
        self.actual = data.Y.flatten() if hasattr(data, "Y") else None

        if self.store_models:
            self.models = []

        # Each run ("fold") re-fits every learner on the full data set.
        # (Loop variable renamed from ``k``, which shadowed the parameter.)
        for fold in range(self.k):
            if self.store_models:
                fold_models = []
                self.models.append(fold_models)
            for i, learner in enumerate(learners):
                model = learner(data)
                if self.store_models:
                    fold_models.append(model)
                labels = model(data)
                self.predicted[i, fold, :] = labels.X.flatten()


def graph_silhouette(X, y, xlim=None, colors=None, figsize=None, filename=None):
    """
    Silhouette plot.

    :param filename: Output file name.
    :param X Orange.data.Table or numpy.ndarray Data table.
    :param y Orange.data.Table or numpy.ndarray: Cluster labels (integers).
    :param colors list, optional (default = None): List of colors. If provided, it must equal the number of clusters.
    :param figsize tuple (float, float): Figure size (width, height) in inches.
    :param xlim tuple (float, float): Limit x-axis values.
    """
    import matplotlib.pyplot as plt
    if isinstance(X, Table):
        X = X.X
    if isinstance(y, Table):
        y = y.X
    y = y.ravel()

    # Detect number of clusters and set colors
    N = len(set(y))
    if colors is None:
        colors = ["g" if i % 2 else "b" for i in range(N)]
    elif len(colors) != N:
        import sys
        sys.stderr.write("Number of colors does not match the number of clusters. \n")
        return

    # Silhouette coefficients, sorted by cluster label.
    s = silhouette_samples(X, y)
    s = s[np.argsort(y)]

    # Cluster slice boundaries inside the sorted score array come from
    # *cumulative* counts.  (The original used the previous cluster's own
    # count as the start offset, which produced wrong slices for N > 2;
    # assumes labels are 0..N-1, as produced by the clustering methods.)
    counts = [int(np.sum(y == label)) for label in range(N)]
    bounds = np.concatenate(([0], np.cumsum(counts)))

    parts = []
    # Within clusters sort by silhouette scores
    for label in range(N):
        scores = sorted(s[bounds[label]:bounds[label + 1]])
        parts.append((scores, label))

    # Plot data
    if figsize:
        plt.figure(figsize=figsize)
    else:
        plt.figure()
    plt.title("Silhouette score")
    total = 0
    centers = []
    for i, (scores, label) in enumerate(parts):
        plt.barh(range(total, total + len(scores)), scores,
                 color=colors[i], edgecolor=colors[i])
        centers.append(total + len(scores) / 2)
        total += len(scores)
    if xlim is not None:
        plt.xlim(xlim)
    plt.yticks(centers)
    plt.gca().set_yticklabels(range(N))
    plt.ylabel("Cluster label")
    if filename:
        plt.savefig(filename)
        plt.close()
    else:
        plt.show()
PypiClean
/Nano-CAT-0.7.2.tar.gz/Nano-CAT-0.7.2/nanoCAT/ff/match_job.py
import os
import io
import stat
from typing import Generator, Any, List, Tuple, Hashable, Optional
from os.path import join, isfile
from itertools import chain

from scm.plams.core.private import sha256
from scm.plams import SingleJob, JobError, Results, Settings, Molecule, FileError

try:
    from scm.plams import writepdb, readpdb
    RDKIT_EX: Optional[ImportError] = None
except ImportError as ex:
    RDKIT_EX = ex

__all__ = ['MatchJob', 'MatchResults']


class MatchResults(Results):
    """The :class:`Results` subclass of :class:`MatchJob`."""

    def recreate_molecule(self) -> Molecule:
        """Create a |Molecule| instance from ``"$JN.pdb"``."""
        pdb_file = self['$JN.pdb']
        try:
            return readpdb(self['$JN.pdb'])
        except AttributeError as ex:
            # readpdb() will pass None to from_rdmol(),
            # resulting in an AttributeError down the line
            raise FileError(f"Failed to parse the content of ...{os.sep}{pdb_file!r}") from ex

    def recreate_settings(self) -> Settings:
        """Construct a |Settings| instance from ``"$JN.run"``."""
        runfile = self['$JN.run']

        # Skip ahead to the line mentioning the MATCH executable; the actual
        # argument list is on the line that follows it.
        with open(runfile, 'r') as f:
            for i in f:
                if 'MATCH.pl' in i:
                    args = next(f).split()
                    break
            else:
                raise FileError(f"Failed to parse the content of ...{os.sep}{runfile!r}")

        # Delete the executable and pop the .pdb filename
        del args[0]
        pdb_file = args.pop(-1)

        # Remaining tokens alternate between "-key" and "value".
        s = Settings()
        for k, v in zip(args[0::2], args[1::2]):
            k = k[1:].lower()
            s.input[k] = v
        s.input.filename = pdb_file
        return s

    def get_atom_names(self) -> List[str]:
        """Return a list of atom names extracted from ``"$JN.rtf"``."""
        return self._parse_rtf(1)

    def get_atom_types(self) -> List[str]:
        """Return a list of atom types extracted from ``"$JN.rtf"``."""
        return self._parse_rtf(2)

    def get_atom_charges(self) -> List[float]:
        """Return a list of atomic charges extracted from ``"$JN.rtf"``."""
        return self._parse_rtf(3, as_type=float)

    def _parse_rtf(self, column: int, block: str = 'ATOM', as_type: type = str) -> list:
        """Extract values from the **block** in ``"$JN.rtf"``.

        :param column: whitespace-separated column index to collect.
        :param block: record name prefix identifying relevant lines.
        :param as_type: callable applied to each collected value.
        """
        filename = self['$JN.rtf']
        ret = []
        i, j = len(block), column

        with open(filename, 'r') as f:
            for item in f:
                if item[:i] == block:
                    item_list = item.split()
                    ret.append(as_type(item_list[j]))
        return ret


class MatchJob(SingleJob):
    """A :class:`Job` subclass for interfacing with MATCH_: Multipurpose Atom-Typer for CHARMM.

    .. _MATCH: http://brooks.chem.lsa.umich.edu/index.php?page=match&subdir=articles/resources/software

    Examples
    --------
    An example :class:`MatchJob` job:

    .. code:: python

        >>> s = Settings()
        >>> s.input.forcefield = 'top_all36_cgenff_new'
        >>> s.input.filename = 'ala.pdb'

        # Command line equivalent: MATCH.pl -forcefield top_all36_cgenff_new ala.pdb
        >>> job = MatchJob(settings=s)
        >>> results = job.run()

    The same example while with a :class:`Molecule` instance:

    .. code:: python

        >>> mol = Molecule('ala.pdb')
        >>> s = Settings()
        >>> s.input.forcefield = 'top_all36_cgenff_new'

        # Command line equivalent: MATCH.pl -forcefield top_all36_cgenff_new ala.pdb
        >>> job = MatchJob(molecule=mol, settings=s)
        >>> results = job.run()

    See Also
    --------
    `10.1002/jcc.21963<https://doi.org/10.1002/jcc.21963>`_
        MATCH: An atom-typing toolset for molecular mechanics force fields,
        J.D. Yesselman, D.J. Price, J.L. Knight and C.L. Brooks III,
        J. Comput. Chem., 2011.
    """
    _result_type = MatchResults
    MATCH: str = os.path.join('$MATCH', 'scripts', 'MATCH.pl')
    pdb: str

    def __init__(self, settings: Settings, **kwargs: Any) -> None:
        """Initialize a :class:`MatchJob` instance."""
        if RDKIT_EX is not None:
            raise ImportError(f"{RDKIT_EX}; usage of {self.__class__.__name__!r} requires "
                              "the 'rdkit' package") from RDKIT_EX

        # NOTE(review): the original called ``super().__init__(**kwargs)``,
        # dropping *settings* on the floor so ``self.settings`` stayed at the
        # plams default; forward it explicitly — confirm against the plams
        # ``Job.__init__`` signature.
        super().__init__(settings=settings, **kwargs)
        self._prepare_settings()
        self._prepare_pdb(io.StringIO())

    def _get_ready(self) -> None:
        """Create the runfile."""
        runfile = os.path.join(self.path, self._filename('run'))
        with open(runfile, 'w') as run:
            run.write(self.full_runscript())
        # Make the runscript executable for its owner.
        os.chmod(runfile, os.stat(runfile).st_mode | stat.S_IEXEC)

    def get_input(self) -> None:
        """Not implemented; see :meth:`MatchJob.get_runscript`."""
        cls_name = self.__class__.__name__
        raise NotImplementedError(f"`{cls_name}.get_input()` is not implemented; "
                                  f"see `{cls_name}.get_runscript()`")

    def hash_input(self) -> str:
        """Hash the input settings, the cached .pdb text and the job type."""
        def get_2tups() -> Generator[Tuple[str, Hashable], None, None]:
            for k, v in self.settings.input.items():
                if isinstance(v, list):
                    v = tuple(v)  # lists are unhashable; freeze them
                yield k, v
            yield 'pdb', self.pdb
            yield 'type', type(self)

        return sha256(frozenset(get_2tups()))

    def get_runscript(self) -> str:
        """Return the MATCH runscript."""
        self._writepdb()  # Write the .pdb file stored in MatchJob.pdb

        kv_iterator = ((k.strip('-'), str(v)) for k, v in self.settings.input.items())
        args = ' '.join(i for i in chain.from_iterable(kv_iterator))
        return f'"{self.MATCH}" {args} ".{os.sep}{self.name}.pdb"'

    def hash_runscript(self) -> str:
        """Alias for :meth:`MatchJob.hash_input`."""
        return self.hash_input()

    def check(self) -> bool:
        """Check if the .prm, .rtf and top_...rtf files are present."""
        files = {f'{self.name}.prm', f'{self.name}.rtf', f'top_{self.name}.rtf'}
        return files.issubset(self.results.files)

    """###################################### New methods ######################################"""

    def _prepare_settings(self) -> None:
        """Take :attr:`MatchJob.settings` and lower all its keys and strip ``"-"`` characters."""
        s = self.settings.input
        self.settings.input = Settings({k.lower().strip('-'): v for k, v in s.items()})

    def _prepare_pdb(self, stream) -> None:
        """Fill :attr:`MatchJob.pdb` with a string-representation of the .pdb file.

        Exactly one of ``self.molecule`` and ``settings.input.filename`` must
        be provided; the set below collapses to a single boolean when both
        flags agree, so ``any``/``all`` detect "neither" and "both".
        """
        conditions = {'filename' in self.settings.input, bool(self.molecule)}
        if not any(conditions):
            raise JobError("Ambiguous input: either `molecule` or "
                           "`settings.input.filename` must be specified")
        if all(conditions):
            raise JobError("Ambiguous input: `molecule` and "
                           "`settings.input.filename` cannot be both specified")

        if self.molecule:
            writepdb(self.molecule, stream)
        else:
            filename = self.settings.input.pop('filename')
            writepdb(readpdb(filename), stream)
        self.pdb: str = stream.getvalue()

    def _writepdb(self) -> None:
        """Write :attr:`MatchJob.pdb` to ``"$JN.pdb"`` inside the job directory."""
        filename = join(self.path, f'{self.name}.pdb')
        if not isfile(filename):
            # Dump the cached .pdb text rather than calling ``writepdb`` on
            # ``self.molecule``: the molecule is None when the job was
            # constructed from ``settings.input.filename``, which made the
            # original implementation crash on that input path.
            with open(filename, 'w') as f:
                f.write(self.pdb)
PypiClean
/GenomeQAML-0.0.14.tar.gz/GenomeQAML-0.0.14/README.md
[![Build status](https://travis-ci.org/OLC-LOC-Bioinformatics/GenomeQAML.svg?branch=master)](https://travis-ci.org/OLC-LOC-Bioinformatics/GenomeQAML)

# GenomeQAML: Genome Quality Assessment with Machine Learning

GenomeQAML is a script that uses a pre-computed ExtraTreesClassifier model in order to classify FASTA-formatted _de novo_ assemblies as bad, good, or very good. It's easy to use, and has minimal dependencies.

## External Dependencies

- [Mash (v2.0 or greater)](https://github.com/marbl/mash)
- [Prodigal (>=2.6.2)](https://github.com/hyattpd/Prodigal)

Both of these need to be downloaded and included on your $PATH.

## Installation

All you need to do is install with pip: `pip install genomeqaml`. Usage of a virtualenv is highly recommended.

## Usage

GenomeQAML takes a directory containing uncompressed FASTA files as input - these will be classified and a report written to a CSV-formatted file for your inspection.

To run, type `classify.py -t /path/to/fasta/folder`

This will create a report, by default called `QAMLreport.csv`. You can change the name of the report with the `-r` argument.

```
usage: classify.py [-h] -t TEST_FOLDER [-r REPORT_FILE]

optional arguments:
  -h, --help            show this help message and exit
  -t TEST_FOLDER, --test_folder TEST_FOLDER
                        Path to folder containing FASTA files you want to
                        test.
  -r REPORT_FILE, --report_file REPORT_FILE
                        Name of output file. Default is QAMLreport.csv.
```
PypiClean
/GQCMS-0.0.4-py3-none-any.whl/build/lib/build/lib/build/lib/build/lib/gqcms/ConfigurationInteraction.py
import numpy as np
import pandas as pd
from scipy import linalg

from gqcms import HartreeFock
from gqcms import Hubbard
from gqcms import Determinant
from gqcms import createHamiltonianSCI
from gqcms import DensityOperator


def FCI(hamiltonian: np.ndarray, states: tuple = 0) -> pd.DataFrame:
    """
    Computes the energy of a Hamiltonian via exact diagonalization.

    :param hamiltonian: a np.ndarray of a Hamiltonian
    :param states: indicates from which states the output is returned
        (default is 0, i.e. the ground state); an int or a tuple of ints
    :return: pandas DataFrame with the columns ['E', 'C', '1PDM'],
        one row per requested state
    """
    # eigh: the Hamiltonian is Hermitian, so eigenvalues come out ascending.
    energies, C = linalg.eigh(hamiltonian)

    if isinstance(states, int):
        states = (states,)

    df_list = []
    # Build one single-row frame per requested state.
    for state in states:
        # One-particle density matrix of the selected eigenstate.
        D = np.outer(C[:, state], C[:, state].T)
        df = pd.DataFrame([(energies[state], C[:, state], D)],
                          columns=["E", "C", "1PDM"])
        df_list.append(df)

    if len(df_list) > 1:
        # Stack the single-row frames into one row per state.  (The original
        # concatenated along axis=1 and transposed, which scrambled the
        # ['E', 'C', '1PDM'] columns for multi-state requests.)
        final_frame = pd.concat(df_list, ignore_index=True)
    else:
        final_frame = df_list[0]

    return final_frame


def SCI(
    molecule: Hubbard,
    excitations: list,
    result_HF=None,
    maxiter: int = 100,
    max_size: int = 5,
    bdiis: bool = False,
    bdamping: bool = True,
    bseries: bool = False,
    diis_convergence: float = 1e-2,
    E_convergence: float = 1e-6,
    D_convergence: float = 1e-8,
) -> pd.Series:
    """
    Performs a selected CI calculation with the given excitation degrees of
    the Hartree-Fock determinant.

    :param molecule: Hubbard molecule
    :param excitations: list of excitation degrees that are taken into account
    :param result_HF: result of a Hartree-Fock computation
    :param maxiter: max number of iterations the Hartree-Fock algorithm is allowed to take
    :param max_size: max size of the DIIS subspace
    :param bdiis: use DIIS or not (default is False)
    :param bdamping: use density damping or not (default is True)
    :param bseries: return a pandas series object or not (default is False)
    :param diis_convergence: min threshold of DIIS needed to stop the algorithm succesfully (default is 1e-2)
    :param E_convergence: min threshold of energy difference to stop the algorithm succesfully (default is 1e-6)
    :param D_convergence: min threshold of change in density matrices (default is 1e-8)
    :return: pandas Series with the entries ['E', 'C', '1PDM', 'D_site']
    """
    # Convert excitations to list if int is given
    if isinstance(excitations, int):
        excitations = [excitations]

    # Perform a Hartree-Fock computation if result_HF is None
    if result_HF is None:
        HF_solver = HartreeFock(molecule, bdiis, bdamping, bseries, max_size,
                                diis_convergence, E_convergence, D_convergence,
                                maxiter)
        result_HF = HF_solver.solve()

    # Create Hamiltonian in the selected determinant basis
    H, basis = createHamiltonianSCI(molecule, result_HF,
                                    excitations=excitations, return_extra=True)

    # Compute energies
    energies, coefficients = linalg.eigh(H)

    # Create density matrix in MO basis (ground state only)
    D_mo = DensityOperator(coefficients[:, 0], basis, molecule.sites)

    # Compute density matrix in site basis
    D_site = result_HF.C_a @ D_mo @ result_HF.C_a.T

    return pd.Series({'E': energies[0],
                      'C': coefficients[:, 0],
                      '1PDM': np.outer(coefficients[:, 0], coefficients[:, 0].T),
                      'D_site': D_site})
PypiClean
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/concrete/expr_with_intlimits.py
from ..core import oo
from .expr_with_limits import ExprWithLimits


class ReorderError(NotImplementedError):
    """Exception raised when trying to reorder dependent limits."""

    def __init__(self, expr, msg):
        super().__init__(f'{expr} could not be reordered: {msg}.')


class ExprWithIntLimits(ExprWithLimits):
    """Represents an expression with integer limits."""

    def __init__(self, function, *symbols, **assumptions):
        # Validate only: every limit bound must be ±oo or possibly integer.
        # NOTE(review): ``self.limits`` is read here without calling
        # ``super().__init__`` — presumably construction happens in the base
        # class's ``__new__`` (common for diofant/sympy Expr types); confirm.
        if not all(all(abs(_) == oo or (_.is_integer is not False) for _ in l[1:])
                   for l in self.limits):
            raise ValueError('Limits must be integers or ±oo.')

    def change_index(self, var, trafo, newvar=None):
        r"""
        Change index of a Sum or Product.

        Perform a linear transformation `x \mapsto a x + b` on the index variable
        `x`. For `a` the only values allowed are `\pm 1`. A new variable to be used
        after the change of index can also be specified.

        Parameters
        ==========

        var : Symbol
            specifies the index variable `x` to transform.

        trafo : Expr
            The linear transformation in terms of ``var``.

        newvar : Symbol, optional
            Replacement symbol to be used instead of ``var`` in the final expression.

        Examples
        ========

        >>> from diofant.abc import i, j, l, u, v

        >>> s = Sum(x, (x, a, b))
        >>> s.doit()
        -a**2/2 + a/2 + b**2/2 + b/2

        >>> sn = s.change_index(x, x + 1, y)
        >>> sn
        Sum(y - 1, (y, a + 1, b + 1))
        >>> sn.doit()
        -a**2/2 + a/2 + b**2/2 + b/2

        >>> sn = s.change_index(x, -x, y)
        >>> sn
        Sum(-y, (y, -b, -a))
        >>> sn.doit()
        -a**2/2 + a/2 + b**2/2 + b/2

        >>> sn = s.change_index(x, x+u)
        >>> sn
        Sum(-u + x, (x, a + u, b + u))
        >>> sn.doit()
        -a**2/2 - a*u + a/2 + b**2/2 + b*u + b/2 - u*(-a + b + 1) + u
        >>> simplify(sn.doit())
        -a**2/2 + a/2 + b**2/2 + b/2

        >>> sn = s.change_index(x, -x - u, y)
        >>> sn
        Sum(-u - y, (y, -b - u, -a - u))
        >>> sn.doit()
        -a**2/2 - a*u + a/2 + b**2/2 + b*u + b/2 - u*(-a + b + 1) + u
        >>> simplify(sn.doit())
        -a**2/2 + a/2 + b**2/2 + b/2

        >>> p = Product(i*j**2, (i, a, b), (j, c, d))
        >>> p
        Product(i*j**2, (i, a, b), (j, c, d))
        >>> p2 = p.change_index(i, i+3, k)
        >>> p2
        Product(j**2*(k - 3), (k, a + 3, b + 3), (j, c, d))
        >>> p3 = p2.change_index(j, -j, l)
        >>> p3
        Product(l**2*(k - 3), (k, a + 3, b + 3), (l, -d, -c))

        When dealing with symbols only, we can make a
        general linear transformation:

        >>> sn = s.change_index(x, u*x+v, y)
        >>> sn
        Sum((-v + y)/u, (y, b*u + v, a*u + v))
        >>> sn.doit()
        -v*(a*u - b*u + 1)/u + (a**2*u**2/2 + a*u*v + a*u/2 - b**2*u**2/2 - b*u*v + b*u/2 + v)/u
        >>> simplify(sn.doit())
        a**2*u/2 + a/2 - b**2*u/2 + b/2

        However, the last result can be inconsistent with usual
        summation where the index increment is always 1. This is
        obvious as we get back the original value only for ``u``
        equal +1 or -1.

        See Also
        ========

        diofant.concrete.expr_with_intlimits.ExprWithIntLimits.index
        diofant.concrete.expr_with_intlimits.ExprWithIntLimits.reorder_limit
        diofant.concrete.expr_with_intlimits.ExprWithIntLimits.reorder
        diofant.concrete.summations.Sum.reverse_order
        diofant.concrete.products.Product.reverse_order
        """
        if newvar is None:
            newvar = var

        limits = []
        for limit in self.limits:
            if limit[0] == var:
                # trafo must be linear in var: alpha*var + beta.
                p = trafo.as_poly(var)
                if p.degree() != 1:
                    raise ValueError('Index transformation is not linear')
                alpha = p.coeff_monomial(var)
                beta = p.coeff_monomial(1)
                if alpha.is_number:
                    if alpha == 1:
                        limits.append((newvar, alpha*limit[1] + beta,
                                       alpha*limit[2] + beta))
                    elif alpha == -1:
                        # Negative step: swap the lower and upper bounds.
                        limits.append((newvar, alpha*limit[2] + beta,
                                       alpha*limit[1] + beta))
                    else:
                        raise ValueError('Linear transformation results in non-linear summation stepsize')
                else:
                    # Note that the case of alpha being symbolic can give
                    # issues if alpha < 0.
                    limits.append((newvar, alpha*limit[2] + beta,
                                   alpha*limit[1] + beta))
            else:
                limits.append(limit)

        # NOTE(review): ``alpha``/``beta`` are bound inside the loop above, so
        # this raises NameError when ``var`` matches no limit — confirm whether
        # callers guarantee ``var`` is one of the limit variables.
        function = self.function.subs({var: (var - beta)/alpha})
        function = function.subs({var: newvar})

        return self.func(function, *limits)

    def index(self, x):
        """
        Return the index of a dummy variable in the list of limits.

        Note that we start counting with 0 at the inner-most limits tuple.

        Parameters
        ==========

        x : Symbol
            a dummy variable

        Examples
        ========

        >>> Sum(x*y, (x, a, b), (y, c, d)).index(x)
        0
        >>> Sum(x*y, (x, a, b), (y, c, d)).index(y)
        1
        >>> Product(x*y, (x, a, b), (y, c, d)).index(x)
        0
        >>> Product(x*y, (x, a, b), (y, c, d)).index(y)
        1

        See Also
        ========

        diofant.concrete.expr_with_intlimits.ExprWithIntLimits.reorder_limit
        diofant.concrete.expr_with_intlimits.ExprWithIntLimits.reorder
        diofant.concrete.summations.Sum.reverse_order
        diofant.concrete.products.Product.reverse_order
        """
        variables = [limit[0] for limit in self.limits]

        # The variable must occur exactly once, otherwise the position is
        # ambiguous.
        if variables.count(x) != 1:
            raise ValueError(self, 'Number of instances of variable not equal to one')

        return variables.index(x)

    def reorder(self, *arg):
        r"""
        Reorder limits in a expression containing a Sum or a Product.

        Parameters
        ==========

        \*arg : list of tuples
            These tuples can contain numerical indices or index variable
            names or involve both.

        Examples
        ========

        >>> from diofant.abc import e, f

        >>> Sum(x*y, (x, a, b), (y, c, d)).reorder((x, y))
        Sum(x*y, (y, c, d), (x, a, b))

        >>> Sum(x*y*z, (x, a, b), (y, c, d), (z, e, f)).reorder((x, y), (x, z), (y, z))
        Sum(x*y*z, (z, e, f), (y, c, d), (x, a, b))

        >>> P = Product(x*y*z, (x, a, b), (y, c, d), (z, e, f))
        >>> P.reorder((x, y), (x, z), (y, z))
        Product(x*y*z, (z, e, f), (y, c, d), (x, a, b))

        We can also select the index variables by counting them, starting
        with the inner-most one:

        >>> Sum(x**2, (x, a, b), (x, c, d)).reorder((0, 1))
        Sum(x**2, (x, c, d), (x, a, b))

        And of course we can mix both schemes:

        >>> Sum(x*y, (x, a, b), (y, c, d)).reorder((y, x))
        Sum(x*y, (y, c, d), (x, a, b))
        >>> Sum(x*y, (x, a, b), (y, c, d)).reorder((y, 0))
        Sum(x*y, (y, c, d), (x, a, b))

        See Also
        ========

        diofant.concrete.expr_with_intlimits.ExprWithIntLimits.index
        diofant.concrete.expr_with_intlimits.ExprWithIntLimits.reorder_limit
        diofant.concrete.summations.Sum.reverse_order
        diofant.concrete.products.Product.reverse_order
        """
        new_expr = self

        # Apply the requested pairwise swaps left to right.
        for r in arg:
            if len(r) != 2:
                raise ValueError(r, 'Invalid number of arguments')

            index1 = r[0]
            index2 = r[1]

            # Symbols are translated to positional indices; ints pass through.
            if not isinstance(r[0], int):
                index1 = self.index(r[0])
            if not isinstance(r[1], int):
                index2 = self.index(r[1])

            new_expr = new_expr.reorder_limit(index1, index2)

        return new_expr

    def reorder_limit(self, x, y):
        """
        Interchange two limit tuples of a Sum or Product expression.

        Parameters
        ==========

        x, y: int
            are integers corresponding to the index variables of the two
            limits which are to be interchanged.

        Examples
        ========

        >>> from diofant.abc import e, f

        >>> Sum(x*y*z, (x, a, b), (y, c, d), (z, e, f)).reorder_limit(0, 2)
        Sum(x*y*z, (z, e, f), (y, c, d), (x, a, b))
        >>> Sum(x**2, (x, a, b), (x, c, d)).reorder_limit(1, 0)
        Sum(x**2, (x, c, d), (x, a, b))

        >>> Product(x*y*z, (x, a, b), (y, c, d), (z, e, f)).reorder_limit(0, 2)
        Product(x*y*z, (z, e, f), (y, c, d), (x, a, b))

        See Also
        ========

        diofant.concrete.expr_with_intlimits.ExprWithIntLimits.index
        diofant.concrete.expr_with_intlimits.ExprWithIntLimits.reorder
        diofant.concrete.summations.Sum.reverse_order
        diofant.concrete.products.Product.reverse_order
        """
        var = {limit[0] for limit in self.limits}
        limit_x = self.limits[x]
        limit_y = self.limits[y]

        # Only swap when neither limit's bounds depend on any index variable;
        # otherwise the reordering would change the meaning of the expression.
        if (len(set(limit_x[1].free_symbols).intersection(var)) == 0 and
                len(set(limit_x[2].free_symbols).intersection(var)) == 0 and
                len(set(limit_y[1].free_symbols).intersection(var)) == 0 and
                len(set(limit_y[2].free_symbols).intersection(var)) == 0):

            limits = []
            for i, limit in enumerate(self.limits):
                if i == x:
                    limits.append(limit_y)
                elif i == y:
                    limits.append(limit_x)
                else:
                    limits.append(limit)

            return type(self)(self.function, *limits)
        else:
            raise ReorderError(self, 'could not interchange the two limits specified')
PypiClean
/Hebel-0.02.1.tar.gz/Hebel-0.02.1/hebel/layers/logistic_layer.py
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import numpy as np
import cPickle  # NOTE(review): appears unused in this module (Python 2 era)
from pycuda import gpuarray
from pycuda import cumath  # NOTE(review): appears unused in this module
from math import sqrt

from .. import sampler

from .top_layer import TopLayer
from ..pycuda_ops import eps, linalg
from ..pycuda_ops.elementwise import sign, nan_to_zeros, substract_matrix, sigmoid
from ..pycuda_ops.reductions import matrix_sum_out_axis
from ..pycuda_ops.matrix import add_vec_to_mat
from ..pycuda_ops.softmax import cross_entropy_logistic


class LogisticLayer(TopLayer):
    r""" A logistic classification layer for two classes, using
    cross-entropy loss function and sigmoid activations.

    **Parameters:**

    n_in : integer
        Number of input units.

    parameters : array_like of ``GPUArray``
        Parameters used to initialize the layer. If this is omitted,
        then the weights are initalized randomly using *Bengio's rule*
        (uniform distribution with scale :math:`4 \cdot \sqrt{6 /
        (\mathtt{n\_in} + \mathtt{n\_out})}`) and the biases are
        initialized to zero. If ``parameters`` is given, then is must
        be in the form ``[weights, biases]``, where the shape of
        weights is ``(n_in, n_out)`` and the shape of ``biases`` is
        ``(n_out,)``. Both weights and biases must be ``GPUArray``.

    weights_scale : float, optional
        If ``parameters`` is omitted, then this factor is used as scale
        for initializing the weights instead of *Bengio's rule*.

    l1_penalty_weight : float, optional
        Weight used for L1 regularization of the weights.

    l2_penalty_weight : float, optional
        Weight used for L2 regularization of the weights.

    lr_multiplier : float, optional
        If this parameter is omitted, then the learning rate for the
        layer is scaled by :math:`2 / \sqrt{\mathtt{n\_in}}`. You may
        specify a different factor here.

    test_error_fct : {``class_error``, ``kl_error``, ``cross_entropy_error``}, optional
        Which error function to use on the test set. Default is
        ``class_error`` for classification error. Other choices are
        ``kl_error``, the Kullback-Leibler divergence, or
        ``cross_entropy_error``.

    **See also:**

    :class:`hebel.layers.SoftmaxLayer`,
    :class:`hebel.models.NeuralNet`,
    :class:`hebel.models.NeuralNetRegression`,
    :class:`hebel.layers.LinearRegressionLayer`

    **Examples**::

        # Use the simple initializer and initialize with random weights
        logistic_layer = LogisticLayer(1000)

        # Sample weights yourself, specify an L1 penalty, and don't
        # use learning rate scaling
        import numpy as np
        from pycuda import gpuarray

        n_in = 1000
        weights = gpuarray.to_gpu(.01 * np.random.randn(n_in, 1))
        biases = gpuarray.to_gpu(np.zeros((1,)))
        softmax_layer = SoftmaxLayer(n_in,
                                     parameters=(weights, biases),
                                     l1_penalty_weight=.1,
                                     lr_multiplier=1.)
    """

    # Two trainable parameter arrays (W and b); a single output unit.
    n_parameters = 2
    n_out = 1

    def __init__(self, n_in,
                 parameters=None,
                 weights_scale=None,
                 l1_penalty_weight=0.,
                 l2_penalty_weight=0.,
                 lr_multiplier=None,
                 test_error_fct='class_error'):

        # Initialize weight using Bengio's rule
        # (n_out == 1, hence the ``n_in + 1`` denominator).
        self.weights_scale = 4 * sqrt(6. / (n_in + 1)) \
            if weights_scale is None \
            else weights_scale

        if parameters is not None:
            self.W, self.b = parameters
        else:
            # Uniform in [-weights_scale/2, weights_scale/2).
            self.W = self.weights_scale * \
                     sampler.gen_uniform((n_in, 1), dtype=np.float32) \
                     - .5 * self.weights_scale

            self.b = gpuarray.zeros((1,), dtype=np.float32)

        self.n_in = n_in

        self.test_error_fct = test_error_fct

        self.l1_penalty_weight = l1_penalty_weight
        self.l2_penalty_weight = l2_penalty_weight

        # One multiplier for W and one for b.
        self.lr_multiplier = 2 * [1. / np.sqrt(n_in, dtype=np.float32)] \
            if lr_multiplier is None else lr_multiplier

        # Pre-declared scratch buffers, resized per batch by the framework.
        self.persistent_temp_objects_config = (
            ('activations', ('batch_size', 1), np.float32),
            ('df_W', self.W.shape, np.float32),
            ('df_b', self.b.shape, np.float32),
            ('df_input', ('batch_size', self.n_in), np.float32),
            ('delta', ('batch_size', 1), np.float32)
        )

    @property
    def architecture(self):
        # Serializable description of the layer's shape.
        return {'class': self.__class__,
                'n_in': self.n_in,
                'n_out': 1}

    def feed_forward(self, input_data, prediction=False):
        """Propagate forward through the layer.

        **Parameters:**

        input_data : ``GPUArray``
            Inpute data to compute activations for.

        prediction : bool, optional
            Whether to use prediction model. Only relevant when using
            dropout. If true, then weights are halved if the layers
            uses dropout.

        **Returns:**

        activations : ``GPUArray``
            The activations of the output units.
        """
        activations = self.get_temp_object('activations',
            (input_data.shape[0], 1), input_data.dtype)
        # activations = X @ W + b, then sigmoid.
        linalg.dot(input_data, self.W, target=activations)
        activations = add_vec_to_mat(activations, self.b, inplace=True)
        # NOTE(review): ``sigmoid`` presumably operates in place on its
        # argument (its return value is discarded) — confirm against
        # hebel.pycuda_ops.elementwise.
        sigmoid(activations)

        return activations

    def backprop(self, input_data, targets,
                 cache=None):
        """ Backpropagate through the logistic layer.

        **Parameters:**

        input_data : ``GPUArray``
            Inpute data to compute activations for.

        targets : ``GPUArray``
            The target values of the units.

        cache : list of ``GPUArray``
            Cache obtained from forward pass. If the cache is
            provided, then the activations are not recalculated.

        **Returns:**

        gradients : tuple of ``GPUArray``
            Gradients with respect to the weights and biases in the
            form ``(df_weights, df_biases)``.

        df_input : ``GPUArray``
            Gradients with respect to the input.
        """

        if cache is not None:
            activations = cache
        else:
            activations = self.feed_forward(input_data, prediction=False)

        # Get temporary objects
        df_W = self.get_temp_object('df_W', self.W.shape, self.W.dtype)
        df_b = self.get_temp_object('df_b', self.b.shape, self.b.dtype)
        df_input = self.get_temp_object('df_input',
            input_data.shape, input_data.dtype)
        delta = self.get_temp_object('delta',
            activations.shape, activations.dtype)

        # delta = activations - targets (cross-entropy + sigmoid shortcut);
        # NaNs in delta are zeroed (NaN targets mark missing labels —
        # TODO confirm against callers).
        substract_matrix(activations, targets, delta)
        nan_to_zeros(delta, delta)

        # Gradient wrt weights
        linalg.dot(input_data, delta, transa='T', target=df_W)
        # Gradient wrt bias
        matrix_sum_out_axis(delta, 0, target=df_b)
        # Gradient wrt input
        linalg.dot(delta, self.W, transb='T', target=df_input)

        # L1 penalty
        if self.l1_penalty_weight:
            df_W -= self.l1_penalty_weight * sign(self.W)

        # L2 penalty
        if self.l2_penalty_weight:
            df_W -= self.l2_penalty_weight * self.W

        return (df_W, df_b), df_input

    def test_error(self, input_data, targets, average=True,
                   cache=None, prediction=True):
        """Compute the test error function given some data and targets.

        Uses the error function defined in
        :class:`SoftmaxLayer.test_error_fct`, which may be different
        from the cross-entropy error function used for
        training'. Alternatively, the other test error functions may
        be called directly.

        **Parameters:**

        input_data : ``GPUArray``
            Inpute data to compute the test error function for.

        targets : ``GPUArray``
            The target values of the units.

        average : bool
            Whether to divide the value of the error function by the
            number of data points given.

        cache : list of ``GPUArray``
            Cache obtained from forward pass. If the cache is
            provided, then the activations are not recalculated.

        prediction : bool, optional
            Whether to use prediction model. Only relevant when using
            dropout. If true, then weights are halved if the layers
            uses dropout.

        **Returns:**

        test_error : float
        """
        # NOTE(review): the class docstring advertises 'kl_error', but only
        # 'class_error' and 'cross_entropy_error' are dispatched here —
        # 'kl_error' raises ValueError; confirm intended behavior.
        if self.test_error_fct == 'class_error':
            test_error = self.class_error
        elif self.test_error_fct == 'cross_entropy_error':
            test_error = self.cross_entropy_error
        else:
            raise ValueError('unknown test error function "%s"'
                             % self.test_error_fct)

        return test_error(input_data, targets, average,
                          cache, prediction)

    def cross_entropy_error(self, input_data, targets, average=True,
                            cache=None, prediction=False):
        """ Return the cross entropy error
        """

        if cache is not None:
            activations = cache
        else:
            activations = \
                self.feed_forward(input_data, prediction=prediction)

        loss = cross_entropy_logistic(activations, targets)

        if average: loss /= targets.shape[0]

        return loss

    # Training uses the same cross-entropy loss.
    train_error = cross_entropy_error

    def class_error(self, input_data, targets, average=True,
                    cache=None, prediction=False):
        """ Return the classification error rate
        """

        if cache is not None:
            activations = cache
        else:
            activations = \
                self.feed_forward(input_data, prediction=prediction)

        targets = targets.get()
        # Threshold both prediction and target at .5 and count mismatches
        # (both arrays are pulled back to the host first).
        class_error = np.sum((activations.get() >= .5) !=
                             (targets >= .5))

        if average: class_error = float(class_error) / targets.shape[0]

        return class_error
PypiClean
/MapProxy-1.16.0.tar.gz/MapProxy-1.16.0/mapproxy/config/spec.py
# Validation spec for mapproxy.yaml configuration files. Each module-level
# dict below describes the allowed structure of one configuration section,
# expressed with the dictspec combinators imported below (one_of, anything,
# number, recursive, required, type_spec, combined).
from __future__ import print_function

import datetime

from mapproxy.util.ext.dictspec.validator import validate, ValidationError
from mapproxy.util.ext.dictspec.spec import one_of, anything, number
from mapproxy.util.ext.dictspec.spec import recursive, required, type_spec, combined

from mapproxy.compat import string_type


def validate_options(conf_dict):
    """
    Validate `conf_dict` against the mapproxy.yaml spec.

    Returns a tuple with a list of errors and a bool. The list is empty when
    no errors were found. The bool is True when the errors are informal and
    not critical.
    """
    try:
        validate(mapproxy_yaml_spec, conf_dict)
    except ValidationError as ex:
        return ex.errors, ex.informal_only
    else:
        return [], True

# Cache expiration/refresh times: relative durations or absolute timestamps.
time_spec = {
    'seconds': number(),
    'minutes': number(),
    'hours': number(),
    'days': number(),
    'weeks': number(),
    'time': anything(),
    'mtime': str(),
}

# Geographic coverage definition; recursive to allow nested set operations
# (union/difference/intersection of coverages).
coverage = recursive({
    'polygons': str(),
    'polygons_srs': str(),
    'bbox': one_of(str(), [number()]),
    'bbox_srs': str(),
    'ogr_datasource': str(),
    'ogr_where': str(),
    'ogr_srs': str(),
    'datasource': one_of(str(), [number()]),
    'where': str(),
    'srs': str(),
    'expire_tiles': str(),
    'union': [recursive()],
    'difference': [recursive()],
    'intersection': [recursive()],
    'clip': bool(),
})

# Image encoding/processing options, shared by globals, caches and sources.
image_opts = {
    'mode': str(),
    'colors': number(),
    'transparent': bool(),
    'resampling_method': str(),
    'format': str(),
    'encoding_options': {
        anything(): anything()
    },
    'merge_method': str(),
}

# HTTP client options for requests to remote sources.
http_opts = {
    'method': str(),
    'client_timeout': number(),
    'ssl_no_cert_checks': bool(),
    'ssl_ca_certs': str(),
    'hide_error_details': bool(),
    'headers': {
        anything(): str()
    },
    'manage_cookies': bool(),
}

mapserver_opts = {
    'binary': str(),
    'working_dir': str(),
}

# Scale/resolution limits; reused by sources and layers.
scale_hints = {
    'max_scale': number(),
    'min_scale': number(),
    'max_res': number(),
    'min_res': number(),
}

# Options common to every source type.
source_commons = combined(
    scale_hints,
    {
        'concurrent_requests': int(),
        'coverage': coverage,
        'seed_only': bool(),
    }
)

riak_node = {
    'host': str(),
    'pb_port': number(),
    'http_port': number(),
}

# Per-backend cache options, selected by the `type` key (see type_spec below).
cache_types = {
    'file': {
        'directory_layout': str(),
        'use_grid_names': bool(),
        'directory': str(),
        'tile_lock_dir': str(),
    },
    'sqlite': {
        'directory': str(),
        'sqlite_timeout': number(),
        'sqlite_wal': bool(),
        'tile_lock_dir': str(),
    },
    'mbtiles': {
        'filename': str(),
        'sqlite_timeout': number(),
        'sqlite_wal': bool(),
        'tile_lock_dir': str(),
    },
    'geopackage': {
        'filename': str(),
        'directory': str(),
        'tile_lock_dir': str(),
        'table_name': str(),
        'levels': bool(),
    },
    'couchdb': {
        'url': str(),
        'db_name': str(),
        'tile_metadata': {
            anything(): anything()
        },
        'tile_id': str(),
        'tile_lock_dir': str(),
    },
    's3': {
        'bucket_name': str(),
        'directory_layout': str(),
        'directory': str(),
        'profile_name': str(),
        'region_name': str(),
        'endpoint_url': str(),
        'access_control_list': str(),
        'tile_lock_dir': str(),
    },
    'riak': {
        'nodes': [riak_node],
        'protocol': one_of('pbc', 'http', 'https'),
        'bucket': str(),
        'default_ports': {
            'pb': number(),
            'http': number(),
        },
        'secondary_index': bool(),
        'tile_lock_dir': str(),
    },
    'redis': {
        'host': str(),
        'port': int(),
        'db': int(),
        'prefix': str(),
        'default_ttl': int(),
    },
    'compact': {
        'directory': str(),
        required('version'): number(),
        'tile_lock_dir': str(),
    },
    'azureblob': {
        'connection_string': str(),
        'container_name': str(),
        'directory_layout': str(),
        'directory': str(),
        'tile_lock_dir': str(),
    },
}

# Per-status-code error handling for tile/WMS sources.
on_error = {
    anything(): {
        required('response'): one_of([int], str),
        'cache': bool,
        'authorize_stale': bool
    }
}

# INSPIRE metadata for the WMS service: either linked (external URL) or
# fully embedded in the capabilities.
inspire_md = {
    'linked': {
        required('metadata_url'): {
            required('url'): str,
            required('media_type'): str,
        },
        required('languages'): {
            required('default'): str,
        },
    },
    'embedded': {
        required('resource_locators'): [{
            required('url'): str,
            required('media_type'): str,
        }],
        required('temporal_reference'): {
            'date_of_publication': one_of(str, datetime.date),
            'date_of_creation': one_of(str, datetime.date),
            'date_of_last_revision': one_of(str, datetime.date),
        },
        required('conformities'): [{
            'title': string_type,
            'uris': [str],
            'date_of_publication': one_of(str, datetime.date),
            'date_of_creation': one_of(str, datetime.date),
            'date_of_last_revision': one_of(str, datetime.date),
            required('resource_locators'): [{
                required('url'): str,
                required('media_type'): str,
            }],
            required('degree'): str,
        }],
        required('metadata_points_of_contact'): [{
            'organisation_name': string_type,
            'email': str,
        }],
        required('mandatory_keywords'): [str],
        'keywords': [{
            required('title'): string_type,
            'date_of_publication': one_of(str, datetime.date),
            'date_of_creation': one_of(str, datetime.date),
            'date_of_last_revision': one_of(str, datetime.date),
            'uris': [str],
            'resource_locators': [{
                required('url'): str,
                required('media_type'): str,
            }],
            required('keyword_value'): string_type,
        }],
        required('metadata_date'): one_of(str, datetime.date),
        'metadata_url': {
            required('url'): str,
            required('media_type'): str,
        },
        required('languages'): {
            required('default'): str,
        },
    },
}

# WMS 1.3.0 per-layer metadata (extended by plugins via
# add_subcategory_to_layer_md below).
wms_130_layer_md = {
    'abstract': string_type,
    'keyword_list': [
        {
            'vocabulary': string_type,
            'keywords': [string_type],
        }
    ],
    'attribution': {
        'title': string_type,
        'url': str,
        'logo': {
            'url': str,
            'width': int,
            'height': int,
            'format': string_type,
        }
    },
    'identifier': [
        {
            'url': str,
            'name': string_type,
            'value': string_type,
        }
    ],
    'metadata': [
        {
            'url': str,
            'type': str,
            'format': str,
        },
    ],
    'data': [
        {
            'url': str,
            'format': str,
        }
    ],
    'feature_list': [
        {
            'url': str,
            'format': str,
        }
    ],
}

# Tile grid definition options.
grid_opts = {
    'base': str(),
    'name': str(),
    'srs': str(),
    'bbox': one_of(str(), [number()]),
    'bbox_srs': str(),
    'num_levels': int(),
    'res': [number()],
    'res_factor': one_of(number(), str()),
    'max_res': number(),
    'min_res': number(),
    'stretch_factor': number(),
    'max_shrink_factor': number(),
    'align_resolutions_with': str(),
    'origin': str(),
    'tile_size': [int()],
    'threshold_res': [number()],
}

# Service-level metadata shared by the OGC services (WMS/WMTS).
ogc_service_md = {
    'title': string_type,
    'abstract': string_type,
    'online_resource': string_type,
    'contact': anything(),
    'fees': string_type,
    'access_constraints': string_type,
    'keyword_list': [
        {
            'vocabulary': string_type,
            'keywords': [string_type],
        }
    ],
}

# A single band extracted from another cache/source (for band merging).
band_source = {
    required('source'): str(),
    required('band'): int,
    'factor': number(),
}

# Mapping of target band (r/g/b/a/luminance) to contributing band sources.
band_sources = {
    'r': [band_source],
    'g': [band_source],
    'b': [band_source],
    'a': [band_source],
    'l': [band_source],
}

# Top-level spec for the whole mapproxy.yaml document.
mapproxy_yaml_spec = {
    '__config_files__': anything(),  # only used internally
    'globals': {
        'image': {
            'resampling_method': 'method',
            'paletted': bool(),
            'stretch_factor': number(),
            'max_shrink_factor': number(),
            'jpeg_quality': number(),
            'formats': {
                anything(): image_opts,
            },
            'font_dir': str(),
            'merge_method': str(),
        },
        'http': combined(
            http_opts,
            {
                'access_control_allow_origin': one_of(str(), {}),
            }
        ),
        'cache': {
            'base_dir': str(),
            'lock_dir': str(),
            'tile_lock_dir': str(),
            'meta_size': [number()],
            'meta_buffer': number(),
            'bulk_meta_tiles': bool(),
            'max_tile_limit': number(),
            'minimize_meta_requests': bool(),
            'concurrent_tile_creators': int(),
            'link_single_color_images': bool(),
            's3': {
                'bucket_name': str(),
                'profile_name': str(),
                'region_name': str(),
                'endpoint_url': str(),
            },
            'azureblob': {
                'connection_string': str(),
                'container_name': str(),
            },
        },
        'grid': {
            'tile_size': [int()],
        },
        'srs': {
            'axis_order_ne': [str()],
            'axis_order_en': [str()],
            'proj_data_dir': str(),
            'preferred_src_proj': {anything(): [str()]},
        },
        'tiles': {
            'expires_hours': number(),
        },
        'mapserver': mapserver_opts,
        'renderd': {
            'address': str(),
        }
    },
    'grids': {
        anything(): grid_opts,
    },
    'caches': {
        anything(): {
            required('sources'): one_of([string_type], band_sources),
            'name': str(),
            'grids': [str()],
            'cache_dir': str(),
            'meta_size': [number()],
            'meta_buffer': number(),
            'bulk_meta_tiles': bool(),
            'minimize_meta_requests': bool(),
            'concurrent_tile_creators': int(),
            'disable_storage': bool(),
            'format': str(),
            'image': image_opts,
            'request_format': str(),
            'use_direct_from_level': number(),
            'use_direct_from_res': number(),
            'link_single_color_images': bool(),
            'cache_rescaled_tiles': bool(),
            'upscale_tiles': int(),
            'downscale_tiles': int(),
            'refresh_before': time_spec,
            'watermark': {
                'text': string_type,
                'font_size': number(),
                'color': one_of(str(), [number()]),
                'opacity': number(),
                'spacing': str(),
            },
            'cache': type_spec('type', cache_types)
        }
    },
    'services': {
        'demo': {},
        'kml': {
            'use_grid_names': bool(),
        },
        'tms': {
            'use_grid_names': bool(),
            'origin': str(),
        },
        'wmts': {
            'kvp': bool(),
            'restful': bool(),
            'restful_template': str(),
            'restful_featureinfo_template': str(),
            'md': ogc_service_md,
            'featureinfo_formats': [
                {
                    required('mimetype'): str(),
                    'suffix': str(),
                },
            ],
        },
        'wms': {
            'srs': [str()],
            'bbox_srs': [one_of(str(), {'bbox': [number()], 'srs': str()})],
            'image_formats': [str()],
            'attribution': {
                'text': string_type,
            },
            'featureinfo_types': [str()],
            'featureinfo_xslt': {
                anything(): str()
            },
            'on_source_errors': str(),
            'max_output_pixels': one_of(number(), [number()]),
            'strict': bool(),
            'md': ogc_service_md,
            'inspire_md': type_spec('type', inspire_md),
            'versions': [str()],
        },
    },
    # Source definitions, dispatched on the `type` key (extended by plugins
    # via add_source_to_mapproxy_yaml_spec below).
    'sources': {
        anything(): type_spec('type', {
            'wms': combined(source_commons, {
                'wms_opts': {
                    'version': str(),
                    'map': bool(),
                    'featureinfo': bool(),
                    'legendgraphic': bool(),
                    'legendurl': str(),
                    'featureinfo_format': str(),
                    'featureinfo_xslt': str(),
                    'featureinfo_out_format': str(),
                },
                'image': combined(image_opts, {
                    'opacity': number(),
                    'transparent_color': one_of(str(), [number()]),
                    'transparent_color_tolerance': number(),
                }),
                'supported_formats': [str()],
                'supported_srs': [str()],
                'http': http_opts,
                'on_error': on_error,
                'forward_req_params': [str()],
                required('req'): {
                    required('url'): str(),
                    anything(): anything()
                }
            }),
            'mapserver': combined(source_commons, {
                'wms_opts': {
                    'version': str(),
                    'map': bool(),
                    'featureinfo': bool(),
                    'legendgraphic': bool(),
                    'legendurl': str(),
                    'featureinfo_format': str(),
                    'featureinfo_xslt': str(),
                },
                'image': combined(image_opts, {
                    'opacity': number(),
                    'transparent_color': one_of(str(), [number()]),
                    'transparent_color_tolerance': number(),
                }),
                'supported_formats': [str()],
                'supported_srs': [str()],
                'forward_req_params': [str()],
                required('req'): {
                    required('map'): str(),
                    anything(): anything()
                },
                'mapserver': mapserver_opts,
            }),
            'tile': combined(source_commons, {
                required('url'): str(),
                'transparent': bool(),
                'image': image_opts,
                'grid': str(),
                'request_format': str(),
                'origin': str(),  # TODO: remove with 1.5
                'http': http_opts,
                'on_error': on_error,
            }),
            'mapnik': combined(source_commons, {
                required('mapfile'): str(),
                'transparent': bool(),
                'image': image_opts,
                'layers': one_of(str(), [str()]),
                'use_mapnik2': bool(),
                'scale_factor': number(),
                'multithreaded': bool(),
            }),
            'arcgis': combined(source_commons, {
                required('req'): {
                    required('url'): str(),
                    'dpi': int(),
                    'layers': str(),
                    'transparent': bool(),
                    'time': str()
                },
                'opts': {
                    'featureinfo': bool(),
                    'featureinfo_tolerance': number(),
                    'featureinfo_return_geometries': bool(),
                },
                'supported_srs': [str()],
                'http': http_opts,
                'on_error': on_error
            }),
            'debug': {
            },
        })
    },
    # Layers: either a mapping of name -> layer (deprecated flat form) or a
    # recursive list of layers with nested sub-layers.
    'layers': one_of(
        {
            anything(): combined(scale_hints, {
                'sources': [string_type],
                required('title'): string_type,
                'legendurl': str(),
                'md': wms_130_layer_md,
            })
        },
        recursive([combined(scale_hints, {
            'sources': [string_type],
            'tile_sources': [string_type],
            'name': str(),
            required('title'): string_type,
            'legendurl': str(),
            'layers': recursive(),
            'md': wms_130_layer_md,
            'dimensions': {
                anything(): {
                    required('values'): [one_of(string_type, float, int)],
                    'default': one_of(string_type, float, int),
                }
            }
        })])
    ),
    # `parts` can be used for partial configurations that are referenced
    # from other sections (e.g. coverages, dimensions, etc.)
    'parts': anything(),
}


def add_source_to_mapproxy_yaml_spec(source_name, source_spec):
    """ Add a new source type to mapproxy_yaml_spec. Used by plugins. """
    # sources has a single anything() : {} member
    values = list(mapproxy_yaml_spec['sources'].values())
    assert len(values) == 1
    values[0].add_subspec(source_name, source_spec)


def add_service_to_mapproxy_yaml_spec(service_name, service_spec):
    """ Add a new service type to mapproxy_yaml_spec. Used by plugins. """
    mapproxy_yaml_spec['services'][service_name] = service_spec


def add_subcategory_to_layer_md(category_name, category_def):
    """ Add a new category to wms_130_layer_md. Used by plugins. """
    wms_130_layer_md[category_name] = category_def
PypiClean
/Curtain-0.3.tar.gz/Curtain-0.3/src/curtain/handler.py
# NOTE(review): Python 2 source — uses the `exec ... in d` statement, a
# tuple-parameter lambda (`lambda (name, el): ...`) and dict.items()
# concatenation, none of which are valid Python 3.
import xml.sax.handler, xml.sax.xmlreader
import pkg_resources
from curtain import ns
import processors


class _AttributesNSImpl(xml.sax.xmlreader.AttributesNSImpl):
    """AttributesNS wrapper that merges extra attributes into an existing
    attribute dict and derives the qname mapping from the (uri, local) keys."""

    def __init__(self, dct, extra_attrs):
        # Python 2: dict.items() returns lists, so `+` concatenates them.
        attrs = dict(dct.items() + extra_attrs.items())
        # Build qnames as "uri:local" (or just "local" when there is no uri).
        xml.sax.xmlreader.AttributesNSImpl.__init__(self, attrs,
            dict([((k[0] + ':' + k[1] if k[0] is not None else k[1]), v)
                  for k, v in attrs.items()]))


class _Handler(xml.sax.handler.ContentHandler):
    '''
    The SAX handler which process a template and produce the function.

    :IVariables:
        __template : Template
            The template which requested this handler.
        __function : list(str)
            The body of the generated function, as a list of strings.
        __locator : Locator
            The locator possibly provided by the XML parser.
        __indentation_level : int
            The current indentation level for output.
        __completed : bool
            Whether the processing of the whole document has been completed
            or not.
        __enders : list(list(callable))
            A stack of list of methods which should be called at the end tag,
            just after the endElementNS event code has been produced.
        __preenders : list(list(callable))
            A stack of list of methods which should be called at the end tag,
            just before the endElementNS event code has been produced.
        __pmappings : list(bool)
            A stack of booleans meaning if the i-th 'startPrefixMapping' has
            been emitted or not.
        __processors : list(str, Processor)
            A list of processor names and instances, ordered by priority.
        __output_suspension_level : int
            An integer which tells how many times the _suspend_output
            function has been called (_resume_output decrease this counter
            instead).
        __lastvarnum : int
            The number of variables used, to produce new unique variable
            names.
    '''

    #
    # public interface
    #

    def __init__(self, template):
        '''
        Create a new handler.

        :Parameters:
            template : Template
                The template which requested this handler.
        '''
        # initialize internal variables
        self.__template = template
        self.__function = []
        self.__locator = None
        self.__indentation_level = 0
        self.__completed = False
        self.__enders = []
        self.__preenders = []
        self.__pmappings = []
        self.__processors = []
        self.__output_suspension_level = 0
        self.__lastvarnum = 0
        # Hooks run before/after each handled SAX event (keyed by event name).
        self.__precallbacks = {'startDocument': [], 'endDocument': [],
                               'startElementNS': [], 'endElementNS': [],
                               'characters': []}
        self.__postcallbacks = {'startDocument': [], 'endDocument': [],
                                'startElementNS': [], 'endElementNS': [],
                                'characters': []}
        self.__register_default_processor_classes()

    @property
    def source(self):
        'The source code of the compiled function.'
        # Only valid once endDocument has run.
        assert self.__completed
        return '\n'.join(self.__function)

    @property
    def function(self):
        '''The function which produce the output; takes the xml generator
        and environment as arguments.'''
        d = {}
        # Python 2 exec statement: compile the generated source and pull out
        # the `process` function it defines.
        exec self.source in d
        return d['process']

    def register_processor(self, name, processor_class):
        ''' Register a new processor. '''
        p = processor_class()
        self.__processors.append((name, p))
        # Higher priority first (Python 2 tuple-parameter lambda).
        self.__processors.sort(key = lambda (name, el): -el.priority)
        p.registered(self)

    #
    # internal interface
    #

    # Indentation unit used in the generated source.
    __tab = '\t'

    def __attrs(self, attrs):
        # Emit a source snippet reconstructing the non-curtain attributes,
        # merged with the template-provided `_extra_attrs` at run time.
        return 'curtain.handler._AttributesNSImpl(%r, _extra_attrs)' % dict(
            (k, v) for (k, v) in attrs.items() if k[0] != ns)

    def __register_default_processor_classes(self):
        # Load every processor advertised under the 'curtain.processors'
        # entry-point group.
        for entrypoint in pkg_resources.iter_entry_points('curtain.processors'):
            name = entrypoint.name
            k = entrypoint.load()
            self.register_processor(name, k)

    def __callbacks(self, callbackmap, name, *args, **kwargs):
        # Invoke every registered hook for the given event name.
        for entry in callbackmap[name]:
            entry(*args, **kwargs)

    def __save_location_deco(meth):
        # Decorator: emit a `_location.new(line, col)` statement before the
        # decorated SAX event handler runs, so runtime errors can be traced
        # back to the template position.
        def decorated(self, *args, **kwargs):
            ln, cn = self.__locator.getLineNumber(), self.__locator.getColumnNumber()
            self._add('_location.new(%r, %r)' % (ln, cn))
            return meth(self, *args, **kwargs)
        return decorated

    #
    # processors interface
    #

    def _indent(self):
        self.__indentation_level += 1

    def _unindent(self):
        self.__indentation_level -= 1

    def _add(self, line):
        # Append a line of generated source, unless output is suspended.
        if self.__output_suspension_level == 0:
            self.__function.append(
                self.__tab*self.__indentation_level + line)

    def _get_var(self):
        # Produce a fresh, unique variable name for the generated source.
        name = '_var_%d' % self.__lastvarnum
        self.__lastvarnum += 1
        return name

    def _add_ender(self, ender):
        self.__enders[-1].append(ender)

    def _add_preender(self, preender):
        self.__preenders[-1].append(preender)

    def _suspend_output(self):
        self.__output_suspension_level += 1

    def _resume_output(self):
        self.__output_suspension_level -= 1

    def _reset_suspension(self, value):
        # Force the suspension counter to `value`, returning the old value
        # so the caller can restore it later.
        old = self.__output_suspension_level
        self.__output_suspension_level = value
        return old

    def _register_precallback(self, name, value):
        self.__precallbacks[name].append(value)

    def _register_postcallback(self, name, value):
        self.__postcallbacks[name].append(value)

    def _unregister_precallback(self, name, value):
        self.__precallbacks[name].remove(value)

    def _unregister_postcallback(self, name, value):
        self.__postcallbacks[name].remove(value)

    #
    # xml.sax.handlers.ContentHandler interface
    #

    def setDocumentLocator(self, locator):
        # save the locator
        self.__locator = locator

    def startDocument(self):
        # precallbacks
        self.__callbacks(self.__precallbacks, 'startDocument')
        # start of the program: emit the `process` function header
        self.__completed = False
        self._add('import curtain')
        self._add('def process(xml_generator, env, _location, _slots = {}, _i18n_context = None):')
        self._indent()
        self._add("globals().update(env)")
        # startDocument is only generated for top-level rendering (no slots)
        self._add('if len(_slots) == 0:')
        self._indent()
        self._add('xml_generator.startDocument()')
        self._unindent()
        # postcallbacks
        self.__callbacks(self.__postcallbacks, 'startDocument')

    def endDocument(self):
        # precallbacks
        self.__callbacks(self.__precallbacks, 'endDocument')
        # end of program
        self._add('if len(_slots) == 0:')
        self._indent()
        self._add('xml_generator.endDocument()')
        self._unindent()
        self._unindent()
        self.__completed = True
        # postcallbacks
        self.__callbacks(self.__postcallbacks, 'endDocument')

    @__save_location_deco
    def startElementNS(self, name, qname, attrs):
        # precallbacks
        self.__callbacks(self.__precallbacks, 'startElementNS', name, qname, attrs)
        # initialize environment: new ender/preender frames for this element
        self.__enders.append([])
        self.__preenders.append([])
        # invoke processor classes for the curtain-namespaced attributes
        attrnames = [attrname[1] for attrname in attrs.keys() if attrname[0] == ns]
        processors = []
        for pname, p in self.__processors:
            p.tag(self)
            if pname in attrnames:
                value = attrs[(ns, pname)].strip()
                # Parse the attribute value according to the processor's
                # declared value_kind.
                if p.value_kind == 'simple':
                    pass
                elif p.value_kind == 'single':
                    # "name rest" -> (name, rest)
                    i = value.index(' ')
                    value = (value[:i], value[i+1:])
                elif p.value_kind == 'list':
                    # "n1 v1; n2 v2; ..." -> [(n1, v1), (n2, v2), ...]
                    v = []
                    for subvalue in value.split(';'):
                        subvalue = subvalue.strip()
                        i = subvalue.index(' ')
                        v.append((subvalue[:i], subvalue[i+1:]))
                    value = v
                else:
                    raise ValueError('unknown value_kind %r' % p.value_kind)
                p.process(self, value)
                processors.append((p, value))
        # produce tag (curtain-namespaced elements produce no output tag)
        if name[0] != ns:
            self._add('xml_generator.startElementNS(%r, %r, %s)' % (name,
                qname, self.__attrs(attrs)))
        # processor post-processing
        for p, value in processors:
            p.post_process(self, value)
        # postcallbacks
        self.__callbacks(self.__postcallbacks, 'startElementNS', name, qname, attrs)

    @__save_location_deco
    def endElementNS(self, name, qname):
        # precallbacks
        self.__callbacks(self.__precallbacks, 'endElementNS', name, qname)
        # preenders: run before the end-tag code is emitted
        preender = self.__preenders.pop()
        for f in preender:
            f()
        # code
        if name[0] != ns:
            self._add('xml_generator.endElementNS(%r, %r)' % (name, qname))
        # postenders: run after the end-tag code is emitted
        ender = self.__enders.pop()
        for f in ender:
            f()
        # postcallbacks
        self.__callbacks(self.__postcallbacks, 'endElementNS', name, qname)

    @__save_location_deco
    def ignorableWhitespace(self, whitespace):
        self._add('xml_generator.ignorableWhitespace(%r)' % whitespace)

    @__save_location_deco
    def characters(self, content):
        # precallbacks
        self.__callbacks(self.__precallbacks, 'characters', content)
        # code
        self._add('xml_generator.characters(%r)' % content)
        # postcallbacks
        self.__callbacks(self.__postcallbacks, 'characters', content)

    @__save_location_deco
    def startPrefixMapping(self, prefix, uri):
        # The curtain namespace itself is never emitted; remember on a stack
        # whether this mapping was produced so endPrefixMapping can match it.
        if uri != ns:
            self._add('xml_generator.startPrefixMapping(%r, %r)' % (prefix, uri))
            self.__pmappings.append(True)
        else:
            self.__pmappings.append(False)

    @__save_location_deco
    def endPrefixMapping(self, prefix):
        if self.__pmappings.pop():
            self._add('xml_generator.endPrefixMapping(%r)' % prefix)
PypiClean
/Exegol-4.2.5.tar.gz/Exegol-4.2.5/exegol-docker-build/sources/install/package_misc.sh
# Exegol image build: "misc" package of offensive miscellaneous tools.
# Helper functions (fapt, colorecho, add-history, add-aliases,
# add-test-command, add-to-list, criticalecho-noexit, set_go_env,
# set_ruby_env) come from common.sh.
source common.sh

# Install small utilities available straight from apt.
function install_misc_apt_tools() {
    fapt rlwrap imagemagick ascii rsync
    add-history rlwrap
    add-history imagemagick
    add-history ascii
    add-history rsync
    add-test-command "rlwrap --version" # Reverse shell utility
    add-test-command "convert -version" # Copy, modify, and distribute image
    add-test-command "ascii -v" # The ascii table in the shell
    add-test-command "rsync -h" # File synchronization tool for efficiently copying and updating data between local or remote locations.
    add-to-list "rlwrap,https://github.com/hanslub42/rlwrap,rlwrap is a small utility that wraps input and output streams of executables / making it possible to edit and re-run input history"
    add-to-list "imagemagick,https://github.com/ImageMagick/ImageMagick,ImageMagick is a free and open-source image manipulation tool used to create / edit / compose / or convert bitmap images."
    add-to-list "ascii,https://github.com/moul/ascii,ASCII command-line tool to replace images with color-coded ASCII art."
    add-to-list "rsync,https://packages.debian.org/sid/rsync,File synchronization tool for efficiently copying and updating data between local or remote locations"
}

# Install goshs (Go-based HTTP/S file server) via `go install`.
function install_goshs() {
    colorecho "Installing goshs"
    go install -v github.com/patrickhener/goshs@latest
    add-history goshs
    add-test-command "goshs -v"
    add-to-list "goshs,https://github.com/patrickhener/goshs,Goshs is a replacement for Python's SimpleHTTPServer. It allows uploading and downloading via HTTP/S with either self-signed certificate or user provided certificate and you can use HTTP basic auth."
}

# Install shellerator (reverse-shell one-liner generator) via pipx.
function install_shellerator() {
    colorecho "Installing shellerator"
    python3 -m pipx install git+https://github.com/ShutdownRepo/shellerator
    add-history shellerator
    add-test-command "shellerator --help"
    add-to-list "shellerator,https://github.com/ShutdownRepo/Shellerator,a simple command-line tool for generating shellcode"
}

# Install uberfile (file-download one-liner generator) via pipx.
function install_uberfile() {
    colorecho "Installing uberfile"
    python3 -m pipx install git+https://github.com/ShutdownRepo/uberfile
    add-history uberfile
    add-test-command "uberfile --help"
    add-to-list "uberfile,https://github.com/ShutdownRepo/Uberfile,Uberfile is a simple command-line tool aimed to help pentesters quickly generate file downloader one-liners in multiple contexts (wget / curl / powershell / certutil...). This project code is based on my other similar project for one-liner reverseshell generation Shellerator."
}

# Install arsenal (interactive pentest cheatsheet launcher) via pipx.
function install_arsenal() {
    colorecho "Installing arsenal"
    python3 -m pipx install git+https://github.com/Orange-Cyberdefense/arsenal
    add-aliases arsenal
    add-history arsenal
    add-test-command "arsenal --version"
    add-to-list "arsenal,https://github.com/Orange-Cyberdefense/arsenal,Powerful weapons for penetration testing."
}

# Install whatportis (port-number lookup tool) via pipx.
function install_whatportis() {
    colorecho "Installing whatportis"
    python3 -m pipx install whatportis
    # TODO : FIX : "port": port[1] if port[1] else "---",list index out of range - cli.py
    # echo y | whatportis --update
    add-history whatportis
    add-test-command "whatportis --version"
    add-to-list "whatportis,https://github.com/ncrocfer/whatportis,Command-line tool to lookup port information"
}

# Clone exploitdb and register searchsploit (skipped when already present).
function install_searchsploit() {
    colorecho "Installing searchsploit"
    if [ ! -d /opt/tools/exploitdb ]
    then
        git -C /opt/tools/ clone --depth 1 https://gitlab.com/exploit-database/exploitdb
        add-history searchsploit
        add-test-command "searchsploit --help; searchsploit --help |& grep 'You can use any number of search terms'"
        add-to-list "searchsploit,https://gitlab.com/exploit-database/exploitdb,A command line search tool for Exploit-DB"
    else
        colorecho "Searchsploit is already installed"
    fi
}

# Symlink the searchsploit binary and adjust its rc file paths.
function configure_searchsploit() {
    colorecho "Configuring Searchsploit"
    ln -sf /opt/tools/exploitdb/searchsploit /opt/tools/bin/searchsploit
    # cp -n: keep an existing rc file if the user already has one.
    cp -n /opt/tools/exploitdb/.searchsploit_rc ~/
    # Comment out the papers database entries.
    sed -i 's/\(.*[pP]aper.*\)/#\1/' ~/.searchsploit_rc
    # Point the rc file at the image's clone location.
    sed -i 's/opt\/exploitdb/opt\/tools\/exploitdb/' ~/.searchsploit_rc
}

# Clone and register Trilium (built from sources in configure_trilium).
function install_trilium() {
    colorecho "Installing Trilium (building from sources)"
    # TODO : apt install in a second step
    fapt libpng16-16 libpng-dev pkg-config autoconf libtool build-essential nasm libx11-dev libxkbfile-dev
    git -C /opt/tools/ clone -b stable --depth 1 https://github.com/zadam/trilium.git
    # NOTE(review): unguarded cd (shellcheck SC2164) — if the clone failed,
    # the following commands run from the previous working directory.
    cd /opt/tools/trilium
    add-aliases trilium
    add-history trilium
    add-test-command "trilium-start;sleep 20;trilium-stop"
    add-to-list "trilium,https://github.com/zadam/trilium,Personal knowledge management system."
}

# Build Trilium with Node 16 (via nvm) and seed its data directory.
function configure_trilium() {
    colorecho "Configuring trilium"
    zsh -c "source ~/.zshrc && cd /opt/tools/trilium && nvm install 16 && nvm use 16 && npm install && npm rebuild"
    mkdir -p /root/.local/share/trilium-data
    cp -v /root/sources/assets/trilium/* /root/.local/share/trilium-data
}

# Download the ngrok binary matching the build architecture.
function install_ngrok() {
    colorecho "Installing ngrok"
    if [[ $(uname -m) = 'x86_64' ]]
    then
        wget -O /tmp/ngrok.zip https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
    elif [[ $(uname -m) = 'aarch64' ]]
    then
        wget -O /tmp/ngrok.zip https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-arm64.zip
    elif [[ $(uname -m) = 'armv7l' ]]
    then
        wget -O /tmp/ngrok.zip https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-arm.zip
    else
        criticalecho-noexit "This installation function doesn't support architecture $(uname -m)" && return
    fi
    unzip -d /opt/tools/bin/ /tmp/ngrok.zip
    add-history ngrok
    add-test-command "ngrok version"
    add-to-list "ngrok,https://github.com/inconshreveable/ngrok,Expose a local server behind a NAT or firewall to the internet"
}

# Install objectwalker (Python object-tree explorer) via pipx.
function install_objectwalker() {
    colorecho "Installing objectwalker"
    python3 -m pipx install git+https://github.com/p0dalirius/objectwalker
    add-history objectwalker
    add-test-command "objectwalker --help"
    add-to-list "objectwalker,https://github.com/p0dalirius/objectwalker,A python module to explore the object tree to extract paths to interesting objects in memory."
}

# Package dedicated to offensive miscellaneous tools
function package_misc() {
    set_go_env
    set_ruby_env
    install_misc_apt_tools
    install_goshs               # Web uploader/downloader page
    install_searchsploit        # Exploitdb local search engine
    install_shellerator         # Reverse shell generator
    install_uberfile            # file uploader/downloader commands generator
    install_arsenal             # Cheatsheets tool
    install_trilium             # notes taking tool
    install_ngrok               # expose a local development server to the Internet
    install_whatportis          # Search default port number
    install_objectwalker        # Python module to explore the object tree to extract paths to interesting objects in memory
}

# Post-install configuration steps for this package.
function package_misc_configure() {
    configure_searchsploit
    configure_trilium
}
PypiClean
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_eo.js
'use strict';
// AngularJS locale module for Esperanto ("eo"). Files like this are
// generated from CLDR data; the $locale value below provides the
// date/time names and number formatting patterns used by Angular filters.
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};

// Number of digits after the decimal point in the string form of n.
function getDecimals(n) {
  n = n + '';
  var i = n.indexOf('.');
  return (i == -1) ? 0 : n.length - i - 1;
}

// CLDR plural operands: v = number of visible fraction digits,
// f = visible fraction digits as an integer.
function getVF(n, opt_precision) {
  var v = opt_precision;

  if (undefined === v) {
    v = Math.min(getDecimals(n), 3);
  }

  var base = Math.pow(10, v);
  var f = ((n * base) | 0) % base;
  return {v: v, f: f};
}

$provide.value("$locale", {
  "DATETIME_FORMATS": {
    "AMPMS": [
      "atm",
      "ptm"
    ],
    "DAY": [
      "diman\u0109o",
      "lundo",
      "mardo",
      "merkredo",
      "\u0135a\u016ddo",
      "vendredo",
      "sabato"
    ],
    "MONTH": [
      "januaro",
      "februaro",
      "marto",
      "aprilo",
      "majo",
      "junio",
      "julio",
      "a\u016dgusto",
      "septembro",
      "oktobro",
      "novembro",
      "decembro"
    ],
    "SHORTDAY": [
      "di",
      "lu",
      "ma",
      "me",
      "\u0135a",
      "ve",
      "sa"
    ],
    "SHORTMONTH": [
      "jan",
      "feb",
      "mar",
      "apr",
      "maj",
      "jun",
      "jul",
      "a\u016dg",
      "sep",
      "okt",
      "nov",
      "dec"
    ],
    "fullDate": "EEEE, d-'a' 'de' MMMM y",
    "longDate": "y-MMMM-dd",
    "medium": "y-MMM-dd HH:mm:ss",
    "mediumDate": "y-MMM-dd",
    "mediumTime": "HH:mm:ss",
    "short": "yy-MM-dd HH:mm",
    "shortDate": "yy-MM-dd",
    "shortTime": "HH:mm"
  },
  "NUMBER_FORMATS": {
    "CURRENCY_SYM": "$",
    "DECIMAL_SEP": ",",
    "GROUP_SEP": "\u00a0",
    "PATTERNS": [
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 3,
        "minFrac": 0,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "",
        "posPre": "",
        "posSuf": ""
      },
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 2,
        "minFrac": 2,
        "minInt": 1,
        "negPre": "\u00a4\u00a0-",
        "negSuf": "",
        "posPre": "\u00a4\u00a0",
        "posSuf": ""
      }
    ]
  },
  "id": "eo",
  // CLDR plural rule for Esperanto: "one" for exact integer 1, else "other".
  "pluralCat": function(n, opt_precision) {  var i = n | 0;  var vf = getVF(n, opt_precision);  if (i == 1 && vf.v == 0) {    return PLURAL_CATEGORY.ONE;  }  return PLURAL_CATEGORY.OTHER;}
});
}]);
PypiClean
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/contrib/auth/__init__.py
"""Authentication framework entry points (vendored django.contrib.auth, Django 1.5).

Provides backend loading, ``authenticate``/``login``/``logout`` and session
helpers.  NOTE(review): this copy had its line breaks stripped in storage; the
token stream was reconstructed into valid Python without changing any logic.
"""
import re

from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.middleware.csrf import rotate_token
from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed

SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
REDIRECT_FIELD_NAME = 'next'


def load_backend(path):
    # ``path`` is a dotted path "package.module.ClassName"; split on the last
    # dot so the class name is separated from its module.
    i = path.rfind('.')
    module, attr = path[:i], path[i + 1:]
    try:
        mod = import_module(module)
    except ImportError as e:
        raise ImproperlyConfigured('Error importing authentication backend %s: "%s"' % (path, e))
    except ValueError:
        raise ImproperlyConfigured('Error importing authentication backends. Is AUTHENTICATION_BACKENDS a correctly defined list or tuple?')
    try:
        cls = getattr(mod, attr)
    except AttributeError:
        raise ImproperlyConfigured('Module "%s" does not define a "%s" authentication backend' % (module, attr))
    # Backends are instantiated fresh on every call.
    return cls()


def get_backends():
    from django.conf import settings
    backends = []
    for backend_path in settings.AUTHENTICATION_BACKENDS:
        backends.append(load_backend(backend_path))
    if not backends:
        raise ImproperlyConfigured('No authentication backends have been defined. Does AUTHENTICATION_BACKENDS contain anything?')
    return backends


def _clean_credentials(credentials):
    """
    Cleans a dictionary of credentials of potentially sensitive info before
    sending to less secure functions.

    Not comprehensive - intended for user_login_failed signal
    """
    SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
    CLEANSED_SUBSTITUTE = '********************'
    # Mutates and returns the same dict: keys that look sensitive get masked.
    for key in credentials:
        if SENSITIVE_CREDENTIALS.search(key):
            credentials[key] = CLEANSED_SUBSTITUTE
    return credentials


def authenticate(**credentials):
    """
    If the given credentials are valid, return a User object.
    """
    for backend in get_backends():
        try:
            user = backend.authenticate(**credentials)
        except TypeError:
            # This backend doesn't accept these credentials as arguments.
            # Try the next one.
            continue
        if user is None:
            continue
        # Annotate the user object with the path of the backend.
        user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
        return user

    # The credentials supplied are invalid to all backends, fire signal
    user_login_failed.send(sender=__name__,
                           credentials=_clean_credentials(credentials))


def login(request, user):
    """
    Persist a user id and a backend in the request. This way a user doesn't
    have to reauthenticate on every request. Note that data set during
    the anonymous session is retained when the user logs in.
    """
    if user is None:
        user = request.user
    # TODO: It would be nice to support different login methods, like signed
    # cookies.
    if SESSION_KEY in request.session:
        if request.session[SESSION_KEY] != user.pk:
            # To avoid reusing another user's session, create a new, empty
            # session if the existing session corresponds to a different
            # authenticated user.
            request.session.flush()
    else:
        request.session.cycle_key()
    request.session[SESSION_KEY] = user.pk
    request.session[BACKEND_SESSION_KEY] = user.backend
    if hasattr(request, 'user'):
        request.user = user
    # Rotate the CSRF token on login to prevent session fixation attacks.
    rotate_token(request)
    user_logged_in.send(sender=user.__class__, request=request, user=user)


def logout(request):
    """
    Removes the authenticated user's ID from the request and flushes their
    session data.
    """
    # Dispatch the signal before the user is logged out so the receivers have a
    # chance to find out *who* logged out.
    user = getattr(request, 'user', None)
    if hasattr(user, 'is_authenticated') and not user.is_authenticated():
        user = None
    user_logged_out.send(sender=user.__class__, request=request, user=user)

    request.session.flush()
    if hasattr(request, 'user'):
        from django.contrib.auth.models import AnonymousUser
        request.user = AnonymousUser()


def get_user_model():
    "Return the User model that is active in this project"
    from django.conf import settings
    from django.db.models import get_model

    try:
        app_label, model_name = settings.AUTH_USER_MODEL.split('.')
    except ValueError:
        raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
    user_model = get_model(app_label, model_name)
    if user_model is None:
        raise ImproperlyConfigured("AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL)
    return user_model


def get_user(request):
    # Rebuild the user from the session-stored id + backend path; any missing
    # session key (or an unknown user) yields AnonymousUser instead of raising.
    from django.contrib.auth.models import AnonymousUser
    try:
        user_id = request.session[SESSION_KEY]
        backend_path = request.session[BACKEND_SESSION_KEY]
        backend = load_backend(backend_path)
        user = backend.get_user(user_id) or AnonymousUser()
    except KeyError:
        user = AnonymousUser()
    return user
PypiClean
/CherryPy-18.8.0.tar.gz/CherryPy-18.8.0/cherrypy/_cpmodpy.py
"""mod_python gateway for CherryPy (vendored cherrypy._cpmodpy).

Bridges Apache/mod_python requests into the CherryPy request machinery and
provides helpers to launch Apache with a generated config.  NOTE(review):
line breaks in this stored copy were stripped; the code was reconstructed
from the token stream without changing logic.
"""
import io
import logging
import os
import re
import sys

from more_itertools import always_iterable

import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil


# ------------------------------ Request-handling


def setup(req):
    """One-time CherryPy engine setup, driven by PythonOption directives."""
    from mod_python import apache

    # Run any setup functions defined by a "PythonOption cherrypy.setup"
    # directive.
    options = req.get_options()
    if 'cherrypy.setup' in options:
        for function in options['cherrypy.setup'].split():
            atoms = function.split('::', 1)
            if len(atoms) == 1:
                mod = __import__(atoms[0], globals(), locals())
            else:
                modname, fname = atoms
                mod = __import__(modname, globals(), locals(), [fname])
                func = getattr(mod, fname)
                func()

    cherrypy.config.update({'log.screen': False,
                            'tools.ignore_headers.on': True,
                            'tools.ignore_headers.headers': ['Range'],
                            })

    engine = cherrypy.engine
    if hasattr(engine, 'signal_handler'):
        engine.signal_handler.unsubscribe()
    if hasattr(engine, 'console_control_handler'):
        engine.console_control_handler.unsubscribe()
    engine.autoreload.unsubscribe()
    cherrypy.server.unsubscribe()

    @engine.subscribe('log')
    def _log(msg, level):
        # Map stdlib logging levels to the closest Apache log level.
        newlevel = apache.APLOG_ERR
        if logging.DEBUG >= level:
            newlevel = apache.APLOG_DEBUG
        elif logging.INFO >= level:
            newlevel = apache.APLOG_INFO
        elif logging.WARNING >= level:
            newlevel = apache.APLOG_WARNING
        # On Windows, req.server is required or the msg will vanish. See
        # http://www.modpython.org/pipermail/mod_python/2003-October/014291.html
        # Also, "When server is not specified...LogLevel does not apply..."
        apache.log_error(msg, newlevel, req.server)

    engine.start()

    def cherrypy_cleanup(data):
        engine.exit()
    try:
        # apache.register_cleanup wasn't available until 3.1.4.
        apache.register_cleanup(cherrypy_cleanup)
    except AttributeError:
        req.server.register_cleanup(req, cherrypy_cleanup)


class _ReadOnlyRequest:
    # Expose only the read methods of the mod_python request object so
    # CherryPy cannot accidentally write to it.
    expose = ('read', 'readline', 'readlines')

    def __init__(self, req):
        for method in self.expose:
            self.__dict__[method] = getattr(req, method)


recursive = False

_isSetUp = False


def handler(req):
    """mod_python entry point: dispatch one Apache request through CherryPy."""
    from mod_python import apache
    try:
        global _isSetUp
        if not _isSetUp:
            setup(req)
            _isSetUp = True

        # Obtain a Request object from CherryPy
        local = req.connection.local_addr
        local = httputil.Host(
            local[0], local[1],
            req.connection.local_host or '')
        remote = req.connection.remote_addr
        remote = httputil.Host(
            remote[0], remote[1],
            req.connection.remote_host or '')

        scheme = req.parsed_uri[0] or 'http'
        req.get_basic_auth_pw()

        try:
            # apache.mpm_query only became available in mod_python 3.1
            q = apache.mpm_query
            threaded = q(apache.AP_MPMQ_IS_THREADED)
            forked = q(apache.AP_MPMQ_IS_FORKED)
        except AttributeError:
            bad_value = ("You must provide a PythonOption '%s', "
                         "either 'on' or 'off', when running a version "
                         'of mod_python < 3.1')

            options = req.get_options()

            threaded = options.get('multithread', '').lower()
            if threaded == 'on':
                threaded = True
            elif threaded == 'off':
                threaded = False
            else:
                raise ValueError(bad_value % 'multithread')

            forked = options.get('multiprocess', '').lower()
            if forked == 'on':
                forked = True
            elif forked == 'off':
                forked = False
            else:
                raise ValueError(bad_value % 'multiprocess')

        sn = cherrypy.tree.script_name(req.uri or '/')
        if sn is None:
            send_response(req, '404 Not Found', [], '')
        else:
            app = cherrypy.tree.apps[sn]
            method = req.method
            path = req.uri
            qs = req.args or ''
            reqproto = req.protocol
            headers = list(req.headers_in.copy().items())
            rfile = _ReadOnlyRequest(req)
            prev = None

            try:
                redirections = []
                while True:
                    request, response = app.get_serving(
                        local, remote, scheme, 'HTTP/1.1')
                    request.login = req.user
                    request.multithread = bool(threaded)
                    request.multiprocess = bool(forked)
                    request.app = app
                    request.prev = prev

                    # Run the CherryPy Request object and obtain the response
                    try:
                        request.run(method, path, qs, reqproto, headers, rfile)
                        break
                    except cherrypy.InternalRedirect:
                        ir = sys.exc_info()[1]
                        app.release_serving()
                        prev = request

                        if not recursive:
                            if ir.path in redirections:
                                raise RuntimeError(
                                    'InternalRedirector visited the same URL '
                                    'twice: %r' % ir.path)
                            else:
                                # Add the *previous* path_info + qs to
                                # redirections.
                                if qs:
                                    qs = '?' + qs
                                redirections.append(sn + path + qs)

                        # Munge environment and try again.
                        method = 'GET'
                        path = ir.path
                        qs = ir.query_string
                        rfile = io.BytesIO()

                send_response(
                    req, response.output_status, response.header_list,
                    response.body, response.stream)
            finally:
                app.release_serving()
    except Exception:
        tb = format_exc()
        cherrypy.log(tb, 'MOD_PYTHON', severity=logging.ERROR)
        s, h, b = bare_error()
        send_response(req, s, h, b)
    return apache.OK


def send_response(req, status, headers, body, stream=False):
    """Copy a CherryPy response (status, headers, body) onto the mod_python req."""
    # Set response status
    req.status = int(status[:3])

    # Set response headers
    req.content_type = 'text/plain'
    for header, value in headers:
        if header.lower() == 'content-type':
            req.content_type = value
            continue
        req.headers_out.add(header, value)

    if stream:
        # Flush now so the status and headers are sent immediately.
        req.flush()

    # Set response body
    for seg in always_iterable(body):
        req.write(seg)


# --------------- Startup tools for CherryPy + mod_python --------------- #

try:
    import subprocess

    def popen(fullcmd):
        p = subprocess.Popen(fullcmd, shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             close_fds=True)
        return p.stdout
except ImportError:
    def popen(fullcmd):
        pipein, pipeout = os.popen4(fullcmd)
        return pipeout


def read_process(cmd, args=''):
    """Run ``cmd args`` and return its combined output, raising if not found."""
    fullcmd = '%s %s' % (cmd, args)
    pipeout = popen(fullcmd)
    try:
        firstline = pipeout.readline()
        cmd_not_found = re.search(
            b'(not recognized|No such file|not found)',
            firstline,
            re.IGNORECASE
        )
        if cmd_not_found:
            raise IOError('%s must be on your system path.' % cmd)
        output = firstline + pipeout.read()
    finally:
        pipeout.close()
    return output


class ModPythonServer(object):
    """Generate a mod_python Apache config and start/stop Apache with it."""

    # NOTE(review): the internal line layout of this template was lost when
    # the file's newlines were stripped; reconstructed per upstream CherryPy.
    # Confirm against the packaged original before relying on exact output.
    template = """
# Apache2 server configuration file for running CherryPy with mod_python.

DocumentRoot "/"
Listen %(port)s
LoadModule python_module modules/mod_python.so

<Location %(loc)s>
    SetHandler python-program
    PythonHandler %(handler)s
    PythonDebug On
%(opts)s
</Location>
"""

    def __init__(self, loc='/', port=80, opts=None, apache_path='apache',
                 handler='cherrypy._cpmodpy::handler'):
        self.loc = loc
        self.port = port
        self.opts = opts
        self.apache_path = apache_path
        self.handler = handler

    def start(self):
        opts = ''.join([' PythonOption %s %s\n' % (k, v)
                        for k, v in self.opts])
        conf_data = self.template % {'port': self.port,
                                     'loc': self.loc,
                                     'opts': opts,
                                     'handler': self.handler,
                                     }

        mpconf = os.path.join(os.path.dirname(__file__), 'cpmodpy.conf')
        with open(mpconf, 'wb') as f:
            f.write(conf_data)

        response = read_process(self.apache_path, '-k start -f %s' % mpconf)
        self.ready = True
        return response

    def stop(self):
        os.popen('apache -k stop')
        self.ready = False
PypiClean
/lektor-3.4.0b6-py3-none-any.whl/lektor/project.py
"""Project discovery and configuration for Lektor (vendored lektor/project.py).

NOTE(review): line breaks in this stored copy were stripped; reconstructed
from the token stream without changing logic.
"""
from __future__ import annotations

import hashlib
import os
import sys
from enum import Enum
from pathlib import Path

from inifile import IniFile
from werkzeug.utils import cached_property

from lektor.environment import Environment
from lektor.utils import comma_delimited
from lektor.utils import get_cache_dir
from lektor.utils import untrusted_to_os_path


class Project:
    """A Lektor project: a tree on disk plus optional .lektorproject file."""

    def __init__(self, name, project_file, tree, themes=None):
        self.name = name
        self.project_file = project_file
        self.tree = os.path.normpath(tree)
        self.themes = themes or []
        # Stable project id derived from the (normalized) tree path.
        self.id = hashlib.md5(self.tree.encode("utf-8")).hexdigest()

    def open_config(self):
        if self.project_file is None:
            raise RuntimeError("This project has no project file.")
        return IniFile(self.project_file)

    @classmethod
    def from_file(cls, filename):
        """Reads a project from a project file."""
        inifile = IniFile(filename)
        if inifile.is_new:
            return None
        name = (
            inifile.get("project.name")
            or os.path.basename(filename).rsplit(".")[0].title()
        )
        path = os.path.join(
            os.path.dirname(filename),
            untrusted_to_os_path(inifile.get("project.path") or "."),
        )
        themes = inifile.get("project.themes")
        if themes is not None:
            themes = [x.strip() for x in themes.split(",")]
        else:
            themes = []
        return cls(
            name=name,
            project_file=filename,
            tree=path,
            themes=themes,
        )

    @classmethod
    def from_path(cls, path, extension_required=False):
        """Locates the project for a path."""
        path = os.path.abspath(path)
        if os.path.isfile(path) and (
            not extension_required or path.endswith(".lektorproject")
        ):
            return cls.from_file(path)

        try:
            files = [
                x for x in os.listdir(path) if x.lower().endswith(".lektorproject")
            ]
        except OSError:
            return None

        if len(files) == 1:
            return cls.from_file(os.path.join(path, files[0]))

        # A directory with content/contents.lr counts as an implicit,
        # project-file-less project.
        if os.path.isdir(path) and os.path.isfile(
            os.path.join(path, "content/contents.lr")
        ):
            return cls(
                name=os.path.basename(path),
                project_file=None,
                tree=path,
            )
        return None

    @classmethod
    def discover(cls, base=None):
        """Auto discovers the closest project."""
        if base is None:
            base = os.getcwd()
        here = base
        while 1:
            project = cls.from_path(here, extension_required=True)
            if project is not None:
                return project
            node = os.path.dirname(here)
            if node == here:
                # Reached the filesystem root without finding a project.
                break
            here = node
        return None

    @property
    def project_path(self):
        return self.project_file or self.tree

    def get_output_path(self):
        """The path where output files are stored."""
        config = self.open_config()  # raises if no project_file
        output_path = config.get("project.output_path")
        if output_path:
            path = Path(config.filename).parent / output_path
        else:
            path = Path(get_cache_dir(), "builds", self.id)
        return str(path)

    class PackageCacheType(Enum):
        VENV = "venv"  # The new virtual environment-based package cache
        FLAT = "flat"  # No longer used flat-directory package cache

    def get_package_cache_path(
        self, cache_type: PackageCacheType = PackageCacheType.VENV
    ) -> Path:
        """The path where plugin packages are stored."""
        if cache_type is self.PackageCacheType.FLAT:
            cache_name = "packages"
        else:
            cache_name = "venvs"
        # Key the cache on project id *and* interpreter so different Python
        # installs don't share installed plugin packages.
        h = hashlib.md5()
        h.update(self.id.encode("utf-8"))
        h.update(sys.version.encode("utf-8"))
        h.update(sys.prefix.encode("utf-8"))
        return Path(get_cache_dir(), cache_name, h.hexdigest())

    def content_path_from_filename(self, filename):
        """Given a filename returns the content path or None if not in project."""
        dirname, basename = os.path.split(os.path.abspath(filename))
        if basename == "contents.lr":
            path = dirname
        elif basename.endswith(".lr"):
            path = os.path.join(dirname, basename[:-3])
        else:
            return None
        content_path = os.path.normpath(self.tree).split(os.path.sep) + ["content"]
        file_path = os.path.normpath(path).split(os.path.sep)
        prefix = os.path.commonprefix([content_path, file_path])
        if prefix == content_path:
            return "/" + "/".join(file_path[len(content_path):])
        return None

    def make_env(self, load_plugins=True):
        """Create a new environment for this project."""
        return Environment(self, load_plugins=load_plugins)

    @cached_property
    def excluded_assets(self):
        """List of glob patterns matching filenames of excluded assets.

        Combines with default EXCLUDED_ASSETS.
        """
        config = self.open_config()
        return list(comma_delimited(config.get("project.excluded_assets", "")))

    @cached_property
    def included_assets(self):
        """List of glob patterns matching filenames of included assets.

        Overrides both excluded_assets and the default excluded patterns.
        """
        config = self.open_config()
        return list(comma_delimited(config.get("project.included_assets", "")))

    def to_json(self):
        return {
            "name": self.name,
            "project_file": self.project_file,
            "project_path": self.project_path,
            "default_output_path": self.get_output_path(),
            "package_cache_path": str(self.get_package_cache_path()),
            "id": self.id,
            "tree": self.tree,
        }
PypiClean
/LabtoolSuite-0.1.3.tar.gz/LabtoolSuite-0.1.3/Labtools/docs/js/osmplayer.default.js
!function(a,b){b.playLoader=b.playLoader||{},b.playLoader[a]=function(a,b){minplayer.playLoader.call(this,a,b)},b.playLoader[a].prototype=new minplayer.playLoader,b.playLoader[a].prototype.constructor=b.playLoader[a],b.playLoader[a].prototype.getDisplay=function(){return this.options.build&&jQuery(".minplayer-"+a,this.context).prepend('<div class="minplayer-'+a+'-loader-wrapper"><div class="minplayer-'+a+'-big-play ui-state-default"><span></span></div><div class="minplayer-'+a+'-loader">&nbsp;</div><div class="minplayer-'+a+'-preview ui-widget-content"></div></div>'),jQuery(".minplayer-"+a+" .minplayer-"+a+"-loader-wrapper",this.context)},b.playLoader[a].prototype.loadPreview=function(a){minplayer.playLoader.prototype.loadPreview.call(this,a)||this.elements.preview.addClass("no-image")},b.playLoader[a].prototype.getElements=function(){var b=minplayer.playLoader.prototype.getElements.call(this);return jQuery.extend(b,{busy:jQuery(".minplayer-"+a+"-loader",this.display),bigPlay:jQuery(".minplayer-"+a+"-big-play",this.display),preview:jQuery(".minplayer-"+a+"-preview",this.display)})}}("default",osmplayer||{}),function(a,b){b.controller=b.controller||{},b.controller[a]=function(a,b){minplayer.controller.call(this,a,b)},b.controller[a].prototype=new minplayer.controller,b.controller[a].prototype.constructor=b.controller[a],b.controller[a].prototype.construct=function(){this.options=jQuery.extend({volumeVertical:!0,controllerOnly:!1,showController:!0},this.options),minplayer.controller.prototype.construct.call(this);var a=this;if(!this.options.showController)return void this.get("player",function(a){a.display.removeClass("with-controller")});if(!this.options.volumeVertical||this.options.controllerOnly){this.display.addClass("minplayer-controls-volume-horizontal"),this.display.removeClass("minplayer-controls-volume-vertical");try{this.volumeBar.slider("option","orientation","horizontal")}catch(b){}}else 
this.display.addClass("minplayer-controls-volume-vertical"),this.display.removeClass("minplayer-controls-volume-horizontal");this.get("player",function(b){a.options.controllerOnly?b.display.addClass("controller-only"):this.get("media",function(a){a.hasController()?b.display.addClass("with-controller"):this.showThenHide(5e3,function(a){var c=a?"addClass":"removeClass";b.display[c]("with-controller")})})})},b.controller[a].prototype.getDisplay=function(){return this.options.showController?(this.options.build&&jQuery(".minplayer-"+a,this.context).prepend('<div class="minplayer-'+a+'-controls ui-widget-header"><div class="minplayer-'+a+'-controls-left"><a class="minplayer-'+a+"-play minplayer-"+a+'-button ui-state-default ui-corner-all" title="Play"><span class="ui-icon ui-icon-play"></span></a><a class="minplayer-'+a+"-pause minplayer-"+a+'-button ui-state-default ui-corner-all" title="Pause"><span class="ui-icon ui-icon-pause"></span></a></div><div class="minplayer-'+a+'-controls-right"><div class="minplayer-'+a+'-timer">00:00</div><div class="minplayer-'+a+'-fullscreen ui-widget-content"><div class="minplayer-'+a+'-fullscreen-inner ui-state-default"></div></div><div class="minplayer-'+a+'-volume"><div class="minplayer-'+a+'-volume-slider"></div><a class="minplayer-'+a+"-volume-mute minplayer-"+a+'-button ui-state-default ui-corner-all" title="Mute"><span class="ui-icon ui-icon-volume-on"></span></a><a class="minplayer-'+a+"-volume-unmute minplayer-"+a+'-button ui-state-default ui-corner-all" title="Unmute"><span class="ui-icon ui-icon-volume-off"></span></a></div></div><div class="minplayer-'+a+'-controls-mid"><div class="minplayer-'+a+'-seek"><div class="minplayer-'+a+'-progress ui-state-default"></div></div></div></div>'),this.context.addClass("with-controller"),jQuery(".minplayer-"+a+"-controls",this.context)):jQuery(null)},b.controller[a].prototype.getElements=function(){var 
b=minplayer.controller.prototype.getElements.call(this),c=jQuery(".minplayer-"+a+"-timer",this.display);return jQuery.extend(b,{play:jQuery(".minplayer-"+a+"-play",this.display),pause:jQuery(".minplayer-"+a+"-pause",this.display),fullscreen:jQuery(".minplayer-"+a+"-fullscreen",this.display),seek:jQuery(".minplayer-"+a+"-seek",this.display),progress:jQuery(".minplayer-"+a+"-progress",this.display),volume:jQuery(".minplayer-"+a+"-volume-slider",this.display),mute:jQuery(".minplayer-"+a+"-volume-mute",this.display),timer:c,duration:c})}}("default",osmplayer||{}),function(a,b){b.playlist=b.playlist||{},b.playlist[a]=function(a,c){b.playlist.call(this,a,c)},b.playlist[a].prototype=new b.playlist,b.playlist[a].prototype.constructor=b.playlist[a],b.playlist[a].prototype.construct=function(){this.options=jQuery.extend({showPlaylist:!0},this.options),b.playlist.prototype.construct.call(this),this.showThenHide(this.elements.hideShow),this.get("player",function(a){var b=this.options.vertical?"width":"height",c=this.options.vertical?"right":"bottom",d=this.options.vertical?"marginRight":"marginBottom";this.hideShow=function(e,f){var g={},h={},i=this.display[b](),j=this.options.vertical?"e":"s",k=this.options.vertical?"w":"n",l=e?"ui-icon-triangle-1-"+k:"ui-icon-triangle-1-"+j,m=e?"ui-icon-triangle-1-"+j:"ui-icon-triangle-1-"+k;jQuery("span",this.elements.hideShow).removeClass(l).addClass(m),g[c]=e?i:0,a.elements.minplayer&&(f?a.elements.minplayer.animate(g,"fast"):a.elements.minplayer.css(g)),h[d]=e?0:-i,f?this.display.animate(h,"fast",function(){a.resize()}):this.display.css(h)},this.ubind(this.uuid+":playlistLoad",function(a){return function(b,c){1===c.nodes.length?a.hideShow(!1,!0):a.hideShow(!0,!0)}}(this)),this.elements.hideShow&&this.elements.hideShow.bind("click",function(a){return function(b){b.preventDefault();var 
c=jQuery("span",a.elements.hideShow),d=(a.options.vertical?"e":"s",a.options.vertical?"w":"n"),e=c.hasClass("ui-icon-triangle-1-"+d);a.hideShow(e,!0)}}(this)),a.elements.minplayer&&(this.options.showPlaylist?this.options.vertical?a.elements.minplayer.css("right",this.display.width()+"px"):a.elements.minplayer.css("bottom",this.display.height()+"px"):this.hideShow(!1))})},b.playlist[a].prototype.getDisplay=function(){return this.options.build&&this.context.append('<div class="osmplayer-'+a+'-playlist"><div class="osmplayer-'+a+'-hide-show-playlist ui-state-default"><span class="ui-icon"></span></div><div class="minplayer-'+a+'-loader-wrapper"><div class="minplayer-'+a+'-loader"></div></div><div class="osmplayer-'+a+'-playlist-scroll ui-widget-content"><div class="osmplayer-'+a+'-playlist-list"></div></div></div>'),jQuery(".osmplayer-"+a+"-playlist",this.context)},b.playlist[a].prototype.getElements=function(){var c=b.playlist.prototype.getElements.call(this),d=this.options.vertical?"playlist-vertical":"playlist-horizontal";d+=this.options.playlistOnly?" 
playlist-only":"";var e=this.options.showPlaylist,f=this.options.vertical?e?"e":"w":e?"s":"n",g=this.options.vertical?"ui-corner-left":"ui-corner-top";(this.options.disablePlaylist||!this.options.playlist)&&this.display.remove(),this.display.addClass(d);var h=jQuery(".osmplayer-"+a+"-hide-show-playlist",this.display);return h.addClass(g),this.options.playlistOnly&&(h.hide(),h=null),jQuery("span",h).addClass("ui-icon-triangle-1-"+f),jQuery.extend(c,{playlist_busy:jQuery(".minplayer-"+a+"-loader-wrapper",this.display),list:jQuery(".osmplayer-"+a+"-playlist-list",this.display),scroll:jQuery(".osmplayer-"+a+"-playlist-scroll",this.display),hideShow:h})}}("default",osmplayer||{}),function(a,b){b.teaser=b.teaser||{},b.teaser[a]=function(a,c){b.teaser.call(this,a,c)},b.teaser[a].prototype=new b.teaser,b.teaser[a].prototype.constructor=b.teaser[a],b.teaser[a].prototype.construct=function(){minplayer.display.prototype.construct.call(this),this.display.bind("mouseenter",function(a){return function(){a.addClass("ui-state-hover")}}(this.elements.info)).bind("mouseleave",function(a){return function(){a.removeClass("ui-state-hover")}}(this.elements.info))},b.teaser[a].prototype.getDisplay=function(){this.context.append('<div class="osmplayer-'+a+'-teaser ui-widget-content"><div class="osmplayer-'+a+'-teaser-image"></div><div class="osmplayer-'+a+'-teaser-info ui-state-default"><div class="osmplayer-'+a+'-teaser-title"></div></div></div>');var b=jQuery(".osmplayer-"+a+"-teaser",this.context);return b.eq(b.length-1)},b.teaser[a].prototype.select=function(a){a?this.elements.info.addClass("ui-state-active"):this.elements.info.removeClass("ui-state-active")},b.teaser[a].prototype.getElements=function(){var c=b.teaser.prototype.getElements.call(this);return 
jQuery.extend(c,{info:jQuery(".osmplayer-"+a+"-teaser-info",this.display),title:jQuery(".osmplayer-"+a+"-teaser-title",this.display),image:jQuery(".osmplayer-"+a+"-teaser-image",this.display)})}}("default",osmplayer||{}),function(a,b){b.pager=b.pager||{},b.pager[a]=function(a,c){b.pager.call(this,a,c)},b.pager[a].prototype=new b.pager,b.pager[a].prototype.constructor=b.pager[a],b.pager[a].prototype.getDisplay=function(){return this.options.build&&this.context.append('<div class="osmplayer-'+a+'-playlist-pager ui-widget-header"><div class="osmplayer-'+a+'-playlist-pager-left"><a href="#" class="osmplayer-'+a+"-playlist-pager-link osmplayer-"+a+"-playlist-pager-prevpage minplayer-"+a+'-button ui-state-default ui-corner-all"><span class="ui-icon ui-icon-circle-triangle-w"></span></a></div><div class="osmplayer-'+a+'-playlist-pager-right"><a href="#" class="osmplayer-'+a+"-playlist-pager-link osmplayer-"+a+"-playlist-pager-nextpage minplayer-"+a+'-button ui-state-default ui-corner-all"><span class="ui-icon ui-icon-circle-triangle-e"></span></a></div></div>'),jQuery(".osmplayer-"+a+"-playlist-pager",this.context)},b.pager[a].prototype.getElements=function(){var c=b.pager.prototype.getElements.call(this);return jQuery.extend(c,{prevPage:jQuery(".osmplayer-"+a+"-playlist-pager-prevpage",this.display),nextPage:jQuery(".osmplayer-"+a+"-playlist-pager-nextpage",this.display)})}}("default",osmplayer||{}),function(a,b){b[a]=function(a,c){b.call(this,a,c)},b[a].prototype=new b,b[a].prototype.constructor=b[a],b[a].prototype.getDisplay=function(){return 0===this.context.children().length&&(this.context=this.context.attr({id:this.options.id+"-player","class":"minplayer-"+a+"-media"}).wrap(jQuery(document.createElement("div")).attr({"class":"minplayer-"+a+"-display ui-widget-content"})).parent(".minplayer-"+a+"-display").wrap(jQuery(document.createElement("div")).attr({"class":"minplayer-"+a})).parent(".minplayer-"+a).prepend('<div class="minplayer-'+a+'-logo"></div><div 
class="minplayer-'+a+'-error"></div>').wrap(jQuery(document.createElement("div")).attr({id:this.options.id,"class":"osmplayer-"+a+" player-ui"})).parent(".osmplayer-"+a),this.options.build=!0),this.context},b[a].prototype.getElements=function(){var c=b.prototype.getElements.call(this);this.display.width(this.options.width),this.display.height(this.options.height);var d=jQuery(".minplayer-"+a,this.display);return this.options.playlistOnly&&(d.remove(),d=null),jQuery.extend(c,{player:this.display,minplayer:d,display:jQuery(".minplayer-"+a+"-display",this.display),media:jQuery(".minplayer-"+a+"-media",this.display),error:jQuery(".minplayer-"+a+"-error",this.display),logo:jQuery(".minplayer-"+a+"-logo",this.display)})}}("default",osmplayer||{});
PypiClean
/NetworkSim-0.2.2.tar.gz/NetworkSim-0.2.2/examples/transmitter_receiver_and_ram.ipynb
``` from NetworkSim.simulation.process.ram import RAM from NetworkSim.simulation.process.transmitter.fixed import FT from NetworkSim.simulation.process.receiver.tunable import TR from NetworkSim.architecture.setup.model import Model from NetworkSim.architecture.base.network import Network import simpy env =simpy.Environment() model = Model(network=Network(num_nodes=4)) ram0 = RAM(env=env, until=2000, ram_id=0, model=model) ram0.initialise() trans0 = FT(env=env, ram=ram0, transmitter_id=0, model=model) trans0.initialise() rec1 = TR(env=env, until=2000, receiver_id=1, model=model) rec1.initialise() rec2 = TR(env=env, until=2000, receiver_id=2, model=model) rec2.initialise() rec3 = TR(env=env, until=2000, receiver_id=3, model=model) rec3.initialise() env.run(until=2000) rec1.model.circulation_time trans0.transmitted_data_packet_df trans0.transmitted_control_packet_df trans0.ram.generated_data_packet_df print(trans0.transmitter_data_clock_cycle) print(rec1.receiver_data_clock_cycle) print(rec1.model.get_data_packet_total_duration()) trans0.model.data_rings[0].packet_record_df trans0.model.control_ring.packet_record_df rec1.received_control_packet_df rec1.queue_df rec1.queue rec1.received_data_packet_df ```
PypiClean
/Biryani-0.10.8-py3-none-any.whl/biryani/jsonconv.py
# Biryani -- A conversion and validation toolbox
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Emmanuel Raviart
# http://packages.python.org/Biryani/
#
# This file is part of Biryani.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""JSON Related Converters"""

# NOTE(review): this is a Python 2 module (``unicode``, ``str.decode``);
# line breaks were stripped in storage and have been reconstructed without
# changing logic.

import json

from .baseconv import cleanup_line, pipe
from . import states


__all__ = [
    'make_input_to_json',
    'make_json_to_str',
    'make_str_to_json',
    ]


# Level-1 Converters


def make_json_to_str(*args, **kwargs):
    """Return a converter that encodes a JSON data to a string.

    >>> make_json_to_str()({u'a': 1, u'b': [2, u'three']})
    (u'{"a": 1, "b": [2, "three"]}', None)
    >>> make_json_to_str()(u'Hello World')
    (u'"Hello World"', None)
    >>> make_json_to_str(ensure_ascii = False, indent = 2, sort_keys = True)({u'a': 1, u'b': [2, u'three']})
    (u'{\\n  "a": 1, \\n  "b": [\\n    2, \\n    "three"\\n  ]\\n}', None)
    >>> make_json_to_str()(set([1, 2, 3]))
    (set([1, 2, 3]), u'Invalid JSON')
    >>> make_json_to_str()(u'')
    (u'""', None)
    >>> make_json_to_str()(None)
    (None, None)
    """
    def json_to_str(value, state = None):
        # ``None`` passes through unconverted, per Biryani convention.
        if value is None:
            return value, None
        if state is None:
            state = states.default_state
        try:
            value_str = unicode(json.dumps(value, *args, **kwargs))
        except TypeError:
            # Non-JSON-serializable value (e.g. a set).
            return value, state._(u'Invalid JSON')
        return value_str, None
    return json_to_str


def make_str_to_json(*args, **kwargs):
    """Return a converter that decodes a clean string to a JSON data.

    .. note:: For a converter that doesn't require a clean string, see :func:`make_input_to_json`.

    >>> make_str_to_json()(u'{"a": 1, "b": [2, "three"]}')
    ({u'a': 1, u'b': [2, u'three']}, None)
    >>> make_str_to_json()(u'null')
    (None, None)
    >>> make_str_to_json()(u'Hello World')
    (u'Hello World', u'Invalid JSON')
    >>> make_str_to_json()(u'{"a": 1, "b":')
    (u'{"a": 1, "b":', u'Invalid JSON')
    >>> make_str_to_json()(u'')
    (u'', u'Invalid JSON')
    >>> make_str_to_json()(None)
    (None, None)
    """
    def str_to_json(value, state = None):
        if value is None:
            return value, None
        if state is None:
            state = states.default_state
        if isinstance(value, str):
            # Ensure that json.loads() uses unicode strings.
            try:
                value = value.decode('utf-8')
            except UnicodeDecodeError:
                # Fall back to cp1252 before giving up.
                try:
                    value = value.decode('cp1252')
                except UnicodeDecodeError:
                    return value, state._(u'''JSON doesn't use "utf-8" encoding''')
        try:
            return json.loads(value, *args, **kwargs), None
        except ValueError:
            return value, state._(u'Invalid JSON')
    return str_to_json


# Level-2 Converters


def make_input_to_json(*args, **kwargs):
    """Return a converter that decodes a string to a JSON data.

    >>> make_input_to_json()(u'{"a": 1, "b": [2, "three"]}')
    ({u'a': 1, u'b': [2, u'three']}, None)
    >>> make_input_to_json()(u'null')
    (None, None)
    >>> make_input_to_json()(u'Hello World')
    (u'Hello World', u'Invalid JSON')
    >>> make_input_to_json()(u'{"a": 1, "b":')
    (u'{"a": 1, "b":', u'Invalid JSON')
    >>> make_input_to_json()(u'')
    (None, None)
    >>> make_input_to_json()(None)
    (None, None)
    """
    return pipe(
        cleanup_line,
        make_str_to_json(*args, **kwargs),
        )
PypiClean
/JSON_minify-0.3.0.tar.gz/JSON_minify-0.3.0/json_minify/__init__.py
import re def json_minify(string, strip_space=True): tokenizer = re.compile('"|(/\*)|(\*/)|(//)|\n|\r') end_slashes_re = re.compile(r'(\\)*$') in_string = False in_multi = False in_single = False new_str = [] index = 0 for match in re.finditer(tokenizer, string): if not (in_multi or in_single): tmp = string[index:match.start()] if not in_string and strip_space: # replace white space as defined in standard tmp = re.sub('[ \t\n\r]+', '', tmp) new_str.append(tmp) elif not strip_space: # Replace comments with white space so that the JSON parser reports # the correct column numbers on parsing errors. new_str.append(' ' * (match.start() - index)) index = match.end() val = match.group() if val == '"' and not (in_multi or in_single): escaped = end_slashes_re.search(string, 0, match.start()) # start of string or unescaped quote character to end string if not in_string or (escaped is None or len(escaped.group()) % 2 == 0): # noqa in_string = not in_string index -= 1 # include " character in next catch elif not (in_string or in_multi or in_single): if val == '/*': in_multi = True elif val == '//': in_single = True elif val == '*/' and in_multi and not (in_string or in_single): in_multi = False if not strip_space: new_str.append(' ' * len(val)) elif val in '\r\n' and not (in_multi or in_string) and in_single: in_single = False elif not ((in_multi or in_single) or (val in ' \r\n\t' and strip_space)): # noqa new_str.append(val) if not strip_space: if val in '\r\n': new_str.append(val) elif in_multi or in_single: new_str.append(' ' * len(val)) new_str.append(string[index:]) return ''.join(new_str)
PypiClean
/Apppath-1.0.3.tar.gz/Apppath-1.0.3/apppath/__init__.py
import datetime import os from importlib.metadata import PackageNotFoundError from warnings import warn from typing import Any from importlib import resources from warg import package_is_editable __project__ = "Apppath" __author__ = "Christian Heider Nielsen" __version__ = "1.0.3" __doc__ = r""" Created on 27/04/2019 A class and a set of functions for providing for system-consensual path for apps to store data, logs, cache... @author: cnheider """ __all__ = [ "PROJECT_APP_PATH", "PROJECT_NAME", "PROJECT_VERSION", "get_version", "PROJECT_ORGANISATION", "PROJECT_AUTHOR", "PROJECT_YEAR", "AppPath", "AppPathSubDirEnum", "open_app_path" # "INCLUDE_PROJECT_READMES", # "PACKAGE_DATA_PATH" ] from .app_path import * from .system_open_path_utilities import * PROJECT_NAME = __project__.lower().strip().replace(" ", "_") PROJECT_VERSION = __version__ PROJECT_YEAR = 2018 PROJECT_AUTHOR = __author__.lower().strip().replace(" ", "_") PROJECT_ORGANISATION = "Pything" PACKAGE_DATA_PATH = resources.files(PROJECT_NAME) / "data" try: DEVELOP = package_is_editable(PROJECT_NAME) except PackageNotFoundError as e: DEVELOP = True def get_version(append_time: Any = DEVELOP) -> str: """description""" version = __version__ if not version: version = os.getenv("VERSION", "0.0.0") if append_time: now = datetime.datetime.utcnow() date_version = now.strftime("%Y%m%d%H%M%S") # date_version = time.time() if version: # Most git tags are prefixed with 'v' (example: v1.2.3) this is # never desirable for artifact repositories, so we strip the # leading 'v' if it's present. version = version[1:] if isinstance(version, str) and version.startswith("v") else version else: # Default version is an ISO8601 compliant datetime. PyPI doesn't allow # the colon ':' character in its versions, and time is required to allow # for multiple publications to master in one day. 
This datetime string # uses the 'basic' ISO8601 format for both its date and time components # to avoid issues with the colon character (ISO requires that date and # time components of a date-time string must be uniformly basic or # extended, which is why the date component does not have dashes. # # Publications using datetime versions should only be made from master # to represent the HEAD moving forward. warn(f"Environment variable VERSION is not set, only using datetime: {date_version}") # warn(f'Environment variable VERSION is not set, only using timestamp: {version}') version = f"{version}.{date_version}" return version if __version__ is None: __version__ = get_version(append_time=True) __version_info__ = tuple(int(segment) for segment in __version__.split(".")) PROJECT_APP_PATH = AppPath(app_name=PROJECT_NAME, app_author=PROJECT_AUTHOR)
PypiClean
/Beaker-1.12.1.tar.gz/Beaker-1.12.1/README.rst
========================= Cache and Session Library ========================= About ===== Beaker is a web session and general caching library that includes WSGI middleware for use in web applications. As a general caching library, Beaker can handle storing for various times any Python object that can be pickled with optional back-ends on a fine-grained basis. Beaker was built largely on the code from MyghtyUtils, then refactored and extended with database support. Beaker includes Cache and Session WSGI middleware to ease integration with WSGI capable frameworks, and is automatically used by `Pylons <https://pylonsproject.org/about-pylons-framework.html>`_ and `TurboGears <https://www.turbogears.org/>`_. Features ======== * Fast, robust performance * Multiple reader/single writer lock system to avoid duplicate simultaneous cache creation * Cache back-ends include dbm, file, memory, memcached, Redis, MongoDB, and database (Using SQLAlchemy for multiple-db vendor support) * Signed cookies to prevent session hijacking/spoofing * Cookie-only sessions to remove the need for a db or file backend (ideal for clustered systems) * Extensible Container object to support new back-ends * Caches can be divided into namespaces (to represent templates, objects, etc.) then keyed for different copies * Create functions for automatic call-backs to create new cache copies after expiration * Fine-grained toggling of back-ends, keys, and expiration per Cache object Documentation ============= Documentation can be found on the `Official Beaker Docs site <https://beaker.readthedocs.io/en/latest/>`_. Source ====== The latest developer version is available in a `GitHub repository <https://github.com/bbangert/beaker>`_. Contributing ============ Bugs can be filed on GitHub, **should be accompanied by a test case** to retain current code coverage, and should be in a pull request when ready to be accepted into the beaker code-base.
PypiClean
/AVOScript-0.11.5.tar.gz/AVOScript-0.11.5/avos.py
from argparse import ArgumentParser
from json import loads, dumps
from os import path
from sys import exit
import subprocess
from typing import Dict, List, Optional

# Installed package takes priority; fall back to the in-repo source tree
# when running from a checkout.
try:
    from avoscript.lexer import Lexer
    from avoscript.lexer.default import ENV, ENV_CONSTS, LevelIndex
    from avoscript import version, AVOSCRIPT, PKGS
    from avoscript.parser import imp_parser
    from avoscript.lexer.types import Signal
except ImportError:
    from src.avoscript.lexer import Lexer
    from src.avoscript.lexer.default import ENV, ENV_CONSTS, LevelIndex
    from src.avoscript import version, AVOSCRIPT, PKGS
    from src.avoscript.parser import imp_parser
    from src.avoscript.lexer.types import Signal

from colorama import Fore, init

init(autoreset=True)

# CLI definition: interpreter flags plus a small git/GitHub-based package
# manager that stores packages under PKGS.
parser = ArgumentParser(
    "avoscript",
    description=f"{Fore.LIGHTRED_EX}AVOScript{Fore.RESET} {Fore.LIGHTCYAN_EX}{version}{Fore.RESET} interpreter",
)
# --== Flags ==-- #
flags = parser.add_argument_group("Flags")
flags.add_argument(
    "-i", "--interactive",
    help="start interactive mode",
    action="store_true"
)
flags.add_argument(
    "-v", "--version",
    help="show avoscript version",
    action="store_true"
)
flags.add_argument(
    "-V", "--verbose",
    help="enables verbose mode",
    action="store_true",
)
# --== With args ==-- #
flags.add_argument(
    "-s", "--script",
    dest="script",
    metavar="<src>",
    help="execute script"
)
flags.add_argument(
    "-f", "--file",
    dest="file",
    metavar="<file>",
    help="execute file script"
)
package_manager = parser.add_argument_group("Package Manager")
package_manager.add_argument(
    "-nf", "--no-fetch",
    dest="no_fetch",
    help="disable fetching package data",
    action="store_false"
)
package_manager.add_argument(
    "--upd",
    action="store_true",
    help="update packages data"
)
package_manager.add_argument(
    "--upload",
    action="store_true",
    help="upload current project"
)
package_manager.add_argument(
    "add",
    nargs='*',
    help="install package"
)
parser.set_defaults(
    script="",
    file="",
    add=None,
)


def git_clone(
        url: str,
        directory: str,
        target_dir: Optional[str] = None
):
    """Clones repo

    Shallow (--depth 1), tag-less, quiet clone; stdout/stderr suppressed.

    :param url: repo url
    :param directory: current working directory
    :param target_dir: dir to clone
    """
    if target_dir is not None:
        subprocess.run(
            f'git clone --depth 1 --no-tags -q {url} {target_dir}',
            shell=True,
            cwd=directory,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    else:
        subprocess.run(
            f'git clone --depth 1 --no-tags -q {url}',
            shell=True,
            cwd=directory,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )


def fetch_pkgs(no_fetch: bool, out_file: str = 'pkgs.json') -> List[Dict[str, str]]:
    """Fetches packages data

    Refreshes the package index (pkgs.json) from the avoscript repo unless
    fetching is disabled, then reads and parses the local copy.

    NOTE(review): the parameter semantics are inverted relative to its
    name -- fetching happens when ``no_fetch`` is False (it receives the
    ``store_false`` value of -nf/--no-fetch).

    :param no_fetch: need to fetch
    :param out_file: output file
    :return: list of packages
    """
    if not no_fetch:
        print(f"{Fore.LIGHTMAGENTA_EX}Fetch packages ...{Fore.RESET}")
        if not path.exists(path.join(AVOSCRIPT, 'avoscript')):
            print(f"{Fore.LIGHTMAGENTA_EX}Cloning repo ...{Fore.RESET}")
            git_clone('https://github.com/ethosa/avoscript.git', AVOSCRIPT)
        else:
            # Repo already present: fetch only pkgs.json from origin/master.
            subprocess.run(
                'cd avoscript && git init -q && git remote add origin https://github.com/ethosa/avoscript.git && '
                'git fetch -q origin master --depth 1 --no-tags && git checkout -q origin/master -- pkgs.json && '
                f'git show origin/master:pkgs.json > {out_file}',
                cwd=AVOSCRIPT,
                shell=True
            )
    try:
        out = None
        with open(path.join(AVOSCRIPT, 'avoscript', out_file), 'r', encoding='utf-8') as f:
            out = f.read()
        if out is not None:
            return loads(out)
        return []
    except FileNotFoundError:
        print(f"{Fore.LIGHTRED_EX}Need to fetch!{Fore.RESET}")
        return []


def install_package(name: str, data: List[Dict[str, str]]):
    """Install package

    Looks the package up in the fetched index and clones its repo into
    PKGS.  Index entries store names with spaces where the CLI uses dashes.

    :param name: package name
    :param data: package data
    """
    print(f"Install {Fore.LIGHTMAGENTA_EX}{name}{Fore.RESET} package ...")
    installed = False
    # CLI "-" separated name -> index " " separated name.
    _name = name.replace('-', ' ')
    for i in data:
        if 'name' in i and i['name'] == _name:
            if 'github_url' in i:
                print(f"Found {Fore.LIGHTMAGENTA_EX}Github URL{Fore.RESET}, cloning ...")
                # Clone into an underscore-separated directory under PKGS.
                i['name'] = i['name'].replace(' ', '_')
                git_clone(i['github_url'], PKGS, i['name'])
                installed = True
                print(
                    f"{Fore.LIGHTGREEN_EX}Successfully installed{Fore.RESET} "
                    f"{Fore.LIGHTCYAN_EX}{name}{Fore.RESET} "
                    f"{Fore.LIGHTGREEN_EX}package{Fore.RESET} "
                )
                break
            else:
                print(
                    f"{Fore.LIGHTYELLOW_EX}[WARNING]:{Fore.RESET} package "
                    f"{Fore.LIGHTMAGENTA_EX}{name}{Fore.RESET} hasn't github_url"
                )
    if not installed:
        print(
            f"{Fore.LIGHTRED_EX}[ERROR]:{Fore.RESET} package "
            f"{Fore.LIGHTMAGENTA_EX}{name}{Fore.RESET} is not exists"
        )


def main():
    """CLI entry point: dispatches on the parsed arguments (first matching
    mode wins: version, upload, upd, interactive, script, file, add)."""
    args = parser.parse_args()
    signal = Signal()
    signal.NEED_FREE = False
    signal.VERBOSE = args.verbose  # -V/--verbose
    # Interpreter state: environment stack, constants stack, scope level.
    env = [{}]
    consts = [{}]
    lvl = LevelIndex()
    lvl.inc()

    # -v/--version flag
    if args.version:
        print(f"{Fore.LIGHTRED_EX}AVOScript{Fore.RESET} {Fore.LIGHTCYAN_EX}{version}{Fore.RESET}")
    # --upload flag
    elif args.upload:
        print(f"{Fore.LIGHTYELLOW_EX}Working via Github CLI (gh){Fore.RESET}")
        package_name = input(f"{Fore.LIGHTCYAN_EX}name of package: {Fore.RESET}")
        package_description = input(f"{Fore.LIGHTCYAN_EX}package description: {Fore.RESET}")
        github_url = input(f"{Fore.LIGHTCYAN_EX}project Github URL: {Fore.RESET}")
        if not package_name:
            print(f"{Fore.LIGHTRED_EX}[ERROR]:{Fore.RESET} package name is empty")
            return
        fetch_pkgs(False)
        # Work on a branch named after the package.
        subprocess.run(
            f'cd avoscript && git pull -q --no-tags && git checkout -b {package_name}',
            cwd=AVOSCRIPT,
            shell=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL
        )
        data = []
        with open(path.join(AVOSCRIPT, 'avoscript', 'pkgs.json'), 'r') as f:
            data = loads(f.read())
        data.append({
            'name': package_name.replace('-', ' '),
            'description': package_description,
            'github_url': github_url
        })
        with open(path.join(AVOSCRIPT, 'avoscript', 'pkgs.json'), 'w') as f:
            f.write(dumps(data, indent=2))
        # Commit the index change and open a PR via the GitHub CLI, then
        # clean up the temporary branch.
        subprocess.run(
            f'cd avoscript && '
            f'git add pkgs.json && git commit -q -m "add `{package_name}` package" && '
            f'gh pr create -t "Add `{package_name}` package" -B master -b "{package_description}" -l "new package" && '
            f'git switch master && git branch -D {package_name}',
            cwd=AVOSCRIPT,
            shell=True
        )
        print(f"{Fore.GREEN}PR was created{Fore.RESET}")
    # --upd flag
    elif args.upd:
        fetch_pkgs(False)
    # -i/--interactive flag
    elif args.interactive:
        print(
            f"Welcome to {Fore.LIGHTRED_EX}AVOScript{Fore.RESET} "
            f"{Fore.LIGHTCYAN_EX}{version}{Fore.RESET} interactive mode."
        )
        print(
            f"Write {Fore.LIGHTRED_EX}exit{Fore.RESET} to shutdown interactive mode."
        )
        print(f"{Fore.LIGHTGREEN_EX}>>>{Fore.RESET} ", end="")
        source = input()
        # REPL loop: each line is lexed, parsed and evaluated against the
        # shared env/consts state until the literal command 'exit'.
        while source != 'exit':
            signal.NO_CREATE_LEVEL = True
            imp_parser(Lexer.lex(source)).value.eval(env, consts, lvl, {}, signal)
            print(f"{Fore.LIGHTGREEN_EX}>>>{Fore.RESET} ", end="")
            source = input()
        print(f"Exited via {Fore.LIGHTRED_EX}exit{Fore.RESET} command")
        exit(0)
    # -s/--script flag
    elif args.script:
        imp_parser(Lexer.lex(args.script)).value.eval(env, consts, lvl, {}, signal)
    # -f/--file flag
    elif args.file:
        imp_parser(Lexer.lex_file(args.file)).value.eval(env, consts, lvl, {}, signal)
    # add pos arg
    elif args.add:
        data = fetch_pkgs(args.no_fetch)  # -nf/--no-fetch flag
        # NOTE(review): args.add collects all positionals, so [1:] skips
        # the leading 'add' word itself -- confirm against intended usage.
        for i in args.add[1:]:
            install_package(i, data)


if __name__ == '__main__':
    main()
PypiClean
/EIVideo-0.1a0.tar.gz/EIVideo-0.1a0/paddlevideo/utils/precise_bn.py
import paddle
import itertools

from EIVideo.paddlevideo.utils import get_logger

logger = get_logger("paddlevideo")
"""
Implement precise bn, which is useful for improving accuracy.
"""


@paddle.no_grad()  # speed up and save CUDA memory
def do_preciseBN(model, data_loader, parallel, num_iters=200):
    """
    Recompute and update the batch norm stats to make them more precise.
    During training both BN stats and the weight are changing after every iteration,
    so the running average can not precisely reflect the actual stats of the current model.
    In this function, the BN stats are recomputed with fixed weights, to make
    the running average more precise. Specifically, it computes the true average
    of per-batch mean/variance instead of the running average.
    This is useful to improve validation accuracy.

    Args:
        model: the model whose bn stats will be recomputed
        data_loader: an iterator. Produce data as input to the model
        parallel: whether the model is wrapped for data-parallel execution
            (if so, the wrapped layers are reached through ``model._layers``)
        num_iters: number of iterations to compute the stats.
    Return:
        the model with precise mean and variance in bn layers.
    """
    # Collect every BatchNorm sublayer currently in training mode; eval-mode
    # BN layers are deliberately left untouched.
    bn_layers_list = [
        m for m in model.sublayers() if any(
            (isinstance(m, bn_type)
             for bn_type in (paddle.nn.BatchNorm1D, paddle.nn.BatchNorm2D,
                             paddle.nn.BatchNorm3D))) and m.training
    ]
    if len(bn_layers_list) == 0:
        return

    # moving_mean=moving_mean*momentum+batch_mean*(1.−momentum)
    # we set momentum=0. to get the true mean and variance during forward
    momentum_actual = [bn._momentum for bn in bn_layers_list]
    for bn in bn_layers_list:
        bn._momentum = 0.

    # Accumulators for the true per-batch statistics (one per BN layer).
    running_mean = [paddle.zeros_like(bn._mean)
                    for bn in bn_layers_list]  #pre-ignore
    running_var = [paddle.zeros_like(bn._variance) for bn in bn_layers_list]

    # ind stays -1 if the loader yields nothing, which trips the assert below.
    ind = -1
    for ind, data in enumerate(itertools.islice(data_loader, num_iters)):
        logger.info("doing precise BN {} / {}...".format(ind + 1, num_iters))
        if parallel:
            model._layers.train_step(data)
        else:
            model.train_step(data)

        for i, bn in enumerate(bn_layers_list):
            # Accumulates the bn stats.
            # Incremental mean: new_avg = old_avg + (x - old_avg)/n, so after
            # k batches each accumulator holds the arithmetic mean over them.
            running_mean[i] += (bn._mean - running_mean[i]) / (ind + 1)
            running_var[i] += (bn._variance - running_var[i]) / (ind + 1)

    assert ind == num_iters - 1, (
        "update_bn_stats is meant to run for {} iterations, but the batch_sampler stops at {} iterations."
        .format(num_iters, ind))

    # Sets the precise bn stats.
    for i, bn in enumerate(bn_layers_list):
        bn._mean.set_value(running_mean[i])
        bn._variance.set_value(running_var[i])
        # Restore the momentum that was zeroed before the forward passes.
        bn._momentum = momentum_actual[i]
PypiClean
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/AppCenter/simpleui/static/admin/js/vendor/xregexp/xregexp.js
/** * XRegExp provides augmented, extensible JavaScript regular expressions. You get new syntax, * flags, and methods beyond what browsers support natively. XRegExp is also a regex utility belt * with tools to make your client-side grepping simpler and more powerful, while freeing you from * worrying about pesky cross-browser inconsistencies and the dubious `lastIndex` property. See * XRegExp's documentation (http://xregexp.com/) for more details. * @module xregexp * @requires N/A */ var XRegExp; // Avoid running twice; that would reset tokens and could break references to native globals XRegExp = XRegExp || (function (undef) { "use strict"; /*-------------------------------------- * Private variables *------------------------------------*/ var self, addToken, add, // Optional features; can be installed and uninstalled features = { natives: false, extensibility: false }, // Store native methods to use and restore ("native" is an ES3 reserved keyword) nativ = { exec: RegExp.prototype.exec, test: RegExp.prototype.test, match: String.prototype.match, replace: String.prototype.replace, split: String.prototype.split }, // Storage for fixed/extended native methods fixed = {}, // Storage for cached regexes cache = {}, // Storage for addon tokens tokens = [], // Token scopes defaultScope = "default", classScope = "class", // Regexes that match native regex syntax nativeTokens = { // Any native multicharacter token in default scope (includes octals, excludes character classes) "default": /^(?:\\(?:0(?:[0-3][0-7]{0,2}|[4-7][0-7]?)?|[1-9]\d*|x[\dA-Fa-f]{2}|u[\dA-Fa-f]{4}|c[A-Za-z]|[\s\S])|\(\?[:=!]|[?*+]\?|{\d+(?:,\d*)?}\??)/, // Any native multicharacter token in character class scope (includes octals) "class": /^(?:\\(?:[0-3][0-7]{0,2}|[4-7][0-7]?|x[\dA-Fa-f]{2}|u[\dA-Fa-f]{4}|c[A-Za-z]|[\s\S]))/ }, // Any backreference in replacement strings replacementToken = /\$(?:{([\w$]+)}|(\d\d?|[\s\S]))/g, // Any character with a later instance in the string duplicateFlags = 
/([\s\S])(?=[\s\S]*\1)/g, // Any greedy/lazy quantifier quantifier = /^(?:[?*+]|{\d+(?:,\d*)?})\??/, // Check for correct `exec` handling of nonparticipating capturing groups compliantExecNpcg = nativ.exec.call(/()??/, "")[1] === undef, // Check for flag y support (Firefox 3+) hasNativeY = RegExp.prototype.sticky !== undef, // Used to kill infinite recursion during XRegExp construction isInsideConstructor = false, // Storage for known flags, including addon flags registeredFlags = "gim" + (hasNativeY ? "y" : ""); /*-------------------------------------- * Private helper functions *------------------------------------*/ /** * Attaches XRegExp.prototype properties and named capture supporting data to a regex object. * @private * @param {RegExp} regex Regex to augment. * @param {Array} captureNames Array with capture names, or null. * @param {Boolean} [isNative] Whether the regex was created by `RegExp` rather than `XRegExp`. * @returns {RegExp} Augmented regex. */ function augment(regex, captureNames, isNative) { var p; // Can't auto-inherit these since the XRegExp constructor returns a nonprimitive value for (p in self.prototype) { if (self.prototype.hasOwnProperty(p)) { regex[p] = self.prototype[p]; } } regex.xregexp = {captureNames: captureNames, isNative: !!isNative}; return regex; } /** * Returns native `RegExp` flags used by a regex object. * @private * @param {RegExp} regex Regex to check. * @returns {String} Native flags in use. */ function getNativeFlags(regex) { //return nativ.exec.call(/\/([a-z]*)$/i, String(regex))[1]; return (regex.global ? "g" : "") + (regex.ignoreCase ? "i" : "") + (regex.multiline ? "m" : "") + (regex.extended ? "x" : "") + // Proposed for ES6, included in AS3 (regex.sticky ? "y" : ""); // Proposed for ES6, included in Firefox 3+ } /** * Copies a regex object while preserving special properties for named capture and augmenting with * `XRegExp.prototype` methods. The copy has a fresh `lastIndex` property (set to zero). 
Allows * adding and removing flags while copying the regex. * @private * @param {RegExp} regex Regex to copy. * @param {String} [addFlags] Flags to be added while copying the regex. * @param {String} [removeFlags] Flags to be removed while copying the regex. * @returns {RegExp} Copy of the provided regex, possibly with modified flags. */ function copy(regex, addFlags, removeFlags) { if (!self.isRegExp(regex)) { throw new TypeError("type RegExp expected"); } var flags = nativ.replace.call(getNativeFlags(regex) + (addFlags || ""), duplicateFlags, ""); if (removeFlags) { // Would need to escape `removeFlags` if this was public flags = nativ.replace.call(flags, new RegExp("[" + removeFlags + "]+", "g"), ""); } if (regex.xregexp && !regex.xregexp.isNative) { // Compiling the current (rather than precompilation) source preserves the effects of nonnative source flags regex = augment(self(regex.source, flags), regex.xregexp.captureNames ? regex.xregexp.captureNames.slice(0) : null); } else { // Augment with `XRegExp.prototype` methods, but use native `RegExp` (avoid searching for special tokens) regex = augment(new RegExp(regex.source, flags), null, true); } return regex; } /* * Returns the last index at which a given value can be found in an array, or `-1` if it's not * present. The array is searched backwards. * @private * @param {Array} array Array to search. * @param {*} value Value to locate in the array. * @returns {Number} Last zero-based index at which the item is found, or -1. */ function lastIndexOf(array, value) { var i = array.length; if (Array.prototype.lastIndexOf) { return array.lastIndexOf(value); // Use the native method if available } while (i--) { if (array[i] === value) { return i; } } return -1; } /** * Determines whether an object is of the specified type. * @private * @param {*} value Object to check. * @param {String} type Type to check for, in lowercase. * @returns {Boolean} Whether the object matches the type. 
*/ function isType(value, type) { return Object.prototype.toString.call(value).toLowerCase() === "[object " + type + "]"; } /** * Prepares an options object from the given value. * @private * @param {String|Object} value Value to convert to an options object. * @returns {Object} Options object. */ function prepareOptions(value) { value = value || {}; if (value === "all" || value.all) { value = {natives: true, extensibility: true}; } else if (isType(value, "string")) { value = self.forEach(value, /[^\s,]+/, function (m) { this[m] = true; }, {}); } return value; } /** * Runs built-in/custom tokens in reverse insertion order, until a match is found. * @private * @param {String} pattern Original pattern from which an XRegExp object is being built. * @param {Number} pos Position to search for tokens within `pattern`. * @param {Number} scope Current regex scope. * @param {Object} context Context object assigned to token handler functions. * @returns {Object} Object with properties `output` (the substitution string returned by the * successful token handler) and `match` (the token's match array), or null. */ function runTokens(pattern, pos, scope, context) { var i = tokens.length, result = null, match, t; // Protect against constructing XRegExps within token handler and trigger functions isInsideConstructor = true; // Must reset `isInsideConstructor`, even if a `trigger` or `handler` throws try { while (i--) { // Run in reverse order t = tokens[i]; if ((t.scope === "all" || t.scope === scope) && (!t.trigger || t.trigger.call(context))) { t.pattern.lastIndex = pos; match = fixed.exec.call(t.pattern, pattern); // Fixed `exec` here allows use of named backreferences, etc. if (match && match.index === pos) { result = { output: t.handler.call(context, match, scope), match: match }; break; } } } } catch (err) { throw err; } finally { isInsideConstructor = false; } return result; } /** * Enables or disables XRegExp syntax and flag extensibility. 
* @private * @param {Boolean} on `true` to enable; `false` to disable. */ function setExtensibility(on) { self.addToken = addToken[on ? "on" : "off"]; features.extensibility = on; } /** * Enables or disables native method overrides. * @private * @param {Boolean} on `true` to enable; `false` to disable. */ function setNatives(on) { RegExp.prototype.exec = (on ? fixed : nativ).exec; RegExp.prototype.test = (on ? fixed : nativ).test; String.prototype.match = (on ? fixed : nativ).match; String.prototype.replace = (on ? fixed : nativ).replace; String.prototype.split = (on ? fixed : nativ).split; features.natives = on; } /*-------------------------------------- * Constructor *------------------------------------*/ /** * Creates an extended regular expression object for matching text with a pattern. Differs from a * native regular expression in that additional syntax and flags are supported. The returned object * is in fact a native `RegExp` and works with all native methods. * @class XRegExp * @constructor * @param {String|RegExp} pattern Regex pattern string, or an existing `RegExp` object to copy. * @param {String} [flags] Any combination of flags: * <li>`g` - global * <li>`i` - ignore case * <li>`m` - multiline anchors * <li>`n` - explicit capture * <li>`s` - dot matches all (aka singleline) * <li>`x` - free-spacing and line comments (aka extended) * <li>`y` - sticky (Firefox 3+ only) * Flags cannot be provided when constructing one `RegExp` from another. * @returns {RegExp} Extended regular expression object. * @example * * // With named capture and flag x * date = XRegExp('(?<year> [0-9]{4}) -? # year \n\ * (?<month> [0-9]{2}) -? # month \n\ * (?<day> [0-9]{2}) # day ', 'x'); * * // Passing a regex object to copy it. The copy maintains special properties for named capture, * // is augmented with `XRegExp.prototype` methods, and has a fresh `lastIndex` property (set to * // zero). Native regexes are not recompiled using XRegExp syntax. 
* XRegExp(/regex/); */ self = function (pattern, flags) { if (self.isRegExp(pattern)) { if (flags !== undef) { throw new TypeError("can't supply flags when constructing one RegExp from another"); } return copy(pattern); } // Tokens become part of the regex construction process, so protect against infinite recursion // when an XRegExp is constructed within a token handler function if (isInsideConstructor) { throw new Error("can't call the XRegExp constructor within token definition functions"); } var output = [], scope = defaultScope, tokenContext = { hasNamedCapture: false, captureNames: [], hasFlag: function (flag) { return flags.indexOf(flag) > -1; } }, pos = 0, tokenResult, match, chr; pattern = pattern === undef ? "" : String(pattern); flags = flags === undef ? "" : String(flags); if (nativ.match.call(flags, duplicateFlags)) { // Don't use test/exec because they would update lastIndex throw new SyntaxError("invalid duplicate regular expression flag"); } // Strip/apply leading mode modifier with any combination of flags except g or y: (?imnsx) pattern = nativ.replace.call(pattern, /^\(\?([\w$]+)\)/, function ($0, $1) { if (nativ.test.call(/[gy]/, $1)) { throw new SyntaxError("can't use flag g or y in mode modifier"); } flags = nativ.replace.call(flags + $1, duplicateFlags, ""); return ""; }); self.forEach(flags, /[\s\S]/, function (m) { if (registeredFlags.indexOf(m[0]) < 0) { throw new SyntaxError("invalid regular expression flag " + m[0]); } }); while (pos < pattern.length) { // Check for custom tokens at the current position tokenResult = runTokens(pattern, pos, scope, tokenContext); if (tokenResult) { output.push(tokenResult.output); pos += (tokenResult.match[0].length || 1); } else { // Check for native tokens (except character classes) at the current position match = nativ.exec.call(nativeTokens[scope], pattern.slice(pos)); if (match) { output.push(match[0]); pos += match[0].length; } else { chr = pattern.charAt(pos); if (chr === "[") { scope = classScope; 
} else if (chr === "]") { scope = defaultScope; } // Advance position by one character output.push(chr); ++pos; } } } return augment(new RegExp(output.join(""), nativ.replace.call(flags, /[^gimy]+/g, "")), tokenContext.hasNamedCapture ? tokenContext.captureNames : null); }; /*-------------------------------------- * Public methods/properties *------------------------------------*/ // Installed and uninstalled states for `XRegExp.addToken` addToken = { on: function (regex, handler, options) { options = options || {}; if (regex) { tokens.push({ pattern: copy(regex, "g" + (hasNativeY ? "y" : "")), handler: handler, scope: options.scope || defaultScope, trigger: options.trigger || null }); } // Providing `customFlags` with null `regex` and `handler` allows adding flags that do // nothing, but don't throw an error if (options.customFlags) { registeredFlags = nativ.replace.call(registeredFlags + options.customFlags, duplicateFlags, ""); } }, off: function () { throw new Error("extensibility must be installed before using addToken"); } }; /** * Extends or changes XRegExp syntax and allows custom flags. This is used internally and can be * used to create XRegExp addons. `XRegExp.install('extensibility')` must be run before calling * this function, or an error is thrown. If more than one token can match the same string, the last * added wins. * @memberOf XRegExp * @param {RegExp} regex Regex object that matches the new token. * @param {Function} handler Function that returns a new pattern string (using native regex syntax) * to replace the matched token within all future XRegExp regexes. Has access to persistent * properties of the regex being built, through `this`. Invoked with two arguments: * <li>The match array, with named backreference properties. * <li>The regex scope where the match was found. * @param {Object} [options] Options object with optional properties: * <li>`scope` {String} Scopes where the token applies: 'default', 'class', or 'all'. 
* <li>`trigger` {Function} Function that returns `true` when the token should be applied; e.g., * if a flag is set. If `false` is returned, the matched string can be matched by other tokens. * Has access to persistent properties of the regex being built, through `this` (including * function `this.hasFlag`). * <li>`customFlags` {String} Nonnative flags used by the token's handler or trigger functions. * Prevents XRegExp from throwing an invalid flag error when the specified flags are used. * @example * * // Basic usage: Adds \a for ALERT character * XRegExp.addToken( * /\\a/, * function () {return '\\x07';}, * {scope: 'all'} * ); * XRegExp('\\a[\\a-\\n]+').test('\x07\n\x07'); // -> true */ self.addToken = addToken.off; /** * Caches and returns the result of calling `XRegExp(pattern, flags)`. On any subsequent call with * the same pattern and flag combination, the cached copy is returned. * @memberOf XRegExp * @param {String} pattern Regex pattern string. * @param {String} [flags] Any combination of XRegExp flags. * @returns {RegExp} Cached XRegExp object. * @example * * while (match = XRegExp.cache('.', 'gs').exec(str)) { * // The regex is compiled once only * } */ self.cache = function (pattern, flags) { var key = pattern + "/" + (flags || ""); return cache[key] || (cache[key] = self(pattern, flags)); }; /** * Escapes any regular expression metacharacters, for use when matching literal strings. The result * can safely be used at any point within a regex that uses any flags. * @memberOf XRegExp * @param {String} str String to escape. * @returns {String} String with regex metacharacters escaped. * @example * * XRegExp.escape('Escaped? <.>'); * // -> 'Escaped\?\ <\.>' */ self.escape = function (str) { return nativ.replace.call(str, /[-[\]{}()*+?.,\\^$|#\s]/g, "\\$&"); }; /** * Executes a regex search in a specified string. Returns a match array or `null`. If the provided * regex uses named capture, named backreference properties are included on the match array. 
* Optional `pos` and `sticky` arguments specify the search start position, and whether the match * must start at the specified position only. The `lastIndex` property of the provided regex is not * used, but is updated for compatibility. Also fixes browser bugs compared to the native * `RegExp.prototype.exec` and can be used reliably cross-browser. * @memberOf XRegExp * @param {String} str String to search. * @param {RegExp} regex Regex to search with. * @param {Number} [pos=0] Zero-based index at which to start the search. * @param {Boolean|String} [sticky=false] Whether the match must start at the specified position * only. The string `'sticky'` is accepted as an alternative to `true`. * @returns {Array} Match array with named backreference properties, or null. * @example * * // Basic use, with named backreference * var match = XRegExp.exec('U+2620', XRegExp('U\\+(?<hex>[0-9A-F]{4})')); * match.hex; // -> '2620' * * // With pos and sticky, in a loop * var pos = 2, result = [], match; * while (match = XRegExp.exec('<1><2><3><4>5<6>', /<(\d)>/, pos, 'sticky')) { * result.push(match[1]); * pos = match.index + match[0].length; * } * // result -> ['2', '3', '4'] */ self.exec = function (str, regex, pos, sticky) { var r2 = copy(regex, "g" + (sticky && hasNativeY ? "y" : ""), (sticky === false ? "y" : "")), match; r2.lastIndex = pos = pos || 0; match = fixed.exec.call(r2, str); // Fixed `exec` required for `lastIndex` fix, etc. if (sticky && match && match.index !== pos) { match = null; } if (regex.global) { regex.lastIndex = match ? r2.lastIndex : 0; } return match; }; /** * Executes a provided function once per regex match. * @memberOf XRegExp * @param {String} str String to search. * @param {RegExp} regex Regex to search with. * @param {Function} callback Function to execute for each match. Invoked with four arguments: * <li>The match array, with named backreference properties. * <li>The zero-based match index. * <li>The string being traversed. 
 * <li>The regex object being used to traverse the string.
 * @param {*} [context] Object to use as `this` when executing `callback`.
 * @returns {*} Provided `context` object.
 * @example
 *
 * // Extracts every other digit from a string
 * XRegExp.forEach('1a2345', /\d/, function (match, i) {
 *   if (i % 2) this.push(+match[0]);
 * }, []);
 * // -> [2, 4]
 */
self.forEach = function (str, regex, callback, context) {
    var pos = 0,
        i = -1,
        match;
    while ((match = self.exec(str, regex, pos))) {
        callback.call(context, match, ++i, str, regex);
        // Advance at least one character past zero-length matches to guarantee progress
        pos = match.index + (match[0].length || 1);
    }
    return context;
};

/**
 * Copies a regex object and adds flag `g`. The copy maintains special properties for named
 * capture, is augmented with `XRegExp.prototype` methods, and has a fresh `lastIndex` property
 * (set to zero). Native regexes are not recompiled using XRegExp syntax.
 * @memberOf XRegExp
 * @param {RegExp} regex Regex to globalize.
 * @returns {RegExp} Copy of the provided regex with flag `g` added.
 * @example
 *
 * var globalCopy = XRegExp.globalize(/regex/);
 * globalCopy.global; // -> true
 */
self.globalize = function (regex) {
    return copy(regex, "g");
};

/**
 * Installs optional features according to the specified options.
 * @memberOf XRegExp
 * @param {Object|String} options Options object or string.
 * @example
 *
 * // With an options object
 * XRegExp.install({
 *   // Overrides native regex methods with fixed/extended versions that support named
 *   // backreferences and fix numerous cross-browser bugs
 *   natives: true,
 *
 *   // Enables extensibility of XRegExp syntax and flags
 *   extensibility: true
 * });
 *
 * // With an options string
 * XRegExp.install('natives extensibility');
 *
 * // Using a shortcut to install all optional features
 * XRegExp.install('all');
 */
self.install = function (options) {
    options = prepareOptions(options); // Normalizes the string form into an options object
    // Idempotent: features that are already installed are left untouched
    if (!features.natives && options.natives) {
        setNatives(true);
    }
    if (!features.extensibility && options.extensibility) {
        setExtensibility(true);
    }
};

/**
 * Checks whether an individual optional feature is installed.
 * @memberOf XRegExp
 * @param {String} feature Name of the feature to check. One of:
 * <li>`natives`
 * <li>`extensibility`
 * @returns {Boolean} Whether the feature is installed.
 * @example
 *
 * XRegExp.isInstalled('natives');
 */
self.isInstalled = function (feature) {
    return !!(features[feature]);
};

/**
 * Returns `true` if an object is a regex; `false` if it isn't. This works correctly for regexes
 * created in another frame, when `instanceof` and `constructor` checks would fail.
 * @memberOf XRegExp
 * @param {*} value Object to check.
 * @returns {Boolean} Whether the object is a `RegExp` object.
 * @example
 *
 * XRegExp.isRegExp('string'); // -> false
 * XRegExp.isRegExp(/regex/i); // -> true
 * XRegExp.isRegExp(RegExp('^', 'm')); // -> true
 * XRegExp.isRegExp(XRegExp('(?s).')); // -> true
 */
self.isRegExp = function (value) {
    // `isType` is a closure helper defined earlier in this file
    return isType(value, "regexp");
};

/**
 * Retrieves the matches from searching a string using a chain of regexes that successively search
 * within previous matches. The provided `chain` array can contain regexes and objects with `regex`
 * and `backref` properties. When a backreference is specified, the named or numbered backreference
 * is passed forward to the next regex or returned.
 * @memberOf XRegExp
 * @param {String} str String to search.
 * @param {Array} chain Regexes that each search for matches within preceding results.
 * @returns {Array} Matches by the last regex in the chain, or an empty array.
 * @example
 *
 * // Basic usage; matches numbers within <b> tags
 * XRegExp.matchChain('1 <b>2</b> 3 <b>4 a 56</b>', [
 *   XRegExp('(?is)<b>.*?</b>'),
 *   /\d+/
 * ]);
 * // -> ['2', '4', '56']
 *
 * // Passing forward and returning specific backreferences
 * html = '<a href="http://xregexp.com/api/">XRegExp</a>\
 *         <a href="http://www.google.com/">Google</a>';
 * XRegExp.matchChain(html, [
 *   {regex: /<a href="([^"]+)">/i, backref: 1},
 *   {regex: XRegExp('(?i)^https?://(?<domain>[^/?#]+)'), backref: 'domain'}
 * ]);
 * // -> ['xregexp.com', 'www.google.com']
 */
self.matchChain = function (str, chain) {
    // Each recursion level searches every string produced by the previous level and feeds the
    // collected matches (or selected backreferences) to the next regex in the chain
    return (function recurseChain(values, level) {
        var item = chain[level].regex ? chain[level] : {regex: chain[level]}, // Normalize to {regex[, backref]}
            matches = [],
            addMatch = function (match) {
                matches.push(item.backref ? (match[item.backref] || "") : match[0]);
            },
            i;
        for (i = 0; i < values.length; ++i) {
            self.forEach(values[i], item.regex, addMatch);
        }
        // Stop at the last link of the chain, or early if a level produced no matches
        return ((level === chain.length - 1) || !matches.length) ?
                matches :
                recurseChain(matches, level + 1);
    }([str], 0));
};

/**
 * Returns a new string with one or all matches of a pattern replaced. The pattern can be a string
 * or regex, and the replacement can be a string or a function to be called for each match. To
 * perform a global search and replace, use the optional `scope` argument or include flag `g` if
 * using a regex. Replacement strings can use `${n}` for named and numbered backreferences.
 * Replacement functions can use named backreferences via `arguments[0].name`. Also fixes browser
 * bugs compared to the native `String.prototype.replace` and can be used reliably cross-browser.
 * @memberOf XRegExp
 * @param {String} str String to search.
 * @param {RegExp|String} search Search pattern to be replaced.
 * @param {String|Function} replacement Replacement string or a function invoked to create it.
 *   Replacement strings can include special replacement syntax:
 *     <li>$$ - Inserts a literal '$'.
 *     <li>$&, $0 - Inserts the matched substring.
 *     <li>$` - Inserts the string that precedes the matched substring (left context).
 *     <li>$' - Inserts the string that follows the matched substring (right context).
 *     <li>$n, $nn - Where n/nn are digits referencing an existent capturing group, inserts
 *       backreference n/nn.
 *     <li>${n} - Where n is a name or any number of digits that reference an existent capturing
 *       group, inserts backreference n.
 *   Replacement functions are invoked with three or more arguments:
 *     <li>The matched substring (corresponds to $& above). Named backreferences are accessible as
 *       properties of this first argument.
 *     <li>0..n arguments, one for each backreference (corresponding to $1, $2, etc. above).
 *     <li>The zero-based index of the match within the total search string.
 *     <li>The total string being searched.
 * @param {String} [scope='one'] Use 'one' to replace the first match only, or 'all'. If not
 *   explicitly specified and using a regex with flag `g`, `scope` is 'all'.
 * @returns {String} New string with one or all matches replaced.
 * @example
 *
 * // Regex search, using named backreferences in replacement string
 * var name = XRegExp('(?<first>\\w+) (?<last>\\w+)');
 * XRegExp.replace('John Smith', name, '${last}, ${first}');
 * // -> 'Smith, John'
 *
 * // Regex search, using named backreferences in replacement function
 * XRegExp.replace('John Smith', name, function (match) {
 *   return match.last + ', ' + match.first;
 * });
 * // -> 'Smith, John'
 *
 * // Global string search/replacement
 * XRegExp.replace('RegExp builds RegExps', 'RegExp', 'XRegExp', 'all');
 * // -> 'XRegExp builds XRegExps'
 */
self.replace = function (str, search, replacement, scope) {
    var isRegex = self.isRegExp(search),
        search2 = search,
        result;
    if (isRegex) {
        if (scope === undef && search.global) {
            scope = "all"; // Follow flag g when `scope` isn't explicit
        }
        // Note that since a copy is used, `search`'s `lastIndex` isn't updated *during* replacement iterations
        search2 = copy(search, scope === "all" ? "g" : "", scope === "all" ? "" : "g");
    } else if (scope === "all") {
        // String search with scope 'all': escape it and search as a global literal regex
        search2 = new RegExp(self.escape(String(search)), "g");
    }
    result = fixed.replace.call(String(str), search2, replacement); // Fixed `replace` required for named backreferences, etc.
    if (isRegex && search.global) {
        search.lastIndex = 0; // Fixes IE, Safari bug (last tested IE 9, Safari 5.1)
    }
    return result;
};

/**
 * Splits a string into an array of strings using a regex or string separator. Matches of the
 * separator are not included in the result array. However, if `separator` is a regex that contains
 * capturing groups, backreferences are spliced into the result each time `separator` is matched.
 * Fixes browser bugs compared to the native `String.prototype.split` and can be used reliably
 * cross-browser.
 * @memberOf XRegExp
 * @param {String} str String to split.
 * @param {RegExp|String} separator Regex or string to use for separating the string.
 * @param {Number} [limit] Maximum number of items to include in the result array.
 * @returns {Array} Array of substrings.
 * @example
 *
 * // Basic use
 * XRegExp.split('a b c', ' ');
 * // -> ['a', 'b', 'c']
 *
 * // With limit
 * XRegExp.split('a b c', ' ', 2);
 * // -> ['a', 'b']
 *
 * // Backreferences in result array
 * XRegExp.split('..word1..', /([a-z]+)(\d+)/i);
 * // -> ['..', 'word', '1', '..']
 */
self.split = function (str, separator, limit) {
    // Delegates to the fixed `split` so browser bugs are avoided without overriding natives
    return fixed.split.call(str, separator, limit);
};

/**
 * Executes a regex search in a specified string. Returns `true` or `false`. Optional `pos` and
 * `sticky` arguments specify the search start position, and whether the match must start at the
 * specified position only. The `lastIndex` property of the provided regex is not used, but is
 * updated for compatibility. Also fixes browser bugs compared to the native
 * `RegExp.prototype.test` and can be used reliably cross-browser.
 * @memberOf XRegExp
 * @param {String} str String to search.
 * @param {RegExp} regex Regex to search with.
 * @param {Number} [pos=0] Zero-based index at which to start the search.
 * @param {Boolean|String} [sticky=false] Whether the match must start at the specified position
 *   only. The string `'sticky'` is accepted as an alternative to `true`.
 * @returns {Boolean} Whether the regex matched the provided value.
 * @example
 *
 * // Basic use
 * XRegExp.test('abc', /c/); // -> true
 *
 * // With pos and sticky
 * XRegExp.test('abc', /c/, 0, 'sticky'); // -> false
 */
self.test = function (str, regex, pos, sticky) {
    // Do this the easy way :-)
    return !!self.exec(str, regex, pos, sticky);
};

/**
 * Uninstalls optional features according to the specified options.
 * @memberOf XRegExp
 * @param {Object|String} options Options object or string.
 * @example
 *
 * // With an options object
 * XRegExp.uninstall({
 *   // Restores native regex methods
 *   natives: true,
 *
 *   // Disables additional syntax and flag extensions
 *   extensibility: true
 * });
 *
 * // With an options string
 * XRegExp.uninstall('natives extensibility');
 *
 * // Using a shortcut to uninstall all optional features
 * XRegExp.uninstall('all');
 */
self.uninstall = function (options) {
    options = prepareOptions(options); // Normalizes the string form into an options object
    // Mirror image of `install`: only features that are currently installed are uninstalled
    if (features.natives && options.natives) {
        setNatives(false);
    }
    if (features.extensibility && options.extensibility) {
        setExtensibility(false);
    }
};

/**
 * Returns an XRegExp object that is the union of the given patterns. Patterns can be provided as
 * regex objects or strings. Metacharacters are escaped in patterns provided as strings.
 * Backreferences in provided regex objects are automatically renumbered to work correctly. Native
 * flags used by provided regexes are ignored in favor of the `flags` argument.
 * @memberOf XRegExp
 * @param {Array} patterns Regexes and strings to combine.
 * @param {String} [flags] Any combination of XRegExp flags.
 * @returns {RegExp} Union of the provided regexes and strings.
 * @example
 *
 * XRegExp.union(['a+b*c', /(dogs)\1/, /(cats)\1/], 'i');
 * // -> /a\+b\*c|(dogs)\1|(cats)\2/i
 *
 * XRegExp.union([XRegExp('(?<pet>dogs)\\k<pet>'), XRegExp('(?<pet>cats)\\k<pet>')]);
 * // -> XRegExp('(?<pet>dogs)\\k<pet>|(?<pet>cats)\\k<pet>')
 */
self.union = function (patterns, flags) {
    // `parts` matches, in order: a capturing-group opener `(` (not `(?`), a numbered
    // backreference `\n`, any other escaped character, or a whole character class — so that
    // parens and backslashes inside classes are never misread as groups or backreferences
    var parts = /(\()(?!\?)|\\([1-9]\d*)|\\[\s\S]|\[(?:[^\\\]]|\\[\s\S])*]/g,
        numCaptures = 0,
        numPriorCaptures,
        captureNames,
        rewrite = function (match, paren, backref) {
            var name = captureNames[numCaptures - numPriorCaptures];
            if (paren) { // Capturing group
                ++numCaptures;
                if (name) { // If the current capture has a name
                    return "(?<" + name + ">";
                }
            } else if (backref) { // Backreference
                // Shift the backreference by the number of groups in earlier patterns
                return "\\" + (+backref + numPriorCaptures);
            }
            return match;
        },
        output = [],
        pattern,
        i;
    if (!(isType(patterns, "array") && patterns.length)) {
        throw new TypeError("patterns must be a nonempty array");
    }
    for (i = 0; i < patterns.length; ++i) {
        pattern = patterns[i];
        if (self.isRegExp(pattern)) {
            numPriorCaptures = numCaptures;
            captureNames = (pattern.xregexp && pattern.xregexp.captureNames) || [];
            // Rewrite backreferences. Passing to XRegExp dies on octals and ensures patterns
            // are independently valid; helps keep this simple. Named captures are put back
            output.push(self(pattern.source).source.replace(parts, rewrite));
        } else {
            // Plain strings are matched literally
            output.push(self.escape(pattern));
        }
    }
    return self(output.join("|"), flags);
};

/**
 * The XRegExp version number.
 * @static
 * @memberOf XRegExp
 * @type String
 */
self.version = "2.0.0";

/*--------------------------------------
 * Fixed/extended native methods
 *------------------------------------*/

/**
 * Adds named capture support (with backreferences returned as `result.name`), and fixes browser
 * bugs in the native `RegExp.prototype.exec`. Calling `XRegExp.install('natives')` uses this to
 * override the native method. Use via `XRegExp.exec` without overriding natives.
 * @private
 * @param {String} str String to search.
 * @returns {Array} Match array with named backreference properties, or null.
 */
fixed.exec = function (str) {
    var match, name, r2, origLastIndex, i;
    if (!this.global) {
        // Non-global regexes must not have their `lastIndex` changed by `exec`; save it
        origLastIndex = this.lastIndex;
    }
    match = nativ.exec.apply(this, arguments);
    if (match) {
        // Fix browsers whose `exec` methods don't consistently return `undefined` for
        // nonparticipating capturing groups
        if (!compliantExecNpcg && match.length > 1 && lastIndexOf(match, "") > -1) {
            r2 = new RegExp(this.source, nativ.replace.call(getNativeFlags(this), "g", ""));
            // Using `str.slice(match.index)` rather than `match[0]` in case lookahead allowed
            // matching due to characters outside the match
            nativ.replace.call(String(str).slice(match.index), r2, function () {
                var i;
                for (i = 1; i < arguments.length - 2; ++i) {
                    if (arguments[i] === undef) {
                        match[i] = undef;
                    }
                }
            });
        }
        // Attach named capture properties
        if (this.xregexp && this.xregexp.captureNames) {
            for (i = 1; i < match.length; ++i) {
                name = this.xregexp.captureNames[i - 1];
                if (name) {
                    match[name] = match[i];
                }
            }
        }
        // Fix browsers that increment `lastIndex` after zero-length matches
        if (this.global && !match[0].length && (this.lastIndex > match.index)) {
            this.lastIndex = match.index;
        }
    }
    if (!this.global) {
        this.lastIndex = origLastIndex; // Fixes IE, Opera bug (last tested IE 9, Opera 11.6)
    }
    return match;
};

/**
 * Fixes browser bugs in the native `RegExp.prototype.test`. Calling `XRegExp.install('natives')`
 * uses this to override the native method.
 * @private
 * @param {String} str String to search.
 * @returns {Boolean} Whether the regex matched the provided value.
 */
fixed.test = function (str) {
    // Do this the easy way :-)
    return !!fixed.exec.call(this, str);
};

/**
 * Adds named capture support (with backreferences returned as `result.name`), and fixes browser
 * bugs in the native `String.prototype.match`. Calling `XRegExp.install('natives')` uses this to
 * override the native method.
 * @private
 * @param {RegExp} regex Regex to search with.
 * @returns {Array} If `regex` uses flag g, an array of match strings or null. Without flag g, the
 *   result of calling `regex.exec(this)`.
 */
fixed.match = function (regex) {
    if (!self.isRegExp(regex)) {
        regex = new RegExp(regex); // Use native `RegExp`
    } else if (regex.global) {
        var result = nativ.match.apply(this, arguments);
        regex.lastIndex = 0; // Fixes IE bug
        return result;
    }
    // Non-global: `this` is the string being matched, so `regex` becomes the receiver of `exec`
    return fixed.exec.call(regex, this);
};

/**
 * Adds support for `${n}` tokens for named and numbered backreferences in replacement text, and
 * provides named backreferences to replacement functions as `arguments[0].name`. Also fixes
 * browser bugs in replacement text syntax when performing a replacement using a nonregex search
 * value, and the value of a replacement regex's `lastIndex` property during replacement iterations
 * and upon completion. Note that this doesn't support SpiderMonkey's proprietary third (`flags`)
 * argument. Calling `XRegExp.install('natives')` uses this to override the native method. Use via
 * `XRegExp.replace` without overriding natives.
 * @private
 * @param {RegExp|String} search Search pattern to be replaced.
 * @param {String|Function} replacement Replacement string or a function invoked to create it.
 * @returns {String} New string with one or all matches replaced.
 */
fixed.replace = function (search, replacement) {
    var isRegex = self.isRegExp(search),
        captureNames,
        result,
        str,
        origLastIndex;
    if (isRegex) {
        if (search.xregexp) {
            captureNames = search.xregexp.captureNames;
        }
        if (!search.global) {
            // Non-global regexes must leave `lastIndex` untouched; save it for restoration below
            origLastIndex = search.lastIndex;
        }
    } else {
        search += ""; // Type-convert a nonregex search value to string
    }
    if (isType(replacement, "function")) {
        result = nativ.replace.call(String(this), search, function () {
            var args = arguments, i;
            if (captureNames) {
                // Change the `arguments[0]` string primitive to a `String` object that can store properties
                args[0] = new String(args[0]);
                // Store named backreferences on the first argument
                for (i = 0; i < captureNames.length; ++i) {
                    if (captureNames[i]) {
                        args[0][captureNames[i]] = args[i + 1];
                    }
                }
            }
            // Update `lastIndex` before calling `replacement`.
            // Fixes IE, Chrome, Firefox, Safari bug (last tested IE 9, Chrome 17, Firefox 11, Safari 5.1)
            if (isRegex && search.global) {
                search.lastIndex = args[args.length - 2] + args[0].length;
            }
            return replacement.apply(null, args);
        });
    } else {
        str = String(this); // Ensure `args[args.length - 1]` will be a string when given nonstring `this`
        result = nativ.replace.call(str, search, function () {
            var args = arguments; // Keep this function's `arguments` available through closure
            return nativ.replace.call(String(replacement), replacementToken, function ($0, $1, $2) {
                var n;
                // Named or numbered backreference with curly brackets
                if ($1) {
                    /* XRegExp behavior for `${n}`:
                     * 1. Backreference to numbered capture, where `n` is 1+ digits. `0`, `00`, etc. is the entire match.
                     * 2. Backreference to named capture `n`, if it exists and is not a number overridden by numbered capture.
                     * 3. Otherwise, it's an error.
                     */
                    n = +$1; // Type-convert; drop leading zeros
                    if (n <= args.length - 3) {
                        return args[n] || "";
                    }
                    n = captureNames ? lastIndexOf(captureNames, $1) : -1;
                    if (n < 0) {
                        throw new SyntaxError("backreference to undefined group " + $0);
                    }
                    return args[n + 1] || "";
                }
                // Else, special variable or numbered backreference (without curly brackets)
                if ($2 === "$") return "$";
                if ($2 === "&" || +$2 === 0) return args[0]; // $&, $0 (not followed by 1-9), $00
                if ($2 === "`") return args[args.length - 1].slice(0, args[args.length - 2]);
                if ($2 === "'") return args[args.length - 1].slice(args[args.length - 2] + args[0].length);
                // Else, numbered backreference (without curly brackets)
                $2 = +$2; // Type-convert; drop leading zero
                /* XRegExp behavior:
                 * - Backreferences without curly brackets end after 1 or 2 digits. Use `${..}` for more digits.
                 * - `$1` is an error if there are no capturing groups.
                 * - `$10` is an error if there are less than 10 capturing groups. Use `${1}0` instead.
                 * - `$01` is equivalent to `$1` if a capturing group exists, otherwise it's an error.
                 * - `$0` (not followed by 1-9), `$00`, and `$&` are the entire match.
                 * Native behavior, for comparison:
                 * - Backreferences end after 1 or 2 digits. Cannot use backreference to capturing group 100+.
                 * - `$1` is a literal `$1` if there are no capturing groups.
                 * - `$10` is `$1` followed by a literal `0` if there are less than 10 capturing groups.
                 * - `$01` is equivalent to `$1` if a capturing group exists, otherwise it's a literal `$01`.
                 * - `$0` is a literal `$0`. `$&` is the entire match.
                 */
                if (!isNaN($2)) {
                    if ($2 > args.length - 3) {
                        throw new SyntaxError("backreference to undefined group " + $0);
                    }
                    return args[$2] || "";
                }
                throw new SyntaxError("invalid token " + $0);
            });
        });
    }
    if (isRegex) {
        if (search.global) {
            search.lastIndex = 0; // Fixes IE, Safari bug (last tested IE 9, Safari 5.1)
        } else {
            search.lastIndex = origLastIndex; // Fixes IE, Opera bug (last tested IE 9, Opera 11.6)
        }
    }
    return result;
};

/**
 * Fixes browser bugs in the native `String.prototype.split`.
 Calling `XRegExp.install('natives')`
 * uses this to override the native method. Use via `XRegExp.split` without overriding natives.
 * @private
 * @param {RegExp|String} separator Regex or string to use for separating the string.
 * @param {Number} [limit] Maximum number of items to include in the result array.
 * @returns {Array} Array of substrings.
 */
fixed.split = function (separator, limit) {
    if (!self.isRegExp(separator)) {
        return nativ.split.apply(this, arguments); // use faster native method
    }
    var str = String(this),
        origLastIndex = separator.lastIndex,
        output = [],
        lastLastIndex = 0,
        lastLength;
    /* Values for `limit`, per the spec:
     * If undefined: pow(2,32) - 1
     * If 0, Infinity, or NaN: 0
     * If positive number: limit = floor(limit); if (limit >= pow(2,32)) limit -= pow(2,32);
     * If negative number: pow(2,32) - floor(abs(limit))
     * If other: Type-convert, then use the above rules
     */
    limit = (limit === undef ? -1 : limit) >>> 0;
    self.forEach(str, separator, function (match) {
        if ((match.index + match[0].length) > lastLastIndex) { // != `if (match[0].length)`
            output.push(str.slice(lastLastIndex, match.index));
            // Splice capturing-group backreferences into the result, per the spec
            if (match.length > 1 && match.index < str.length) {
                Array.prototype.push.apply(output, match.slice(1));
            }
            lastLength = match[0].length;
            lastLastIndex = match.index + lastLength;
        }
    });
    if (lastLastIndex === str.length) {
        if (!nativ.test.call(separator, "") || lastLength) {
            output.push("");
        }
    } else {
        output.push(str.slice(lastLastIndex));
    }
    separator.lastIndex = origLastIndex; // Restore the caller's `lastIndex`
    return output.length > limit ? output.slice(0, limit) : output;
};

/*--------------------------------------
 * Built-in tokens
 *------------------------------------*/

// Shortcut
add = addToken.on;

/* Letter identity escapes that natively match literal characters: \p, \P, etc.
 * Should be SyntaxErrors but are allowed in web reality. XRegExp makes them errors for cross-
 * browser consistency and to reserve their syntax, but lets them be superseded by XRegExp addons.
 */
add(/\\([ABCE-RTUVXYZaeg-mopqyz]|c(?![A-Za-z])|u(?![\dA-Fa-f]{4})|x(?![\dA-Fa-f]{2}))/,
    function (match, scope) {
        // \B is allowed in default scope only
        if (match[1] === "B" && scope === defaultScope) {
            return match[0];
        }
        throw new SyntaxError("invalid escape " + match[0]);
    },
    {scope: "all"});

/* Empty character class: [] or [^]
 * Fixes a critical cross-browser syntax inconsistency. Unless this is standardized (per the spec),
 * regex syntax can't be accurately parsed because character class endings can't be determined.
 */
add(/\[(\^?)]/, function (match) {
    // For cross-browser compatibility with ES3, convert [] to \b\B and [^] to [\s\S].
    // (?!) should work like \b\B, but is unreliable in Firefox
    return match[1] ? "[\\s\\S]" : "\\b\\B";
});

/* Comment pattern: (?# )
 * Inline comments are an alternative to the line comments allowed in free-spacing mode (flag x).
 */
add(/(?:\(\?#[^)]*\))+/, function (match) {
    // Keep tokens separated unless the following token is a quantifier
    return nativ.test.call(quantifier, match.input.slice(match.index + match[0].length)) ?
            "" : "(?:)";
});

/* Named backreference: \k<name>
 * Backreference names can use the characters A-Z, a-z, 0-9, _, and $ only.
 */
add(/\\k<([\w$]+)>/, function (match) {
    // Numeric names are treated as group numbers; others are looked up in `captureNames`
    var index = isNaN(match[1]) ? (lastIndexOf(this.captureNames, match[1]) + 1) : +match[1],
        endIndex = match.index + match[0].length;
    if (!index || index > this.captureNames.length) {
        throw new SyntaxError("backreference to undefined group " + match[0]);
    }
    // Keep backreferences separate from subsequent literal numbers
    return "\\" + index + (
        endIndex === match.input.length || isNaN(match.input.charAt(endIndex)) ?
                "" : "(?:)"
    );
});

/* Whitespace and line comments, in free-spacing mode (aka extended mode, flag x) only.
 */
add(/(?:\s+|#.*)+/, function (match) {
    // Keep tokens separated unless the following token is a quantifier
    return nativ.test.call(quantifier, match.input.slice(match.index + match[0].length)) ?
            "" : "(?:)";
}, {
    trigger: function () {
        return this.hasFlag("x");
    },
    customFlags: "x"
});

/* Dot, in dotall mode (aka singleline mode, flag s) only.
 */
add(/\./, function () {
    return "[\\s\\S]";
}, {
    trigger: function () {
        return this.hasFlag("s");
    },
    customFlags: "s"
});

/* Named capturing group; match the opening delimiter only: (?<name>
 * Capture names can use the characters A-Z, a-z, 0-9, _, and $ only. Names can't be integers.
 * Supports Python-style (?P<name> as an alternate syntax to avoid issues in recent Opera (which
 * natively supports the Python-style syntax). Otherwise, XRegExp might treat numbered
 * backreferences to Python-style named capture as octals.
 */
add(/\(\?P?<([\w$]+)>/, function (match) {
    if (!isNaN(match[1])) {
        // Avoid incorrect lookups, since named backreferences are added to match arrays
        throw new SyntaxError("can't use integer as capture name " + match[0]);
    }
    this.captureNames.push(match[1]);
    this.hasNamedCapture = true;
    return "(";
});

/* Numbered backreference or octal, plus any following digits: \0, \11, etc.
 * Octals except \0 not followed by 0-9 and backreferences to unopened capture groups throw an
 * error. Other matches are returned unaltered. IE <= 8 doesn't support backreferences greater than
 * \99 in regex syntax.
 */
add(/\\(\d+)/, function (match, scope) {
    if (!(scope === defaultScope && /^[1-9]/.test(match[1]) && +match[1] <= this.captureNames.length) &&
            match[1] !== "0") {
        throw new SyntaxError("can't use octal escape or backreference to undefined group " + match[0]);
    }
    return match[0];
}, {scope: "all"});

/* Capturing group; match the opening parenthesis only.
 * Required for support of named capturing groups. Also adds explicit capture mode (flag n).
 */
add(/\((?!\?)/, function () {
    if (this.hasFlag("n")) {
        // Explicit capture mode: plain parens become noncapturing groups
        return "(?:";
    }
    this.captureNames.push(null);
    return "(";
}, {customFlags: "n"});

/*--------------------------------------
 * Expose XRegExp
 *------------------------------------*/

// For CommonJS environments
if (typeof exports !== "undefined") {
    exports.XRegExp = self;
}

return self;

}());

/***** unicode-base.js *****/

/*!
 * XRegExp Unicode Base v1.0.0
 * (c) 2008-2012 Steven Levithan <http://xregexp.com/>
 * MIT License
 * Uses Unicode 6.1 <http://unicode.org/>
 */

/**
 * Adds support for the `\p{L}` or `\p{Letter}` Unicode category. Addon packages for other Unicode
 * categories, scripts, blocks, and properties are available separately. All Unicode tokens can be
 * inverted using `\P{..}` or `\p{^..}`. Token names are case insensitive, and any spaces, hyphens,
 * and underscores are ignored.
 * @requires XRegExp
 */
(function (XRegExp) {
    "use strict";

    // Registry of expanded Unicode range strings, keyed by slugged token name.
    // Inverted sets are cached here too, under a "^"-prefixed key (see `cacheInversion`).
    var unicode = {};

    /*--------------------------------------
     * Private helper functions
     *------------------------------------*/

    // Generates a standardized token name (lowercase, with hyphens, spaces, and underscores removed)
    function slug(name) {
        return name.replace(/[- _]+/g, "").toLowerCase();
    }

    // Expands a list of Unicode code points and ranges to be usable in a regex character class
    function expand(str) {
        return str.replace(/\w{4}/g, "\\u$&");
    }

    // Adds leading zeros if shorter than four characters
    function pad4(str) {
        while (str.length < 4) {
            str = "0" + str;
        }
        return str;
    }

    // Converts a hexadecimal number to decimal
    function dec(hex) {
        return parseInt(hex, 16);
    }

    // Converts a decimal number to hexadecimal
    // (the parameter intentionally shadows the sibling `dec` function above)
    function hex(dec) {
        return parseInt(dec, 10).toString(16);
    }

    // Inverts a list of Unicode code points and ranges (BMP only: U+0000-U+FFFF)
    function invert(range) {
        var output = [],
            lastEnd = -1,
            start;
        XRegExp.forEach(range, /\\u(\w{4})(?:-\\u(\w{4}))?/, function (m) {
            start = dec(m[1]);
            if (start > (lastEnd + 1)) {
                // Emit the gap between the previous range and this one
                output.push("\\u" + pad4(hex(lastEnd + 1)));
                if (start > (lastEnd + 2)) {
                    output.push("-\\u" + pad4(hex(start - 1)));
                }
            }
            lastEnd = dec(m[2] || m[1]);
        });
        // Emit the trailing gap up to U+FFFF, if any
        if (lastEnd < 0xFFFF) {
            output.push("\\u" + pad4(hex(lastEnd + 1)));
            if (lastEnd < 0xFFFE) {
                output.push("-\\uFFFF");
            }
        }
        return output.join("");
    }

    // Generates an inverted token on first use
    function cacheInversion(item) {
        return unicode["^" + item] || (unicode["^" + item] = invert(unicode[item]));
    }

    /*--------------------------------------
     * Core functionality
     *------------------------------------*/

    XRegExp.install("extensibility");

    /**
     * Adds to the list of Unicode properties that XRegExp regexes can match via \p{..} or \P{..}.
     * @memberOf XRegExp
     * @param {Object} pack Named sets of Unicode code points and ranges.
     * @param {Object} [aliases] Aliases for the primary token names.
     * @example
     *
     * XRegExp.addUnicodePackage({
     *   XDigit: '0030-00390041-00460061-0066' // 0-9A-Fa-f
     * }, {
     *   XDigit: 'Hexadecimal'
     * });
     */
    XRegExp.addUnicodePackage = function (pack, aliases) {
        var p;
        if (!XRegExp.isInstalled("extensibility")) {
            throw new Error("extensibility must be installed before adding Unicode packages");
        }
        if (pack) {
            for (p in pack) {
                if (pack.hasOwnProperty(p)) {
                    unicode[slug(p)] = expand(pack[p]);
                }
            }
        }
        if (aliases) {
            // Aliases simply share the already-expanded range string of their primary token
            for (p in aliases) {
                if (aliases.hasOwnProperty(p)) {
                    unicode[slug(aliases[p])] = unicode[slug(p)];
                }
            }
        }
    };

    /* Adds data for the Unicode `Letter` category. Addon packages include other categories, scripts,
     * blocks, and properties.
*/ XRegExp.addUnicodePackage({ L: "0041-005A0061-007A00AA00B500BA00C0-00D600D8-00F600F8-02C102C6-02D102E0-02E402EC02EE0370-037403760377037A-037D03860388-038A038C038E-03A103A3-03F503F7-0481048A-05270531-055605590561-058705D0-05EA05F0-05F20620-064A066E066F0671-06D306D506E506E606EE06EF06FA-06FC06FF07100712-072F074D-07A507B107CA-07EA07F407F507FA0800-0815081A082408280840-085808A008A2-08AC0904-0939093D09500958-09610971-09770979-097F0985-098C098F09900993-09A809AA-09B009B209B6-09B909BD09CE09DC09DD09DF-09E109F009F10A05-0A0A0A0F0A100A13-0A280A2A-0A300A320A330A350A360A380A390A59-0A5C0A5E0A72-0A740A85-0A8D0A8F-0A910A93-0AA80AAA-0AB00AB20AB30AB5-0AB90ABD0AD00AE00AE10B05-0B0C0B0F0B100B13-0B280B2A-0B300B320B330B35-0B390B3D0B5C0B5D0B5F-0B610B710B830B85-0B8A0B8E-0B900B92-0B950B990B9A0B9C0B9E0B9F0BA30BA40BA8-0BAA0BAE-0BB90BD00C05-0C0C0C0E-0C100C12-0C280C2A-0C330C35-0C390C3D0C580C590C600C610C85-0C8C0C8E-0C900C92-0CA80CAA-0CB30CB5-0CB90CBD0CDE0CE00CE10CF10CF20D05-0D0C0D0E-0D100D12-0D3A0D3D0D4E0D600D610D7A-0D7F0D85-0D960D9A-0DB10DB3-0DBB0DBD0DC0-0DC60E01-0E300E320E330E40-0E460E810E820E840E870E880E8A0E8D0E94-0E970E99-0E9F0EA1-0EA30EA50EA70EAA0EAB0EAD-0EB00EB20EB30EBD0EC0-0EC40EC60EDC-0EDF0F000F40-0F470F49-0F6C0F88-0F8C1000-102A103F1050-1055105A-105D106110651066106E-10701075-1081108E10A0-10C510C710CD10D0-10FA10FC-1248124A-124D1250-12561258125A-125D1260-1288128A-128D1290-12B012B2-12B512B8-12BE12C012C2-12C512C8-12D612D8-13101312-13151318-135A1380-138F13A0-13F41401-166C166F-167F1681-169A16A0-16EA1700-170C170E-17111720-17311740-17511760-176C176E-17701780-17B317D717DC1820-18771880-18A818AA18B0-18F51900-191C1950-196D1970-19741980-19AB19C1-19C71A00-1A161A20-1A541AA71B05-1B331B45-1B4B1B83-1BA01BAE1BAF1BBA-1BE51C00-1C231C4D-1C4F1C5A-1C7D1CE9-1CEC1CEE-1CF11CF51CF61D00-1DBF1E00-1F151F18-1F1D1F20-1F451F48-1F4D1F50-1F571F591F5B1F5D1F5F-1F7D1F80-1FB41FB6-1FBC1FBE1FC2-1FC41FC6-1FCC1FD0-1FD31FD6-1FDB1FE0-1FEC1FF2-1FF41FF6-1FFC2071207F2090-209C21022107210A-211321152119-211D212421262128212A-212D212F-213921
3C-213F2145-2149214E218321842C00-2C2E2C30-2C5E2C60-2CE42CEB-2CEE2CF22CF32D00-2D252D272D2D2D30-2D672D6F2D80-2D962DA0-2DA62DA8-2DAE2DB0-2DB62DB8-2DBE2DC0-2DC62DC8-2DCE2DD0-2DD62DD8-2DDE2E2F300530063031-3035303B303C3041-3096309D-309F30A1-30FA30FC-30FF3105-312D3131-318E31A0-31BA31F0-31FF3400-4DB54E00-9FCCA000-A48CA4D0-A4FDA500-A60CA610-A61FA62AA62BA640-A66EA67F-A697A6A0-A6E5A717-A71FA722-A788A78B-A78EA790-A793A7A0-A7AAA7F8-A801A803-A805A807-A80AA80C-A822A840-A873A882-A8B3A8F2-A8F7A8FBA90A-A925A930-A946A960-A97CA984-A9B2A9CFAA00-AA28AA40-AA42AA44-AA4BAA60-AA76AA7AAA80-AAAFAAB1AAB5AAB6AAB9-AABDAAC0AAC2AADB-AADDAAE0-AAEAAAF2-AAF4AB01-AB06AB09-AB0EAB11-AB16AB20-AB26AB28-AB2EABC0-ABE2AC00-D7A3D7B0-D7C6D7CB-D7FBF900-FA6DFA70-FAD9FB00-FB06FB13-FB17FB1DFB1F-FB28FB2A-FB36FB38-FB3CFB3EFB40FB41FB43FB44FB46-FBB1FBD3-FD3DFD50-FD8FFD92-FDC7FDF0-FDFBFE70-FE74FE76-FEFCFF21-FF3AFF41-FF5AFF66-FFBEFFC2-FFC7FFCA-FFCFFFD2-FFD7FFDA-FFDC" }, { L: "Letter" }); /* Adds Unicode property syntax to XRegExp: \p{..}, \P{..}, \p{^..} */ XRegExp.addToken( /\\([pP]){(\^?)([^}]*)}/, function (match, scope) { var inv = (match[1] === "P" || match[2]) ? "^" : "", item = slug(match[3]); // The double negative \P{^..} is invalid if (match[1] === "P" && match[2]) { throw new SyntaxError("invalid double negation \\P{^"); } if (!unicode.hasOwnProperty(item)) { throw new SyntaxError("invalid or unknown Unicode property " + match[0]); } return scope === "class" ? (inv ? cacheInversion(item) : unicode[item]) : "[" + inv + unicode[item] + "]"; }, {scope: "all"} ); }(XRegExp)); /***** unicode-categories.js *****/ /*! * XRegExp Unicode Categories v1.2.0 * (c) 2010-2012 Steven Levithan <http://xregexp.com/> * MIT License * Uses Unicode 6.1 <http://unicode.org/> */ /** * Adds support for all Unicode categories (aka properties) E.g., `\p{Lu}` or * `\p{Uppercase Letter}`. Token names are case insensitive, and any spaces, hyphens, and * underscores are ignored. 
* @requires XRegExp, XRegExp Unicode Base */ (function (XRegExp) { "use strict"; if (!XRegExp.addUnicodePackage) { throw new ReferenceError("Unicode Base must be loaded before Unicode Categories"); } XRegExp.install("extensibility"); XRegExp.addUnicodePackage({ //L: "", // Included in the Unicode Base addon Ll: "0061-007A00B500DF-00F600F8-00FF01010103010501070109010B010D010F01110113011501170119011B011D011F01210123012501270129012B012D012F01310133013501370138013A013C013E014001420144014601480149014B014D014F01510153015501570159015B015D015F01610163016501670169016B016D016F0171017301750177017A017C017E-0180018301850188018C018D019201950199-019B019E01A101A301A501A801AA01AB01AD01B001B401B601B901BA01BD-01BF01C601C901CC01CE01D001D201D401D601D801DA01DC01DD01DF01E101E301E501E701E901EB01ED01EF01F001F301F501F901FB01FD01FF02010203020502070209020B020D020F02110213021502170219021B021D021F02210223022502270229022B022D022F02310233-0239023C023F0240024202470249024B024D024F-02930295-02AF037103730377037B-037D039003AC-03CE03D003D103D5-03D703D903DB03DD03DF03E103E303E503E703E903EB03ED03EF-03F303F503F803FB03FC0430-045F04610463046504670469046B046D046F04710473047504770479047B047D047F0481048B048D048F04910493049504970499049B049D049F04A104A304A504A704A904AB04AD04AF04B104B304B504B704B904BB04BD04BF04C204C404C604C804CA04CC04CE04CF04D104D304D504D704D904DB04DD04DF04E104E304E504E704E904EB04ED04EF04F104F304F504F704F904FB04FD04FF05010503050505070509050B050D050F05110513051505170519051B051D051F05210523052505270561-05871D00-1D2B1D6B-1D771D79-1D9A1E011E031E051E071E091E0B1E0D1E0F1E111E131E151E171E191E1B1E1D1E1F1E211E231E251E271E291E2B1E2D1E2F1E311E331E351E371E391E3B1E3D1E3F1E411E431E451E471E491E4B1E4D1E4F1E511E531E551E571E591E5B1E5D1E5F1E611E631E651E671E691E6B1E6D1E6F1E711E731E751E771E791E7B1E7D1E7F1E811E831E851E871E891E8B1E8D1E8F1E911E931E95-1E9D1E9F1EA11EA31EA51EA71EA91EAB1EAD1EAF1EB11EB31EB51EB71EB91EBB1EBD1EBF1EC11EC31EC51EC71EC91ECB1ECD1ECF1ED11ED31ED51ED71ED91EDB1EDD1EDF1EE11EE31EE51EE71EE91EEB1EED1EEF1EF11EF
31EF51EF71EF91EFB1EFD1EFF-1F071F10-1F151F20-1F271F30-1F371F40-1F451F50-1F571F60-1F671F70-1F7D1F80-1F871F90-1F971FA0-1FA71FB0-1FB41FB61FB71FBE1FC2-1FC41FC61FC71FD0-1FD31FD61FD71FE0-1FE71FF2-1FF41FF61FF7210A210E210F2113212F21342139213C213D2146-2149214E21842C30-2C5E2C612C652C662C682C6A2C6C2C712C732C742C76-2C7B2C812C832C852C872C892C8B2C8D2C8F2C912C932C952C972C992C9B2C9D2C9F2CA12CA32CA52CA72CA92CAB2CAD2CAF2CB12CB32CB52CB72CB92CBB2CBD2CBF2CC12CC32CC52CC72CC92CCB2CCD2CCF2CD12CD32CD52CD72CD92CDB2CDD2CDF2CE12CE32CE42CEC2CEE2CF32D00-2D252D272D2DA641A643A645A647A649A64BA64DA64FA651A653A655A657A659A65BA65DA65FA661A663A665A667A669A66BA66DA681A683A685A687A689A68BA68DA68FA691A693A695A697A723A725A727A729A72BA72DA72F-A731A733A735A737A739A73BA73DA73FA741A743A745A747A749A74BA74DA74FA751A753A755A757A759A75BA75DA75FA761A763A765A767A769A76BA76DA76FA771-A778A77AA77CA77FA781A783A785A787A78CA78EA791A793A7A1A7A3A7A5A7A7A7A9A7FAFB00-FB06FB13-FB17FF41-FF5A", Lu: "0041-005A00C0-00D600D8-00DE01000102010401060108010A010C010E01100112011401160118011A011C011E01200122012401260128012A012C012E01300132013401360139013B013D013F0141014301450147014A014C014E01500152015401560158015A015C015E01600162016401660168016A016C016E017001720174017601780179017B017D018101820184018601870189-018B018E-0191019301940196-0198019C019D019F01A001A201A401A601A701A901AC01AE01AF01B1-01B301B501B701B801BC01C401C701CA01CD01CF01D101D301D501D701D901DB01DE01E001E201E401E601E801EA01EC01EE01F101F401F6-01F801FA01FC01FE02000202020402060208020A020C020E02100212021402160218021A021C021E02200222022402260228022A022C022E02300232023A023B023D023E02410243-02460248024A024C024E03700372037603860388-038A038C038E038F0391-03A103A3-03AB03CF03D2-03D403D803DA03DC03DE03E003E203E403E603E803EA03EC03EE03F403F703F903FA03FD-042F04600462046404660468046A046C046E04700472047404760478047A047C047E0480048A048C048E04900492049404960498049A049C049E04A004A204A404A604A804AA04AC04AE04B004B204B404B604B804BA04BC04BE04C004C104C304C504C704C904CB04CD04D004D204D404D604D804DA04DC04DE04E0
04E204E404E604E804EA04EC04EE04F004F204F404F604F804FA04FC04FE05000502050405060508050A050C050E05100512051405160518051A051C051E05200522052405260531-055610A0-10C510C710CD1E001E021E041E061E081E0A1E0C1E0E1E101E121E141E161E181E1A1E1C1E1E1E201E221E241E261E281E2A1E2C1E2E1E301E321E341E361E381E3A1E3C1E3E1E401E421E441E461E481E4A1E4C1E4E1E501E521E541E561E581E5A1E5C1E5E1E601E621E641E661E681E6A1E6C1E6E1E701E721E741E761E781E7A1E7C1E7E1E801E821E841E861E881E8A1E8C1E8E1E901E921E941E9E1EA01EA21EA41EA61EA81EAA1EAC1EAE1EB01EB21EB41EB61EB81EBA1EBC1EBE1EC01EC21EC41EC61EC81ECA1ECC1ECE1ED01ED21ED41ED61ED81EDA1EDC1EDE1EE01EE21EE41EE61EE81EEA1EEC1EEE1EF01EF21EF41EF61EF81EFA1EFC1EFE1F08-1F0F1F18-1F1D1F28-1F2F1F38-1F3F1F48-1F4D1F591F5B1F5D1F5F1F68-1F6F1FB8-1FBB1FC8-1FCB1FD8-1FDB1FE8-1FEC1FF8-1FFB21022107210B-210D2110-211221152119-211D212421262128212A-212D2130-2133213E213F214521832C00-2C2E2C602C62-2C642C672C692C6B2C6D-2C702C722C752C7E-2C802C822C842C862C882C8A2C8C2C8E2C902C922C942C962C982C9A2C9C2C9E2CA02CA22CA42CA62CA82CAA2CAC2CAE2CB02CB22CB42CB62CB82CBA2CBC2CBE2CC02CC22CC42CC62CC82CCA2CCC2CCE2CD02CD22CD42CD62CD82CDA2CDC2CDE2CE02CE22CEB2CED2CF2A640A642A644A646A648A64AA64CA64EA650A652A654A656A658A65AA65CA65EA660A662A664A666A668A66AA66CA680A682A684A686A688A68AA68CA68EA690A692A694A696A722A724A726A728A72AA72CA72EA732A734A736A738A73AA73CA73EA740A742A744A746A748A74AA74CA74EA750A752A754A756A758A75AA75CA75EA760A762A764A766A768A76AA76CA76EA779A77BA77DA77EA780A782A784A786A78BA78DA790A792A7A0A7A2A7A4A7A6A7A8A7AAFF21-FF3A", Lt: "01C501C801CB01F21F88-1F8F1F98-1F9F1FA8-1FAF1FBC1FCC1FFC", Lm: "02B0-02C102C6-02D102E0-02E402EC02EE0374037A0559064006E506E607F407F507FA081A0824082809710E460EC610FC17D718431AA71C78-1C7D1D2C-1D6A1D781D9B-1DBF2071207F2090-209C2C7C2C7D2D6F2E2F30053031-3035303B309D309E30FC-30FEA015A4F8-A4FDA60CA67FA717-A71FA770A788A7F8A7F9A9CFAA70AADDAAF3AAF4FF70FF9EFF9F", Lo: 
"00AA00BA01BB01C0-01C3029405D0-05EA05F0-05F20620-063F0641-064A066E066F0671-06D306D506EE06EF06FA-06FC06FF07100712-072F074D-07A507B107CA-07EA0800-08150840-085808A008A2-08AC0904-0939093D09500958-09610972-09770979-097F0985-098C098F09900993-09A809AA-09B009B209B6-09B909BD09CE09DC09DD09DF-09E109F009F10A05-0A0A0A0F0A100A13-0A280A2A-0A300A320A330A350A360A380A390A59-0A5C0A5E0A72-0A740A85-0A8D0A8F-0A910A93-0AA80AAA-0AB00AB20AB30AB5-0AB90ABD0AD00AE00AE10B05-0B0C0B0F0B100B13-0B280B2A-0B300B320B330B35-0B390B3D0B5C0B5D0B5F-0B610B710B830B85-0B8A0B8E-0B900B92-0B950B990B9A0B9C0B9E0B9F0BA30BA40BA8-0BAA0BAE-0BB90BD00C05-0C0C0C0E-0C100C12-0C280C2A-0C330C35-0C390C3D0C580C590C600C610C85-0C8C0C8E-0C900C92-0CA80CAA-0CB30CB5-0CB90CBD0CDE0CE00CE10CF10CF20D05-0D0C0D0E-0D100D12-0D3A0D3D0D4E0D600D610D7A-0D7F0D85-0D960D9A-0DB10DB3-0DBB0DBD0DC0-0DC60E01-0E300E320E330E40-0E450E810E820E840E870E880E8A0E8D0E94-0E970E99-0E9F0EA1-0EA30EA50EA70EAA0EAB0EAD-0EB00EB20EB30EBD0EC0-0EC40EDC-0EDF0F000F40-0F470F49-0F6C0F88-0F8C1000-102A103F1050-1055105A-105D106110651066106E-10701075-1081108E10D0-10FA10FD-1248124A-124D1250-12561258125A-125D1260-1288128A-128D1290-12B012B2-12B512B8-12BE12C012C2-12C512C8-12D612D8-13101312-13151318-135A1380-138F13A0-13F41401-166C166F-167F1681-169A16A0-16EA1700-170C170E-17111720-17311740-17511760-176C176E-17701780-17B317DC1820-18421844-18771880-18A818AA18B0-18F51900-191C1950-196D1970-19741980-19AB19C1-19C71A00-1A161A20-1A541B05-1B331B45-1B4B1B83-1BA01BAE1BAF1BBA-1BE51C00-1C231C4D-1C4F1C5A-1C771CE9-1CEC1CEE-1CF11CF51CF62135-21382D30-2D672D80-2D962DA0-2DA62DA8-2DAE2DB0-2DB62DB8-2DBE2DC0-2DC62DC8-2DCE2DD0-2DD62DD8-2DDE3006303C3041-3096309F30A1-30FA30FF3105-312D3131-318E31A0-31BA31F0-31FF3400-4DB54E00-9FCCA000-A014A016-A48CA4D0-A4F7A500-A60BA610-A61FA62AA62BA66EA6A0-A6E5A7FB-A801A803-A805A807-A80AA80C-A822A840-A873A882-A8B3A8F2-A8F7A8FBA90A-A925A930-A946A960-A97CA984-A9B2AA00-AA28AA40-AA42AA44-AA4BAA60-AA6FAA71-AA76AA7AAA80-AAAFAAB1AAB5AAB6AAB9-AABDAAC0AAC2AADBAADCAAE0-AAEAAAF2AB01-AB06AB
09-AB0EAB11-AB16AB20-AB26AB28-AB2EABC0-ABE2AC00-D7A3D7B0-D7C6D7CB-D7FBF900-FA6DFA70-FAD9FB1DFB1F-FB28FB2A-FB36FB38-FB3CFB3EFB40FB41FB43FB44FB46-FBB1FBD3-FD3DFD50-FD8FFD92-FDC7FDF0-FDFBFE70-FE74FE76-FEFCFF66-FF6FFF71-FF9DFFA0-FFBEFFC2-FFC7FFCA-FFCFFFD2-FFD7FFDA-FFDC", M: "0300-036F0483-04890591-05BD05BF05C105C205C405C505C70610-061A064B-065F067006D6-06DC06DF-06E406E706E806EA-06ED07110730-074A07A6-07B007EB-07F30816-0819081B-08230825-08270829-082D0859-085B08E4-08FE0900-0903093A-093C093E-094F0951-0957096209630981-098309BC09BE-09C409C709C809CB-09CD09D709E209E30A01-0A030A3C0A3E-0A420A470A480A4B-0A4D0A510A700A710A750A81-0A830ABC0ABE-0AC50AC7-0AC90ACB-0ACD0AE20AE30B01-0B030B3C0B3E-0B440B470B480B4B-0B4D0B560B570B620B630B820BBE-0BC20BC6-0BC80BCA-0BCD0BD70C01-0C030C3E-0C440C46-0C480C4A-0C4D0C550C560C620C630C820C830CBC0CBE-0CC40CC6-0CC80CCA-0CCD0CD50CD60CE20CE30D020D030D3E-0D440D46-0D480D4A-0D4D0D570D620D630D820D830DCA0DCF-0DD40DD60DD8-0DDF0DF20DF30E310E34-0E3A0E47-0E4E0EB10EB4-0EB90EBB0EBC0EC8-0ECD0F180F190F350F370F390F3E0F3F0F71-0F840F860F870F8D-0F970F99-0FBC0FC6102B-103E1056-1059105E-10601062-10641067-106D1071-10741082-108D108F109A-109D135D-135F1712-17141732-1734175217531772177317B4-17D317DD180B-180D18A91920-192B1930-193B19B0-19C019C819C91A17-1A1B1A55-1A5E1A60-1A7C1A7F1B00-1B041B34-1B441B6B-1B731B80-1B821BA1-1BAD1BE6-1BF31C24-1C371CD0-1CD21CD4-1CE81CED1CF2-1CF41DC0-1DE61DFC-1DFF20D0-20F02CEF-2CF12D7F2DE0-2DFF302A-302F3099309AA66F-A672A674-A67DA69FA6F0A6F1A802A806A80BA823-A827A880A881A8B4-A8C4A8E0-A8F1A926-A92DA947-A953A980-A983A9B3-A9C0AA29-AA36AA43AA4CAA4DAA7BAAB0AAB2-AAB4AAB7AAB8AABEAABFAAC1AAEB-AAEFAAF5AAF6ABE3-ABEAABECABEDFB1EFE00-FE0FFE20-FE26", Mn: 
"0300-036F0483-04870591-05BD05BF05C105C205C405C505C70610-061A064B-065F067006D6-06DC06DF-06E406E706E806EA-06ED07110730-074A07A6-07B007EB-07F30816-0819081B-08230825-08270829-082D0859-085B08E4-08FE0900-0902093A093C0941-0948094D0951-095709620963098109BC09C1-09C409CD09E209E30A010A020A3C0A410A420A470A480A4B-0A4D0A510A700A710A750A810A820ABC0AC1-0AC50AC70AC80ACD0AE20AE30B010B3C0B3F0B41-0B440B4D0B560B620B630B820BC00BCD0C3E-0C400C46-0C480C4A-0C4D0C550C560C620C630CBC0CBF0CC60CCC0CCD0CE20CE30D41-0D440D4D0D620D630DCA0DD2-0DD40DD60E310E34-0E3A0E47-0E4E0EB10EB4-0EB90EBB0EBC0EC8-0ECD0F180F190F350F370F390F71-0F7E0F80-0F840F860F870F8D-0F970F99-0FBC0FC6102D-10301032-10371039103A103D103E10581059105E-10601071-1074108210851086108D109D135D-135F1712-17141732-1734175217531772177317B417B517B7-17BD17C617C9-17D317DD180B-180D18A91920-19221927192819321939-193B1A171A181A561A58-1A5E1A601A621A65-1A6C1A73-1A7C1A7F1B00-1B031B341B36-1B3A1B3C1B421B6B-1B731B801B811BA2-1BA51BA81BA91BAB1BE61BE81BE91BED1BEF-1BF11C2C-1C331C361C371CD0-1CD21CD4-1CE01CE2-1CE81CED1CF41DC0-1DE61DFC-1DFF20D0-20DC20E120E5-20F02CEF-2CF12D7F2DE0-2DFF302A-302D3099309AA66FA674-A67DA69FA6F0A6F1A802A806A80BA825A826A8C4A8E0-A8F1A926-A92DA947-A951A980-A982A9B3A9B6-A9B9A9BCAA29-AA2EAA31AA32AA35AA36AA43AA4CAAB0AAB2-AAB4AAB7AAB8AABEAABFAAC1AAECAAEDAAF6ABE5ABE8ABEDFB1EFE00-FE0FFE20-FE26", Mc: 
"0903093B093E-09400949-094C094E094F0982098309BE-09C009C709C809CB09CC09D70A030A3E-0A400A830ABE-0AC00AC90ACB0ACC0B020B030B3E0B400B470B480B4B0B4C0B570BBE0BBF0BC10BC20BC6-0BC80BCA-0BCC0BD70C01-0C030C41-0C440C820C830CBE0CC0-0CC40CC70CC80CCA0CCB0CD50CD60D020D030D3E-0D400D46-0D480D4A-0D4C0D570D820D830DCF-0DD10DD8-0DDF0DF20DF30F3E0F3F0F7F102B102C10311038103B103C105610571062-10641067-106D108310841087-108C108F109A-109C17B617BE-17C517C717C81923-19261929-192B193019311933-193819B0-19C019C819C91A19-1A1B1A551A571A611A631A641A6D-1A721B041B351B3B1B3D-1B411B431B441B821BA11BA61BA71BAA1BAC1BAD1BE71BEA-1BEC1BEE1BF21BF31C24-1C2B1C341C351CE11CF21CF3302E302FA823A824A827A880A881A8B4-A8C3A952A953A983A9B4A9B5A9BAA9BBA9BD-A9C0AA2FAA30AA33AA34AA4DAA7BAAEBAAEEAAEFAAF5ABE3ABE4ABE6ABE7ABE9ABEAABEC", Me: "0488048920DD-20E020E2-20E4A670-A672", N: "0030-003900B200B300B900BC-00BE0660-066906F0-06F907C0-07C90966-096F09E6-09EF09F4-09F90A66-0A6F0AE6-0AEF0B66-0B6F0B72-0B770BE6-0BF20C66-0C6F0C78-0C7E0CE6-0CEF0D66-0D750E50-0E590ED0-0ED90F20-0F331040-10491090-10991369-137C16EE-16F017E0-17E917F0-17F91810-18191946-194F19D0-19DA1A80-1A891A90-1A991B50-1B591BB0-1BB91C40-1C491C50-1C5920702074-20792080-20892150-21822185-21892460-249B24EA-24FF2776-27932CFD30073021-30293038-303A3192-31953220-32293248-324F3251-325F3280-328932B1-32BFA620-A629A6E6-A6EFA830-A835A8D0-A8D9A900-A909A9D0-A9D9AA50-AA59ABF0-ABF9FF10-FF19", Nd: "0030-00390660-066906F0-06F907C0-07C90966-096F09E6-09EF0A66-0A6F0AE6-0AEF0B66-0B6F0BE6-0BEF0C66-0C6F0CE6-0CEF0D66-0D6F0E50-0E590ED0-0ED90F20-0F291040-10491090-109917E0-17E91810-18191946-194F19D0-19D91A80-1A891A90-1A991B50-1B591BB0-1BB91C40-1C491C50-1C59A620-A629A8D0-A8D9A900-A909A9D0-A9D9AA50-AA59ABF0-ABF9FF10-FF19", Nl: "16EE-16F02160-21822185-218830073021-30293038-303AA6E6-A6EF", No: 
"00B200B300B900BC-00BE09F4-09F90B72-0B770BF0-0BF20C78-0C7E0D70-0D750F2A-0F331369-137C17F0-17F919DA20702074-20792080-20892150-215F21892460-249B24EA-24FF2776-27932CFD3192-31953220-32293248-324F3251-325F3280-328932B1-32BFA830-A835", P: "0021-00230025-002A002C-002F003A003B003F0040005B-005D005F007B007D00A100A700AB00B600B700BB00BF037E0387055A-055F0589058A05BE05C005C305C605F305F40609060A060C060D061B061E061F066A-066D06D40700-070D07F7-07F90830-083E085E0964096509700AF00DF40E4F0E5A0E5B0F04-0F120F140F3A-0F3D0F850FD0-0FD40FD90FDA104A-104F10FB1360-13681400166D166E169B169C16EB-16ED1735173617D4-17D617D8-17DA1800-180A194419451A1E1A1F1AA0-1AA61AA8-1AAD1B5A-1B601BFC-1BFF1C3B-1C3F1C7E1C7F1CC0-1CC71CD32010-20272030-20432045-20512053-205E207D207E208D208E2329232A2768-277527C527C627E6-27EF2983-299829D8-29DB29FC29FD2CF9-2CFC2CFE2CFF2D702E00-2E2E2E30-2E3B3001-30033008-30113014-301F3030303D30A030FBA4FEA4FFA60D-A60FA673A67EA6F2-A6F7A874-A877A8CEA8CFA8F8-A8FAA92EA92FA95FA9C1-A9CDA9DEA9DFAA5C-AA5FAADEAADFAAF0AAF1ABEBFD3EFD3FFE10-FE19FE30-FE52FE54-FE61FE63FE68FE6AFE6BFF01-FF03FF05-FF0AFF0C-FF0FFF1AFF1BFF1FFF20FF3B-FF3DFF3FFF5BFF5DFF5F-FF65", Pd: "002D058A05BE140018062010-20152E172E1A2E3A2E3B301C303030A0FE31FE32FE58FE63FF0D", Ps: "0028005B007B0F3A0F3C169B201A201E2045207D208D23292768276A276C276E27702772277427C527E627E827EA27EC27EE2983298529872989298B298D298F299129932995299729D829DA29FC2E222E242E262E283008300A300C300E3010301430163018301A301DFD3EFE17FE35FE37FE39FE3BFE3DFE3FFE41FE43FE47FE59FE5BFE5DFF08FF3BFF5BFF5FFF62", Pe: "0029005D007D0F3B0F3D169C2046207E208E232A2769276B276D276F27712773277527C627E727E927EB27ED27EF298429862988298A298C298E2990299229942996299829D929DB29FD2E232E252E272E293009300B300D300F3011301530173019301B301E301FFD3FFE18FE36FE38FE3AFE3CFE3EFE40FE42FE44FE48FE5AFE5CFE5EFF09FF3DFF5DFF60FF63", Pi: "00AB2018201B201C201F20392E022E042E092E0C2E1C2E20", Pf: "00BB2019201D203A2E032E052E0A2E0D2E1D2E21", Pc: "005F203F20402054FE33FE34FE4D-FE4FFF3F", Po: 
"0021-00230025-0027002A002C002E002F003A003B003F0040005C00A100A700B600B700BF037E0387055A-055F058905C005C305C605F305F40609060A060C060D061B061E061F066A-066D06D40700-070D07F7-07F90830-083E085E0964096509700AF00DF40E4F0E5A0E5B0F04-0F120F140F850FD0-0FD40FD90FDA104A-104F10FB1360-1368166D166E16EB-16ED1735173617D4-17D617D8-17DA1800-18051807-180A194419451A1E1A1F1AA0-1AA61AA8-1AAD1B5A-1B601BFC-1BFF1C3B-1C3F1C7E1C7F1CC0-1CC71CD3201620172020-20272030-2038203B-203E2041-20432047-205120532055-205E2CF9-2CFC2CFE2CFF2D702E002E012E06-2E082E0B2E0E-2E162E182E192E1B2E1E2E1F2E2A-2E2E2E30-2E393001-3003303D30FBA4FEA4FFA60D-A60FA673A67EA6F2-A6F7A874-A877A8CEA8CFA8F8-A8FAA92EA92FA95FA9C1-A9CDA9DEA9DFAA5C-AA5FAADEAADFAAF0AAF1ABEBFE10-FE16FE19FE30FE45FE46FE49-FE4CFE50-FE52FE54-FE57FE5F-FE61FE68FE6AFE6BFF01-FF03FF05-FF07FF0AFF0CFF0EFF0FFF1AFF1BFF1FFF20FF3CFF61FF64FF65", S: "0024002B003C-003E005E0060007C007E00A2-00A600A800A900AC00AE-00B100B400B800D700F702C2-02C502D2-02DF02E5-02EB02ED02EF-02FF03750384038503F60482058F0606-0608060B060E060F06DE06E906FD06FE07F609F209F309FA09FB0AF10B700BF3-0BFA0C7F0D790E3F0F01-0F030F130F15-0F170F1A-0F1F0F340F360F380FBE-0FC50FC7-0FCC0FCE0FCF0FD5-0FD8109E109F1390-139917DB194019DE-19FF1B61-1B6A1B74-1B7C1FBD1FBF-1FC11FCD-1FCF1FDD-1FDF1FED-1FEF1FFD1FFE20442052207A-207C208A-208C20A0-20B9210021012103-21062108210921142116-2118211E-2123212521272129212E213A213B2140-2144214A-214D214F2190-2328232B-23F32400-24262440-244A249C-24E92500-26FF2701-27672794-27C427C7-27E527F0-29822999-29D729DC-29FB29FE-2B4C2B50-2B592CE5-2CEA2E80-2E992E9B-2EF32F00-2FD52FF0-2FFB300430123013302030363037303E303F309B309C319031913196-319F31C0-31E33200-321E322A-324732503260-327F328A-32B032C0-32FE3300-33FF4DC0-4DFFA490-A4C6A700-A716A720A721A789A78AA828-A82BA836-A839AA77-AA79FB29FBB2-FBC1FDFCFDFDFE62FE64-FE66FE69FF04FF0BFF1C-FF1EFF3EFF40FF5CFF5EFFE0-FFE6FFE8-FFEEFFFCFFFD", Sm: 
"002B003C-003E007C007E00AC00B100D700F703F60606-060820442052207A-207C208A-208C21182140-2144214B2190-2194219A219B21A021A321A621AE21CE21CF21D221D421F4-22FF2308-230B23202321237C239B-23B323DC-23E125B725C125F8-25FF266F27C0-27C427C7-27E527F0-27FF2900-29822999-29D729DC-29FB29FE-2AFF2B30-2B442B47-2B4CFB29FE62FE64-FE66FF0BFF1C-FF1EFF5CFF5EFFE2FFE9-FFEC", Sc: "002400A2-00A5058F060B09F209F309FB0AF10BF90E3F17DB20A0-20B9A838FDFCFE69FF04FFE0FFE1FFE5FFE6", Sk: "005E006000A800AF00B400B802C2-02C502D2-02DF02E5-02EB02ED02EF-02FF0375038403851FBD1FBF-1FC11FCD-1FCF1FDD-1FDF1FED-1FEF1FFD1FFE309B309CA700-A716A720A721A789A78AFBB2-FBC1FF3EFF40FFE3", So: "00A600A900AE00B00482060E060F06DE06E906FD06FE07F609FA0B700BF3-0BF80BFA0C7F0D790F01-0F030F130F15-0F170F1A-0F1F0F340F360F380FBE-0FC50FC7-0FCC0FCE0FCF0FD5-0FD8109E109F1390-1399194019DE-19FF1B61-1B6A1B74-1B7C210021012103-210621082109211421162117211E-2123212521272129212E213A213B214A214C214D214F2195-2199219C-219F21A121A221A421A521A7-21AD21AF-21CD21D021D121D321D5-21F32300-2307230C-231F2322-2328232B-237B237D-239A23B4-23DB23E2-23F32400-24262440-244A249C-24E92500-25B625B8-25C025C2-25F72600-266E2670-26FF2701-27672794-27BF2800-28FF2B00-2B2F2B452B462B50-2B592CE5-2CEA2E80-2E992E9B-2EF32F00-2FD52FF0-2FFB300430123013302030363037303E303F319031913196-319F31C0-31E33200-321E322A-324732503260-327F328A-32B032C0-32FE3300-33FF4DC0-4DFFA490-A4C6A828-A82BA836A837A839AA77-AA79FDFDFFE4FFE8FFEDFFEEFFFCFFFD", Z: "002000A01680180E2000-200A20282029202F205F3000", Zs: "002000A01680180E2000-200A202F205F3000", Zl: "2028", Zp: "2029", C: 
"0000-001F007F-009F00AD03780379037F-0383038B038D03A20528-05300557055805600588058B-058E059005C8-05CF05EB-05EF05F5-0605061C061D06DD070E070F074B074C07B2-07BF07FB-07FF082E082F083F085C085D085F-089F08A108AD-08E308FF097809800984098D098E0991099209A909B109B3-09B509BA09BB09C509C609C909CA09CF-09D609D8-09DB09DE09E409E509FC-0A000A040A0B-0A0E0A110A120A290A310A340A370A3A0A3B0A3D0A43-0A460A490A4A0A4E-0A500A52-0A580A5D0A5F-0A650A76-0A800A840A8E0A920AA90AB10AB40ABA0ABB0AC60ACA0ACE0ACF0AD1-0ADF0AE40AE50AF2-0B000B040B0D0B0E0B110B120B290B310B340B3A0B3B0B450B460B490B4A0B4E-0B550B58-0B5B0B5E0B640B650B78-0B810B840B8B-0B8D0B910B96-0B980B9B0B9D0BA0-0BA20BA5-0BA70BAB-0BAD0BBA-0BBD0BC3-0BC50BC90BCE0BCF0BD1-0BD60BD8-0BE50BFB-0C000C040C0D0C110C290C340C3A-0C3C0C450C490C4E-0C540C570C5A-0C5F0C640C650C70-0C770C800C810C840C8D0C910CA90CB40CBA0CBB0CC50CC90CCE-0CD40CD7-0CDD0CDF0CE40CE50CF00CF3-0D010D040D0D0D110D3B0D3C0D450D490D4F-0D560D58-0D5F0D640D650D76-0D780D800D810D840D97-0D990DB20DBC0DBE0DBF0DC7-0DC90DCB-0DCE0DD50DD70DE0-0DF10DF5-0E000E3B-0E3E0E5C-0E800E830E850E860E890E8B0E8C0E8E-0E930E980EA00EA40EA60EA80EA90EAC0EBA0EBE0EBF0EC50EC70ECE0ECF0EDA0EDB0EE0-0EFF0F480F6D-0F700F980FBD0FCD0FDB-0FFF10C610C8-10CC10CE10CF1249124E124F12571259125E125F1289128E128F12B112B612B712BF12C112C612C712D7131113161317135B135C137D-137F139A-139F13F5-13FF169D-169F16F1-16FF170D1715-171F1737-173F1754-175F176D17711774-177F17DE17DF17EA-17EF17FA-17FF180F181A-181F1878-187F18AB-18AF18F6-18FF191D-191F192C-192F193C-193F1941-1943196E196F1975-197F19AC-19AF19CA-19CF19DB-19DD1A1C1A1D1A5F1A7D1A7E1A8A-1A8F1A9A-1A9F1AAE-1AFF1B4C-1B4F1B7D-1B7F1BF4-1BFB1C38-1C3A1C4A-1C4C1C80-1CBF1CC8-1CCF1CF7-1CFF1DE7-1DFB1F161F171F1E1F1F1F461F471F4E1F4F1F581F5A1F5C1F5E1F7E1F7F1FB51FC51FD41FD51FDC1FF01FF11FF51FFF200B-200F202A-202E2060-206F20722073208F209D-209F20BA-20CF20F1-20FF218A-218F23F4-23FF2427-243F244B-245F27002B4D-2B4F2B5A-2BFF2C2F2C5F2CF4-2CF82D262D28-2D2C2D2E2D2F2D68-2D6E2D71-2D7E2D97-2D9F2DA72DAF2DB72DBF2DC72DCF2DD72DDF2E3C-2E7F2E9A2EF4-2EFF2FD6-2FEF2
FFC-2FFF3040309730983100-3104312E-3130318F31BB-31BF31E4-31EF321F32FF4DB6-4DBF9FCD-9FFFA48D-A48FA4C7-A4CFA62C-A63FA698-A69EA6F8-A6FFA78FA794-A79FA7AB-A7F7A82C-A82FA83A-A83FA878-A87FA8C5-A8CDA8DA-A8DFA8FC-A8FFA954-A95EA97D-A97FA9CEA9DA-A9DDA9E0-A9FFAA37-AA3FAA4EAA4FAA5AAA5BAA7C-AA7FAAC3-AADAAAF7-AB00AB07AB08AB0FAB10AB17-AB1FAB27AB2F-ABBFABEEABEFABFA-ABFFD7A4-D7AFD7C7-D7CAD7FC-F8FFFA6EFA6FFADA-FAFFFB07-FB12FB18-FB1CFB37FB3DFB3FFB42FB45FBC2-FBD2FD40-FD4FFD90FD91FDC8-FDEFFDFEFDFFFE1A-FE1FFE27-FE2FFE53FE67FE6C-FE6FFE75FEFD-FF00FFBF-FFC1FFC8FFC9FFD0FFD1FFD8FFD9FFDD-FFDFFFE7FFEF-FFFBFFFEFFFF", Cc: "0000-001F007F-009F", Cf: "00AD0600-060406DD070F200B-200F202A-202E2060-2064206A-206FFEFFFFF9-FFFB", Co: "E000-F8FF", Cs: "D800-DFFF", Cn: "03780379037F-0383038B038D03A20528-05300557055805600588058B-058E059005C8-05CF05EB-05EF05F5-05FF0605061C061D070E074B074C07B2-07BF07FB-07FF082E082F083F085C085D085F-089F08A108AD-08E308FF097809800984098D098E0991099209A909B109B3-09B509BA09BB09C509C609C909CA09CF-09D609D8-09DB09DE09E409E509FC-0A000A040A0B-0A0E0A110A120A290A310A340A370A3A0A3B0A3D0A43-0A460A490A4A0A4E-0A500A52-0A580A5D0A5F-0A650A76-0A800A840A8E0A920AA90AB10AB40ABA0ABB0AC60ACA0ACE0ACF0AD1-0ADF0AE40AE50AF2-0B000B040B0D0B0E0B110B120B290B310B340B3A0B3B0B450B460B490B4A0B4E-0B550B58-0B5B0B5E0B640B650B78-0B810B840B8B-0B8D0B910B96-0B980B9B0B9D0BA0-0BA20BA5-0BA70BAB-0BAD0BBA-0BBD0BC3-0BC50BC90BCE0BCF0BD1-0BD60BD8-0BE50BFB-0C000C040C0D0C110C290C340C3A-0C3C0C450C490C4E-0C540C570C5A-0C5F0C640C650C70-0C770C800C810C840C8D0C910CA90CB40CBA0CBB0CC50CC90CCE-0CD40CD7-0CDD0CDF0CE40CE50CF00CF3-0D010D040D0D0D110D3B0D3C0D450D490D4F-0D560D58-0D5F0D640D650D76-0D780D800D810D840D97-0D990DB20DBC0DBE0DBF0DC7-0DC90DCB-0DCE0DD50DD70DE0-0DF10DF5-0E000E3B-0E3E0E5C-0E800E830E850E860E890E8B0E8C0E8E-0E930E980EA00EA40EA60EA80EA90EAC0EBA0EBE0EBF0EC50EC70ECE0ECF0EDA0EDB0EE0-0EFF0F480F6D-0F700F980FBD0FCD0FDB-0FFF10C610C8-10CC10CE10CF1249124E124F12571259125E125F1289128E128F12B112B612B712BF12C112C612C712D7131113161317135B135C137
D-137F139A-139F13F5-13FF169D-169F16F1-16FF170D1715-171F1737-173F1754-175F176D17711774-177F17DE17DF17EA-17EF17FA-17FF180F181A-181F1878-187F18AB-18AF18F6-18FF191D-191F192C-192F193C-193F1941-1943196E196F1975-197F19AC-19AF19CA-19CF19DB-19DD1A1C1A1D1A5F1A7D1A7E1A8A-1A8F1A9A-1A9F1AAE-1AFF1B4C-1B4F1B7D-1B7F1BF4-1BFB1C38-1C3A1C4A-1C4C1C80-1CBF1CC8-1CCF1CF7-1CFF1DE7-1DFB1F161F171F1E1F1F1F461F471F4E1F4F1F581F5A1F5C1F5E1F7E1F7F1FB51FC51FD41FD51FDC1FF01FF11FF51FFF2065-206920722073208F209D-209F20BA-20CF20F1-20FF218A-218F23F4-23FF2427-243F244B-245F27002B4D-2B4F2B5A-2BFF2C2F2C5F2CF4-2CF82D262D28-2D2C2D2E2D2F2D68-2D6E2D71-2D7E2D97-2D9F2DA72DAF2DB72DBF2DC72DCF2DD72DDF2E3C-2E7F2E9A2EF4-2EFF2FD6-2FEF2FFC-2FFF3040309730983100-3104312E-3130318F31BB-31BF31E4-31EF321F32FF4DB6-4DBF9FCD-9FFFA48D-A48FA4C7-A4CFA62C-A63FA698-A69EA6F8-A6FFA78FA794-A79FA7AB-A7F7A82C-A82FA83A-A83FA878-A87FA8C5-A8CDA8DA-A8DFA8FC-A8FFA954-A95EA97D-A97FA9CEA9DA-A9DDA9E0-A9FFAA37-AA3FAA4EAA4FAA5AAA5BAA7C-AA7FAAC3-AADAAAF7-AB00AB07AB08AB0FAB10AB17-AB1FAB27AB2F-ABBFABEEABEFABFA-ABFFD7A4-D7AFD7C7-D7CAD7FC-D7FFFA6EFA6FFADA-FAFFFB07-FB12FB18-FB1CFB37FB3DFB3FFB42FB45FBC2-FBD2FD40-FD4FFD90FD91FDC8-FDEFFDFEFDFFFE1A-FE1FFE27-FE2FFE53FE67FE6C-FE6FFE75FEFDFEFEFF00FFBF-FFC1FFC8FFC9FFD0FFD1FFD8FFD9FFDD-FFDFFFE7FFEF-FFF8FFFEFFFF" }, { //L: "Letter", // Included in the Unicode Base addon Ll: "Lowercase_Letter", Lu: "Uppercase_Letter", Lt: "Titlecase_Letter", Lm: "Modifier_Letter", Lo: "Other_Letter", M: "Mark", Mn: "Nonspacing_Mark", Mc: "Spacing_Mark", Me: "Enclosing_Mark", N: "Number", Nd: "Decimal_Number", Nl: "Letter_Number", No: "Other_Number", P: "Punctuation", Pd: "Dash_Punctuation", Ps: "Open_Punctuation", Pe: "Close_Punctuation", Pi: "Initial_Punctuation", Pf: "Final_Punctuation", Pc: "Connector_Punctuation", Po: "Other_Punctuation", S: "Symbol", Sm: "Math_Symbol", Sc: "Currency_Symbol", Sk: "Modifier_Symbol", So: "Other_Symbol", Z: "Separator", Zs: "Space_Separator", Zl: "Line_Separator", Zp: "Paragraph_Separator", C: 
"Other", Cc: "Control", Cf: "Format", Co: "Private_Use", Cs: "Surrogate", Cn: "Unassigned" }); }(XRegExp)); /***** unicode-scripts.js *****/ /*! * XRegExp Unicode Scripts v1.2.0 * (c) 2010-2012 Steven Levithan <http://xregexp.com/> * MIT License * Uses Unicode 6.1 <http://unicode.org/> */ /** * Adds support for all Unicode scripts in the Basic Multilingual Plane (U+0000-U+FFFF). * E.g., `\p{Latin}`. Token names are case insensitive, and any spaces, hyphens, and underscores * are ignored. * @requires XRegExp, XRegExp Unicode Base */ (function (XRegExp) { "use strict"; if (!XRegExp.addUnicodePackage) { throw new ReferenceError("Unicode Base must be loaded before Unicode Scripts"); } XRegExp.install("extensibility"); XRegExp.addUnicodePackage({ Arabic: "0600-06040606-060B060D-061A061E0620-063F0641-064A0656-065E066A-066F0671-06DC06DE-06FF0750-077F08A008A2-08AC08E4-08FEFB50-FBC1FBD3-FD3DFD50-FD8FFD92-FDC7FDF0-FDFCFE70-FE74FE76-FEFC", Armenian: "0531-05560559-055F0561-0587058A058FFB13-FB17", Balinese: "1B00-1B4B1B50-1B7C", Bamum: "A6A0-A6F7", Batak: "1BC0-1BF31BFC-1BFF", Bengali: "0981-09830985-098C098F09900993-09A809AA-09B009B209B6-09B909BC-09C409C709C809CB-09CE09D709DC09DD09DF-09E309E6-09FB", Bopomofo: "02EA02EB3105-312D31A0-31BA", Braille: "2800-28FF", Buginese: "1A00-1A1B1A1E1A1F", Buhid: "1740-1753", Canadian_Aboriginal: "1400-167F18B0-18F5", Cham: "AA00-AA36AA40-AA4DAA50-AA59AA5C-AA5F", Cherokee: "13A0-13F4", Common: 
"0000-0040005B-0060007B-00A900AB-00B900BB-00BF00D700F702B9-02DF02E5-02E902EC-02FF0374037E038503870589060C061B061F06400660-066906DD096409650E3F0FD5-0FD810FB16EB-16ED173517361802180318051CD31CE11CE9-1CEC1CEE-1CF31CF51CF62000-200B200E-2064206A-20702074-207E2080-208E20A0-20B92100-21252127-2129212C-21312133-214D214F-215F21892190-23F32400-24262440-244A2460-26FF2701-27FF2900-2B4C2B50-2B592E00-2E3B2FF0-2FFB3000-300430063008-30203030-3037303C-303F309B309C30A030FB30FC3190-319F31C0-31E33220-325F327F-32CF3358-33FF4DC0-4DFFA700-A721A788-A78AA830-A839FD3EFD3FFDFDFE10-FE19FE30-FE52FE54-FE66FE68-FE6BFEFFFF01-FF20FF3B-FF40FF5B-FF65FF70FF9EFF9FFFE0-FFE6FFE8-FFEEFFF9-FFFD", Coptic: "03E2-03EF2C80-2CF32CF9-2CFF", Cyrillic: "0400-04840487-05271D2B1D782DE0-2DFFA640-A697A69F", Devanagari: "0900-09500953-09630966-09770979-097FA8E0-A8FB", Ethiopic: "1200-1248124A-124D1250-12561258125A-125D1260-1288128A-128D1290-12B012B2-12B512B8-12BE12C012C2-12C512C8-12D612D8-13101312-13151318-135A135D-137C1380-13992D80-2D962DA0-2DA62DA8-2DAE2DB0-2DB62DB8-2DBE2DC0-2DC62DC8-2DCE2DD0-2DD62DD8-2DDEAB01-AB06AB09-AB0EAB11-AB16AB20-AB26AB28-AB2E", Georgian: "10A0-10C510C710CD10D0-10FA10FC-10FF2D00-2D252D272D2D", Glagolitic: "2C00-2C2E2C30-2C5E", Greek: "0370-03730375-0377037A-037D038403860388-038A038C038E-03A103A3-03E103F0-03FF1D26-1D2A1D5D-1D611D66-1D6A1DBF1F00-1F151F18-1F1D1F20-1F451F48-1F4D1F50-1F571F591F5B1F5D1F5F-1F7D1F80-1FB41FB6-1FC41FC6-1FD31FD6-1FDB1FDD-1FEF1FF2-1FF41FF6-1FFE2126", Gujarati: "0A81-0A830A85-0A8D0A8F-0A910A93-0AA80AAA-0AB00AB20AB30AB5-0AB90ABC-0AC50AC7-0AC90ACB-0ACD0AD00AE0-0AE30AE6-0AF1", Gurmukhi: "0A01-0A030A05-0A0A0A0F0A100A13-0A280A2A-0A300A320A330A350A360A380A390A3C0A3E-0A420A470A480A4B-0A4D0A510A59-0A5C0A5E0A66-0A75", Han: "2E80-2E992E9B-2EF32F00-2FD5300530073021-30293038-303B3400-4DB54E00-9FCCF900-FA6DFA70-FAD9", Hangul: "1100-11FF302E302F3131-318E3200-321E3260-327EA960-A97CAC00-D7A3D7B0-D7C6D7CB-D7FBFFA0-FFBEFFC2-FFC7FFCA-FFCFFFD2-FFD7FFDA-FFDC", Hanunoo: "1720-1734", Hebrew: 
"0591-05C705D0-05EA05F0-05F4FB1D-FB36FB38-FB3CFB3EFB40FB41FB43FB44FB46-FB4F", Hiragana: "3041-3096309D-309F", Inherited: "0300-036F04850486064B-0655065F0670095109521CD0-1CD21CD4-1CE01CE2-1CE81CED1CF41DC0-1DE61DFC-1DFF200C200D20D0-20F0302A-302D3099309AFE00-FE0FFE20-FE26", Javanese: "A980-A9CDA9CF-A9D9A9DEA9DF", Kannada: "0C820C830C85-0C8C0C8E-0C900C92-0CA80CAA-0CB30CB5-0CB90CBC-0CC40CC6-0CC80CCA-0CCD0CD50CD60CDE0CE0-0CE30CE6-0CEF0CF10CF2", Katakana: "30A1-30FA30FD-30FF31F0-31FF32D0-32FE3300-3357FF66-FF6FFF71-FF9D", Kayah_Li: "A900-A92F", Khmer: "1780-17DD17E0-17E917F0-17F919E0-19FF", Lao: "0E810E820E840E870E880E8A0E8D0E94-0E970E99-0E9F0EA1-0EA30EA50EA70EAA0EAB0EAD-0EB90EBB-0EBD0EC0-0EC40EC60EC8-0ECD0ED0-0ED90EDC-0EDF", Latin: "0041-005A0061-007A00AA00BA00C0-00D600D8-00F600F8-02B802E0-02E41D00-1D251D2C-1D5C1D62-1D651D6B-1D771D79-1DBE1E00-1EFF2071207F2090-209C212A212B2132214E2160-21882C60-2C7FA722-A787A78B-A78EA790-A793A7A0-A7AAA7F8-A7FFFB00-FB06FF21-FF3AFF41-FF5A", Lepcha: "1C00-1C371C3B-1C491C4D-1C4F", Limbu: "1900-191C1920-192B1930-193B19401944-194F", Lisu: "A4D0-A4FF", Malayalam: "0D020D030D05-0D0C0D0E-0D100D12-0D3A0D3D-0D440D46-0D480D4A-0D4E0D570D60-0D630D66-0D750D79-0D7F", Mandaic: "0840-085B085E", Meetei_Mayek: "AAE0-AAF6ABC0-ABEDABF0-ABF9", Mongolian: "1800180118041806-180E1810-18191820-18771880-18AA", Myanmar: "1000-109FAA60-AA7B", New_Tai_Lue: "1980-19AB19B0-19C919D0-19DA19DE19DF", Nko: "07C0-07FA", Ogham: "1680-169C", Ol_Chiki: "1C50-1C7F", Oriya: "0B01-0B030B05-0B0C0B0F0B100B13-0B280B2A-0B300B320B330B35-0B390B3C-0B440B470B480B4B-0B4D0B560B570B5C0B5D0B5F-0B630B66-0B77", Phags_Pa: "A840-A877", Rejang: "A930-A953A95F", Runic: "16A0-16EA16EE-16F0", Samaritan: "0800-082D0830-083E", Saurashtra: "A880-A8C4A8CE-A8D9", Sinhala: "0D820D830D85-0D960D9A-0DB10DB3-0DBB0DBD0DC0-0DC60DCA0DCF-0DD40DD60DD8-0DDF0DF2-0DF4", Sundanese: "1B80-1BBF1CC0-1CC7", Syloti_Nagri: "A800-A82B", Syriac: "0700-070D070F-074A074D-074F", Tagalog: "1700-170C170E-1714", Tagbanwa: 
"1760-176C176E-177017721773", Tai_Le: "1950-196D1970-1974", Tai_Tham: "1A20-1A5E1A60-1A7C1A7F-1A891A90-1A991AA0-1AAD", Tai_Viet: "AA80-AAC2AADB-AADF", Tamil: "0B820B830B85-0B8A0B8E-0B900B92-0B950B990B9A0B9C0B9E0B9F0BA30BA40BA8-0BAA0BAE-0BB90BBE-0BC20BC6-0BC80BCA-0BCD0BD00BD70BE6-0BFA", Telugu: "0C01-0C030C05-0C0C0C0E-0C100C12-0C280C2A-0C330C35-0C390C3D-0C440C46-0C480C4A-0C4D0C550C560C580C590C60-0C630C66-0C6F0C78-0C7F", Thaana: "0780-07B1", Thai: "0E01-0E3A0E40-0E5B", Tibetan: "0F00-0F470F49-0F6C0F71-0F970F99-0FBC0FBE-0FCC0FCE-0FD40FD90FDA", Tifinagh: "2D30-2D672D6F2D702D7F", Vai: "A500-A62B", Yi: "A000-A48CA490-A4C6" }); }(XRegExp)); /***** unicode-blocks.js *****/ /*! * XRegExp Unicode Blocks v1.2.0 * (c) 2010-2012 Steven Levithan <http://xregexp.com/> * MIT License * Uses Unicode 6.1 <http://unicode.org/> */ /** * Adds support for all Unicode blocks in the Basic Multilingual Plane (U+0000-U+FFFF). Unicode * blocks use the prefix "In". E.g., `\p{InBasicLatin}`. Token names are case insensitive, and any * spaces, hyphens, and underscores are ignored. 
* @requires XRegExp, XRegExp Unicode Base
 */
(function (XRegExp) {
    "use strict";

    // Unicode Blocks plugs into the \p{...} token machinery that Unicode Base
    // installs, so refuse to load when that package is missing.
    if (!XRegExp.addUnicodePackage) {
        throw new ReferenceError("Unicode Base must be loaded before Unicode Blocks");
    }

    XRegExp.install("extensibility");

    // Block name -> inclusive BMP code point range, written as contiguous
    // "XXXX-YYYY" hex spans. Values correspond to the Unicode 6.1 block data
    // (see the header above); treat as generated data and do not hand-edit.
    XRegExp.addUnicodePackage({
        InBasic_Latin: "0000-007F",
        InLatin_1_Supplement: "0080-00FF",
        InLatin_Extended_A: "0100-017F",
        InLatin_Extended_B: "0180-024F",
        InIPA_Extensions: "0250-02AF",
        InSpacing_Modifier_Letters: "02B0-02FF",
        InCombining_Diacritical_Marks: "0300-036F",
        InGreek_and_Coptic: "0370-03FF",
        InCyrillic: "0400-04FF",
        InCyrillic_Supplement: "0500-052F",
        InArmenian: "0530-058F",
        InHebrew: "0590-05FF",
        InArabic: "0600-06FF",
        InSyriac: "0700-074F",
        InArabic_Supplement: "0750-077F",
        InThaana: "0780-07BF",
        InNKo: "07C0-07FF",
        InSamaritan: "0800-083F",
        InMandaic: "0840-085F",
        InArabic_Extended_A: "08A0-08FF",
        InDevanagari: "0900-097F",
        InBengali: "0980-09FF",
        InGurmukhi: "0A00-0A7F",
        InGujarati: "0A80-0AFF",
        InOriya: "0B00-0B7F",
        InTamil: "0B80-0BFF",
        InTelugu: "0C00-0C7F",
        InKannada: "0C80-0CFF",
        InMalayalam: "0D00-0D7F",
        InSinhala: "0D80-0DFF",
        InThai: "0E00-0E7F",
        InLao: "0E80-0EFF",
        InTibetan: "0F00-0FFF",
        InMyanmar: "1000-109F",
        InGeorgian: "10A0-10FF",
        InHangul_Jamo: "1100-11FF",
        InEthiopic: "1200-137F",
        InEthiopic_Supplement: "1380-139F",
        InCherokee: "13A0-13FF",
        InUnified_Canadian_Aboriginal_Syllabics: "1400-167F",
        InOgham: "1680-169F",
        InRunic: "16A0-16FF",
        InTagalog: "1700-171F",
        InHanunoo: "1720-173F",
        InBuhid: "1740-175F",
        InTagbanwa: "1760-177F",
        InKhmer: "1780-17FF",
        InMongolian: "1800-18AF",
        InUnified_Canadian_Aboriginal_Syllabics_Extended: "18B0-18FF",
        InLimbu: "1900-194F",
        InTai_Le: "1950-197F",
        InNew_Tai_Lue: "1980-19DF",
        InKhmer_Symbols: "19E0-19FF",
        InBuginese: "1A00-1A1F",
        InTai_Tham: "1A20-1AAF",
        InBalinese: "1B00-1B7F",
        InSundanese: "1B80-1BBF",
        InBatak: "1BC0-1BFF",
        InLepcha: "1C00-1C4F",
        InOl_Chiki: "1C50-1C7F",
        InSundanese_Supplement: "1CC0-1CCF",
        InVedic_Extensions: "1CD0-1CFF",
        InPhonetic_Extensions: "1D00-1D7F",
        InPhonetic_Extensions_Supplement: "1D80-1DBF",
        InCombining_Diacritical_Marks_Supplement: "1DC0-1DFF",
        InLatin_Extended_Additional: "1E00-1EFF",
        InGreek_Extended: "1F00-1FFF",
        InGeneral_Punctuation: "2000-206F",
        InSuperscripts_and_Subscripts: "2070-209F",
        InCurrency_Symbols: "20A0-20CF",
        InCombining_Diacritical_Marks_for_Symbols: "20D0-20FF",
        InLetterlike_Symbols: "2100-214F",
        InNumber_Forms: "2150-218F",
        InArrows: "2190-21FF",
        InMathematical_Operators: "2200-22FF",
        InMiscellaneous_Technical: "2300-23FF",
        InControl_Pictures: "2400-243F",
        InOptical_Character_Recognition: "2440-245F",
        InEnclosed_Alphanumerics: "2460-24FF",
        InBox_Drawing: "2500-257F",
        InBlock_Elements: "2580-259F",
        InGeometric_Shapes: "25A0-25FF",
        InMiscellaneous_Symbols: "2600-26FF",
        InDingbats: "2700-27BF",
        InMiscellaneous_Mathematical_Symbols_A: "27C0-27EF",
        InSupplemental_Arrows_A: "27F0-27FF",
        InBraille_Patterns: "2800-28FF",
        InSupplemental_Arrows_B: "2900-297F",
        InMiscellaneous_Mathematical_Symbols_B: "2980-29FF",
        InSupplemental_Mathematical_Operators: "2A00-2AFF",
        InMiscellaneous_Symbols_and_Arrows: "2B00-2BFF",
        InGlagolitic: "2C00-2C5F",
        InLatin_Extended_C: "2C60-2C7F",
        InCoptic: "2C80-2CFF",
        InGeorgian_Supplement: "2D00-2D2F",
        InTifinagh: "2D30-2D7F",
        InEthiopic_Extended: "2D80-2DDF",
        InCyrillic_Extended_A: "2DE0-2DFF",
        InSupplemental_Punctuation: "2E00-2E7F",
        InCJK_Radicals_Supplement: "2E80-2EFF",
        InKangxi_Radicals: "2F00-2FDF",
        InIdeographic_Description_Characters: "2FF0-2FFF",
        InCJK_Symbols_and_Punctuation: "3000-303F",
        InHiragana: "3040-309F",
        InKatakana: "30A0-30FF",
        InBopomofo: "3100-312F",
        InHangul_Compatibility_Jamo: "3130-318F",
        InKanbun: "3190-319F",
        InBopomofo_Extended: "31A0-31BF",
        InCJK_Strokes: "31C0-31EF",
        InKatakana_Phonetic_Extensions: "31F0-31FF",
        InEnclosed_CJK_Letters_and_Months: "3200-32FF",
        InCJK_Compatibility: "3300-33FF",
        InCJK_Unified_Ideographs_Extension_A: "3400-4DBF",
        InYijing_Hexagram_Symbols: "4DC0-4DFF",
        InCJK_Unified_Ideographs: "4E00-9FFF",
        InYi_Syllables: "A000-A48F",
        InYi_Radicals: "A490-A4CF",
        InLisu: "A4D0-A4FF",
        InVai: "A500-A63F",
        InCyrillic_Extended_B: "A640-A69F",
        InBamum: "A6A0-A6FF",
        InModifier_Tone_Letters: "A700-A71F",
        InLatin_Extended_D: "A720-A7FF",
        InSyloti_Nagri: "A800-A82F",
        InCommon_Indic_Number_Forms: "A830-A83F",
        InPhags_pa: "A840-A87F",
        InSaurashtra: "A880-A8DF",
        InDevanagari_Extended: "A8E0-A8FF",
        InKayah_Li: "A900-A92F",
        InRejang: "A930-A95F",
        InHangul_Jamo_Extended_A: "A960-A97F",
        InJavanese: "A980-A9DF",
        InCham: "AA00-AA5F",
        InMyanmar_Extended_A: "AA60-AA7F",
        InTai_Viet: "AA80-AADF",
        InMeetei_Mayek_Extensions: "AAE0-AAFF",
        InEthiopic_Extended_A: "AB00-AB2F",
        InMeetei_Mayek: "ABC0-ABFF",
        InHangul_Syllables: "AC00-D7AF",
        InHangul_Jamo_Extended_B: "D7B0-D7FF",
        InHigh_Surrogates: "D800-DB7F",
        InHigh_Private_Use_Surrogates: "DB80-DBFF",
        InLow_Surrogates: "DC00-DFFF",
        InPrivate_Use_Area: "E000-F8FF",
        InCJK_Compatibility_Ideographs: "F900-FAFF",
        InAlphabetic_Presentation_Forms: "FB00-FB4F",
        InArabic_Presentation_Forms_A: "FB50-FDFF",
        InVariation_Selectors: "FE00-FE0F",
        InVertical_Forms: "FE10-FE1F",
        InCombining_Half_Marks: "FE20-FE2F",
        InCJK_Compatibility_Forms: "FE30-FE4F",
        InSmall_Form_Variants: "FE50-FE6F",
        InArabic_Presentation_Forms_B: "FE70-FEFF",
        InHalfwidth_and_Fullwidth_Forms: "FF00-FFEF",
        InSpecials: "FFF0-FFFF"
    });
}(XRegExp));

/***** unicode-properties.js *****/

/*!
 * XRegExp Unicode Properties v1.0.0
 * (c) 2012 Steven Levithan <http://xregexp.com/>
 * MIT License
 * Uses Unicode 6.1 <http://unicode.org/>
 */

/**
 * Adds Unicode properties necessary to meet Level 1 Unicode support (detailed in UTS#18 RL1.2).
 * Includes code points from the Basic Multilingual Plane (U+0000-U+FFFF) only. Token names are
 * case insensitive, and any spaces, hyphens, and underscores are ignored.
* @requires XRegExp, XRegExp Unicode Base */ (function (XRegExp) { "use strict"; if (!XRegExp.addUnicodePackage) { throw new ReferenceError("Unicode Base must be loaded before Unicode Properties"); } XRegExp.install("extensibility"); XRegExp.addUnicodePackage({ Alphabetic: "0041-005A0061-007A00AA00B500BA00C0-00D600D8-00F600F8-02C102C6-02D102E0-02E402EC02EE03450370-037403760377037A-037D03860388-038A038C038E-03A103A3-03F503F7-0481048A-05270531-055605590561-058705B0-05BD05BF05C105C205C405C505C705D0-05EA05F0-05F20610-061A0620-06570659-065F066E-06D306D5-06DC06E1-06E806ED-06EF06FA-06FC06FF0710-073F074D-07B107CA-07EA07F407F507FA0800-0817081A-082C0840-085808A008A2-08AC08E4-08E908F0-08FE0900-093B093D-094C094E-09500955-09630971-09770979-097F0981-09830985-098C098F09900993-09A809AA-09B009B209B6-09B909BD-09C409C709C809CB09CC09CE09D709DC09DD09DF-09E309F009F10A01-0A030A05-0A0A0A0F0A100A13-0A280A2A-0A300A320A330A350A360A380A390A3E-0A420A470A480A4B0A4C0A510A59-0A5C0A5E0A70-0A750A81-0A830A85-0A8D0A8F-0A910A93-0AA80AAA-0AB00AB20AB30AB5-0AB90ABD-0AC50AC7-0AC90ACB0ACC0AD00AE0-0AE30B01-0B030B05-0B0C0B0F0B100B13-0B280B2A-0B300B320B330B35-0B390B3D-0B440B470B480B4B0B4C0B560B570B5C0B5D0B5F-0B630B710B820B830B85-0B8A0B8E-0B900B92-0B950B990B9A0B9C0B9E0B9F0BA30BA40BA8-0BAA0BAE-0BB90BBE-0BC20BC6-0BC80BCA-0BCC0BD00BD70C01-0C030C05-0C0C0C0E-0C100C12-0C280C2A-0C330C35-0C390C3D-0C440C46-0C480C4A-0C4C0C550C560C580C590C60-0C630C820C830C85-0C8C0C8E-0C900C92-0CA80CAA-0CB30CB5-0CB90CBD-0CC40CC6-0CC80CCA-0CCC0CD50CD60CDE0CE0-0CE30CF10CF20D020D030D05-0D0C0D0E-0D100D12-0D3A0D3D-0D440D46-0D480D4A-0D4C0D4E0D570D60-0D630D7A-0D7F0D820D830D85-0D960D9A-0DB10DB3-0DBB0DBD0DC0-0DC60DCF-0DD40DD60DD8-0DDF0DF20DF30E01-0E3A0E40-0E460E4D0E810E820E840E870E880E8A0E8D0E94-0E970E99-0E9F0EA1-0EA30EA50EA70EAA0EAB0EAD-0EB90EBB-0EBD0EC0-0EC40EC60ECD0EDC-0EDF0F000F40-0F470F49-0F6C0F71-0F810F88-0F970F99-0FBC1000-10361038103B-103F1050-10621065-1068106E-1086108E109C109D10A0-10C510C710CD10D0-10FA10FC-1248124A-124D1250-12561258125A-125
D1260-1288128A-128D1290-12B012B2-12B512B8-12BE12C012C2-12C512C8-12D612D8-13101312-13151318-135A135F1380-138F13A0-13F41401-166C166F-167F1681-169A16A0-16EA16EE-16F01700-170C170E-17131720-17331740-17531760-176C176E-1770177217731780-17B317B6-17C817D717DC1820-18771880-18AA18B0-18F51900-191C1920-192B1930-19381950-196D1970-19741980-19AB19B0-19C91A00-1A1B1A20-1A5E1A61-1A741AA71B00-1B331B35-1B431B45-1B4B1B80-1BA91BAC-1BAF1BBA-1BE51BE7-1BF11C00-1C351C4D-1C4F1C5A-1C7D1CE9-1CEC1CEE-1CF31CF51CF61D00-1DBF1E00-1F151F18-1F1D1F20-1F451F48-1F4D1F50-1F571F591F5B1F5D1F5F-1F7D1F80-1FB41FB6-1FBC1FBE1FC2-1FC41FC6-1FCC1FD0-1FD31FD6-1FDB1FE0-1FEC1FF2-1FF41FF6-1FFC2071207F2090-209C21022107210A-211321152119-211D212421262128212A-212D212F-2139213C-213F2145-2149214E2160-218824B6-24E92C00-2C2E2C30-2C5E2C60-2CE42CEB-2CEE2CF22CF32D00-2D252D272D2D2D30-2D672D6F2D80-2D962DA0-2DA62DA8-2DAE2DB0-2DB62DB8-2DBE2DC0-2DC62DC8-2DCE2DD0-2DD62DD8-2DDE2DE0-2DFF2E2F3005-30073021-30293031-30353038-303C3041-3096309D-309F30A1-30FA30FC-30FF3105-312D3131-318E31A0-31BA31F0-31FF3400-4DB54E00-9FCCA000-A48CA4D0-A4FDA500-A60CA610-A61FA62AA62BA640-A66EA674-A67BA67F-A697A69F-A6EFA717-A71FA722-A788A78B-A78EA790-A793A7A0-A7AAA7F8-A801A803-A805A807-A80AA80C-A827A840-A873A880-A8C3A8F2-A8F7A8FBA90A-A92AA930-A952A960-A97CA980-A9B2A9B4-A9BFA9CFAA00-AA36AA40-AA4DAA60-AA76AA7AAA80-AABEAAC0AAC2AADB-AADDAAE0-AAEFAAF2-AAF5AB01-AB06AB09-AB0EAB11-AB16AB20-AB26AB28-AB2EABC0-ABEAAC00-D7A3D7B0-D7C6D7CB-D7FBF900-FA6DFA70-FAD9FB00-FB06FB13-FB17FB1D-FB28FB2A-FB36FB38-FB3CFB3EFB40FB41FB43FB44FB46-FBB1FBD3-FD3DFD50-FD8FFD92-FDC7FDF0-FDFBFE70-FE74FE76-FEFCFF21-FF3AFF41-FF5AFF66-FFBEFFC2-FFC7FFCA-FFCFFFD2-FFD7FFDA-FFDC", Uppercase: 
"0041-005A00C0-00D600D8-00DE01000102010401060108010A010C010E01100112011401160118011A011C011E01200122012401260128012A012C012E01300132013401360139013B013D013F0141014301450147014A014C014E01500152015401560158015A015C015E01600162016401660168016A016C016E017001720174017601780179017B017D018101820184018601870189-018B018E-0191019301940196-0198019C019D019F01A001A201A401A601A701A901AC01AE01AF01B1-01B301B501B701B801BC01C401C701CA01CD01CF01D101D301D501D701D901DB01DE01E001E201E401E601E801EA01EC01EE01F101F401F6-01F801FA01FC01FE02000202020402060208020A020C020E02100212021402160218021A021C021E02200222022402260228022A022C022E02300232023A023B023D023E02410243-02460248024A024C024E03700372037603860388-038A038C038E038F0391-03A103A3-03AB03CF03D2-03D403D803DA03DC03DE03E003E203E403E603E803EA03EC03EE03F403F703F903FA03FD-042F04600462046404660468046A046C046E04700472047404760478047A047C047E0480048A048C048E04900492049404960498049A049C049E04A004A204A404A604A804AA04AC04AE04B004B204B404B604B804BA04BC04BE04C004C104C304C504C704C904CB04CD04D004D204D404D604D804DA04DC04DE04E004E204E404E604E804EA04EC04EE04F004F204F404F604F804FA04FC04FE05000502050405060508050A050C050E05100512051405160518051A051C051E05200522052405260531-055610A0-10C510C710CD1E001E021E041E061E081E0A1E0C1E0E1E101E121E141E161E181E1A1E1C1E1E1E201E221E241E261E281E2A1E2C1E2E1E301E321E341E361E381E3A1E3C1E3E1E401E421E441E461E481E4A1E4C1E4E1E501E521E541E561E581E5A1E5C1E5E1E601E621E641E661E681E6A1E6C1E6E1E701E721E741E761E781E7A1E7C1E7E1E801E821E841E861E881E8A1E8C1E8E1E901E921E941E9E1EA01EA21EA41EA61EA81EAA1EAC1EAE1EB01EB21EB41EB61EB81EBA1EBC1EBE1EC01EC21EC41EC61EC81ECA1ECC1ECE1ED01ED21ED41ED61ED81EDA1EDC1EDE1EE01EE21EE41EE61EE81EEA1EEC1EEE1EF01EF21EF41EF61EF81EFA1EFC1EFE1F08-1F0F1F18-1F1D1F28-1F2F1F38-1F3F1F48-1F4D1F591F5B1F5D1F5F1F68-1F6F1FB8-1FBB1FC8-1FCB1FD8-1FDB1FE8-1FEC1FF8-1FFB21022107210B-210D2110-211221152119-211D212421262128212A-212D2130-2133213E213F21452160-216F218324B6-24CF2C00-2C2E2C602C62-2C642C672C692C6B2C6D-2C702C722C752C7E-2C802C822C842
C862C882C8A2C8C2C8E2C902C922C942C962C982C9A2C9C2C9E2CA02CA22CA42CA62CA82CAA2CAC2CAE2CB02CB22CB42CB62CB82CBA2CBC2CBE2CC02CC22CC42CC62CC82CCA2CCC2CCE2CD02CD22CD42CD62CD82CDA2CDC2CDE2CE02CE22CEB2CED2CF2A640A642A644A646A648A64AA64CA64EA650A652A654A656A658A65AA65CA65EA660A662A664A666A668A66AA66CA680A682A684A686A688A68AA68CA68EA690A692A694A696A722A724A726A728A72AA72CA72EA732A734A736A738A73AA73CA73EA740A742A744A746A748A74AA74CA74EA750A752A754A756A758A75AA75CA75EA760A762A764A766A768A76AA76CA76EA779A77BA77DA77EA780A782A784A786A78BA78DA790A792A7A0A7A2A7A4A7A6A7A8A7AAFF21-FF3A", Lowercase: "0061-007A00AA00B500BA00DF-00F600F8-00FF01010103010501070109010B010D010F01110113011501170119011B011D011F01210123012501270129012B012D012F01310133013501370138013A013C013E014001420144014601480149014B014D014F01510153015501570159015B015D015F01610163016501670169016B016D016F0171017301750177017A017C017E-0180018301850188018C018D019201950199-019B019E01A101A301A501A801AA01AB01AD01B001B401B601B901BA01BD-01BF01C601C901CC01CE01D001D201D401D601D801DA01DC01DD01DF01E101E301E501E701E901EB01ED01EF01F001F301F501F901FB01FD01FF02010203020502070209020B020D020F02110213021502170219021B021D021F02210223022502270229022B022D022F02310233-0239023C023F0240024202470249024B024D024F-02930295-02B802C002C102E0-02E40345037103730377037A-037D039003AC-03CE03D003D103D5-03D703D903DB03DD03DF03E103E303E503E703E903EB03ED03EF-03F303F503F803FB03FC0430-045F04610463046504670469046B046D046F04710473047504770479047B047D047F0481048B048D048F04910493049504970499049B049D049F04A104A304A504A704A904AB04AD04AF04B104B304B504B704B904BB04BD04BF04C204C404C604C804CA04CC04CE04CF04D104D304D504D704D904DB04DD04DF04E104E304E504E704E904EB04ED04EF04F104F304F504F704F904FB04FD04FF05010503050505070509050B050D050F05110513051505170519051B051D051F05210523052505270561-05871D00-1DBF1E011E031E051E071E091E0B1E0D1E0F1E111E131E151E171E191E1B1E1D1E1F1E211E231E251E271E291E2B1E2D1E2F1E311E331E351E371E391E3B1E3D1E3F1E411E431E451E471E491E4B1E4D1E4F1E511E531E551E571E591E5B1E5D1E5F
1E611E631E651E671E691E6B1E6D1E6F1E711E731E751E771E791E7B1E7D1E7F1E811E831E851E871E891E8B1E8D1E8F1E911E931E95-1E9D1E9F1EA11EA31EA51EA71EA91EAB1EAD1EAF1EB11EB31EB51EB71EB91EBB1EBD1EBF1EC11EC31EC51EC71EC91ECB1ECD1ECF1ED11ED31ED51ED71ED91EDB1EDD1EDF1EE11EE31EE51EE71EE91EEB1EED1EEF1EF11EF31EF51EF71EF91EFB1EFD1EFF-1F071F10-1F151F20-1F271F30-1F371F40-1F451F50-1F571F60-1F671F70-1F7D1F80-1F871F90-1F971FA0-1FA71FB0-1FB41FB61FB71FBE1FC2-1FC41FC61FC71FD0-1FD31FD61FD71FE0-1FE71FF2-1FF41FF61FF72071207F2090-209C210A210E210F2113212F21342139213C213D2146-2149214E2170-217F218424D0-24E92C30-2C5E2C612C652C662C682C6A2C6C2C712C732C742C76-2C7D2C812C832C852C872C892C8B2C8D2C8F2C912C932C952C972C992C9B2C9D2C9F2CA12CA32CA52CA72CA92CAB2CAD2CAF2CB12CB32CB52CB72CB92CBB2CBD2CBF2CC12CC32CC52CC72CC92CCB2CCD2CCF2CD12CD32CD52CD72CD92CDB2CDD2CDF2CE12CE32CE42CEC2CEE2CF32D00-2D252D272D2DA641A643A645A647A649A64BA64DA64FA651A653A655A657A659A65BA65DA65FA661A663A665A667A669A66BA66DA681A683A685A687A689A68BA68DA68FA691A693A695A697A723A725A727A729A72BA72DA72F-A731A733A735A737A739A73BA73DA73FA741A743A745A747A749A74BA74DA74FA751A753A755A757A759A75BA75DA75FA761A763A765A767A769A76BA76DA76F-A778A77AA77CA77FA781A783A785A787A78CA78EA791A793A7A1A7A3A7A5A7A7A7A9A7F8-A7FAFB00-FB06FB13-FB17FF41-FF5A", White_Space: "0009-000D0020008500A01680180E2000-200A20282029202F205F3000", Noncharacter_Code_Point: "FDD0-FDEFFFFEFFFF", Default_Ignorable_Code_Point: "00AD034F115F116017B417B5180B-180D200B-200F202A-202E2060-206F3164FE00-FE0FFEFFFFA0FFF0-FFF8", // \p{Any} matches a code unit. 
To match any code point via surrogate pairs, use (?:[\0-\uD7FF\uDC00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF]) Any: "0000-FFFF", // \p{^Any} compiles to [^\u0000-\uFFFF]; [\p{^Any}] to [] Ascii: "0000-007F", // \p{Assigned} is equivalent to \p{^Cn} //Assigned: XRegExp("[\\p{^Cn}]").source.replace(/[[\]]|\\u/g, "") // Negation inside a character class triggers inversion Assigned: "0000-0377037A-037E0384-038A038C038E-03A103A3-05270531-05560559-055F0561-05870589058A058F0591-05C705D0-05EA05F0-05F40600-06040606-061B061E-070D070F-074A074D-07B107C0-07FA0800-082D0830-083E0840-085B085E08A008A2-08AC08E4-08FE0900-09770979-097F0981-09830985-098C098F09900993-09A809AA-09B009B209B6-09B909BC-09C409C709C809CB-09CE09D709DC09DD09DF-09E309E6-09FB0A01-0A030A05-0A0A0A0F0A100A13-0A280A2A-0A300A320A330A350A360A380A390A3C0A3E-0A420A470A480A4B-0A4D0A510A59-0A5C0A5E0A66-0A750A81-0A830A85-0A8D0A8F-0A910A93-0AA80AAA-0AB00AB20AB30AB5-0AB90ABC-0AC50AC7-0AC90ACB-0ACD0AD00AE0-0AE30AE6-0AF10B01-0B030B05-0B0C0B0F0B100B13-0B280B2A-0B300B320B330B35-0B390B3C-0B440B470B480B4B-0B4D0B560B570B5C0B5D0B5F-0B630B66-0B770B820B830B85-0B8A0B8E-0B900B92-0B950B990B9A0B9C0B9E0B9F0BA30BA40BA8-0BAA0BAE-0BB90BBE-0BC20BC6-0BC80BCA-0BCD0BD00BD70BE6-0BFA0C01-0C030C05-0C0C0C0E-0C100C12-0C280C2A-0C330C35-0C390C3D-0C440C46-0C480C4A-0C4D0C550C560C580C590C60-0C630C66-0C6F0C78-0C7F0C820C830C85-0C8C0C8E-0C900C92-0CA80CAA-0CB30CB5-0CB90CBC-0CC40CC6-0CC80CCA-0CCD0CD50CD60CDE0CE0-0CE30CE6-0CEF0CF10CF20D020D030D05-0D0C0D0E-0D100D12-0D3A0D3D-0D440D46-0D480D4A-0D4E0D570D60-0D630D66-0D750D79-0D7F0D820D830D85-0D960D9A-0DB10DB3-0DBB0DBD0DC0-0DC60DCA0DCF-0DD40DD60DD8-0DDF0DF2-0DF40E01-0E3A0E3F-0E5B0E810E820E840E870E880E8A0E8D0E94-0E970E99-0E9F0EA1-0EA30EA50EA70EAA0EAB0EAD-0EB90EBB-0EBD0EC0-0EC40EC60EC8-0ECD0ED0-0ED90EDC-0EDF0F00-0F470F49-0F6C0F71-0F970F99-0FBC0FBE-0FCC0FCE-0FDA1000-10C510C710CD10D0-1248124A-124D1250-12561258125A-125D1260-1288128A-128D1290-12B012B2-12B512B8-12BE12C012C2-12C512C8-12D612D8-13101312-13151318-135
A135D-137C1380-139913A0-13F41400-169C16A0-16F01700-170C170E-17141720-17361740-17531760-176C176E-1770177217731780-17DD17E0-17E917F0-17F91800-180E1810-18191820-18771880-18AA18B0-18F51900-191C1920-192B1930-193B19401944-196D1970-19741980-19AB19B0-19C919D0-19DA19DE-1A1B1A1E-1A5E1A60-1A7C1A7F-1A891A90-1A991AA0-1AAD1B00-1B4B1B50-1B7C1B80-1BF31BFC-1C371C3B-1C491C4D-1C7F1CC0-1CC71CD0-1CF61D00-1DE61DFC-1F151F18-1F1D1F20-1F451F48-1F4D1F50-1F571F591F5B1F5D1F5F-1F7D1F80-1FB41FB6-1FC41FC6-1FD31FD6-1FDB1FDD-1FEF1FF2-1FF41FF6-1FFE2000-2064206A-20712074-208E2090-209C20A0-20B920D0-20F02100-21892190-23F32400-24262440-244A2460-26FF2701-2B4C2B50-2B592C00-2C2E2C30-2C5E2C60-2CF32CF9-2D252D272D2D2D30-2D672D6F2D702D7F-2D962DA0-2DA62DA8-2DAE2DB0-2DB62DB8-2DBE2DC0-2DC62DC8-2DCE2DD0-2DD62DD8-2DDE2DE0-2E3B2E80-2E992E9B-2EF32F00-2FD52FF0-2FFB3000-303F3041-30963099-30FF3105-312D3131-318E3190-31BA31C0-31E331F0-321E3220-32FE3300-4DB54DC0-9FCCA000-A48CA490-A4C6A4D0-A62BA640-A697A69F-A6F7A700-A78EA790-A793A7A0-A7AAA7F8-A82BA830-A839A840-A877A880-A8C4A8CE-A8D9A8E0-A8FBA900-A953A95F-A97CA980-A9CDA9CF-A9D9A9DEA9DFAA00-AA36AA40-AA4DAA50-AA59AA5C-AA7BAA80-AAC2AADB-AAF6AB01-AB06AB09-AB0EAB11-AB16AB20-AB26AB28-AB2EABC0-ABEDABF0-ABF9AC00-D7A3D7B0-D7C6D7CB-D7FBD800-FA6DFA70-FAD9FB00-FB06FB13-FB17FB1D-FB36FB38-FB3CFB3EFB40FB41FB43FB44FB46-FBC1FBD3-FD3FFD50-FD8FFD92-FDC7FDF0-FDFDFE00-FE19FE20-FE26FE30-FE52FE54-FE66FE68-FE6BFE70-FE74FE76-FEFCFEFFFF01-FFBEFFC2-FFC7FFCA-FFCFFFD2-FFD7FFDA-FFDCFFE0-FFE6FFE8-FFEEFFF9-FFFD" }); }(XRegExp)); /***** matchrecursive.js *****/ /*! * XRegExp.matchRecursive v0.2.0 * (c) 2009-2012 Steven Levithan <http://xregexp.com/> * MIT License */ (function (XRegExp) { "use strict"; /** * Returns a match detail object composed of the provided values. 
* @private */ function row(value, name, start, end) { return {value: value, name: name, start: start, end: end}; } /** * Returns an array of match strings between outermost left and right delimiters, or an array of * objects with detailed match parts and position data. An error is thrown if delimiters are * unbalanced within the data. * @memberOf XRegExp * @param {String} str String to search. * @param {String} left Left delimiter as an XRegExp pattern. * @param {String} right Right delimiter as an XRegExp pattern. * @param {String} [flags] Flags for the left and right delimiters. Use any of: `gimnsxy`. * @param {Object} [options] Lets you specify `valueNames` and `escapeChar` options. * @returns {Array} Array of matches, or an empty array. * @example * * // Basic usage * var str = '(t((e))s)t()(ing)'; * XRegExp.matchRecursive(str, '\\(', '\\)', 'g'); * // -> ['t((e))s', '', 'ing'] * * // Extended information mode with valueNames * str = 'Here is <div> <div>an</div></div> example'; * XRegExp.matchRecursive(str, '<div\\s*>', '</div>', 'gi', { * valueNames: ['between', 'left', 'match', 'right'] * }); * // -> [ * // {name: 'between', value: 'Here is ', start: 0, end: 8}, * // {name: 'left', value: '<div>', start: 8, end: 13}, * // {name: 'match', value: ' <div>an</div>', start: 13, end: 27}, * // {name: 'right', value: '</div>', start: 27, end: 33}, * // {name: 'between', value: ' example', start: 33, end: 41} * // ] * * // Omitting unneeded parts with null valueNames, and using escapeChar * str = '...{1}\\{{function(x,y){return y+x;}}'; * XRegExp.matchRecursive(str, '{', '}', 'g', { * valueNames: ['literal', null, 'value', null], * escapeChar: '\\' * }); * // -> [ * // {name: 'literal', value: '...', start: 0, end: 3}, * // {name: 'value', value: '1', start: 4, end: 5}, * // {name: 'literal', value: '\\{', start: 6, end: 8}, * // {name: 'value', value: 'function(x,y){return y+x;}', start: 9, end: 35} * // ] * * // Sticky mode via flag y * str = '<1><<<2>>><3>4<5>'; * 
XRegExp.matchRecursive(str, '<', '>', 'gy'); * // -> ['1', '<<2>>', '3'] */ XRegExp.matchRecursive = function (str, left, right, flags, options) { flags = flags || ""; options = options || {}; var global = flags.indexOf("g") > -1, sticky = flags.indexOf("y") > -1, basicFlags = flags.replace(/y/g, ""), // Flag y controlled internally escapeChar = options.escapeChar, vN = options.valueNames, output = [], openTokens = 0, delimStart = 0, delimEnd = 0, lastOuterEnd = 0, outerStart, innerStart, leftMatch, rightMatch, esc; left = XRegExp(left, basicFlags); right = XRegExp(right, basicFlags); if (escapeChar) { if (escapeChar.length > 1) { throw new SyntaxError("can't use more than one escape character"); } escapeChar = XRegExp.escape(escapeChar); // Using XRegExp.union safely rewrites backreferences in `left` and `right` esc = new RegExp( "(?:" + escapeChar + "[\\S\\s]|(?:(?!" + XRegExp.union([left, right]).source + ")[^" + escapeChar + "])+)+", flags.replace(/[^im]+/g, "") // Flags gy not needed here; flags nsx handled by XRegExp ); } while (true) { // If using an escape character, advance to the delimiter's next starting position, // skipping any escaped characters in between if (escapeChar) { delimEnd += (XRegExp.exec(str, esc, delimEnd, "sticky") || [""])[0].length; } leftMatch = XRegExp.exec(str, left, delimEnd); rightMatch = XRegExp.exec(str, right, delimEnd); // Keep the leftmost match only if (leftMatch && rightMatch) { if (leftMatch.index <= rightMatch.index) { rightMatch = null; } else { leftMatch = null; } } /* Paths (LM:leftMatch, RM:rightMatch, OT:openTokens): LM | RM | OT | Result 1 | 0 | 1 | loop 1 | 0 | 0 | loop 0 | 1 | 1 | loop 0 | 1 | 0 | throw 0 | 0 | 1 | throw 0 | 0 | 0 | break * Doesn't include the sticky mode special case * Loop ends after the first completed match if `!global` */ if (leftMatch || rightMatch) { delimStart = (leftMatch || rightMatch).index; delimEnd = delimStart + (leftMatch || rightMatch)[0].length; } else if (!openTokens) { break; } 
if (sticky && !openTokens && delimStart > lastOuterEnd) { break; } if (leftMatch) { if (!openTokens) { outerStart = delimStart; innerStart = delimEnd; } ++openTokens; } else if (rightMatch && openTokens) { if (!--openTokens) { if (vN) { if (vN[0] && outerStart > lastOuterEnd) { output.push(row(vN[0], str.slice(lastOuterEnd, outerStart), lastOuterEnd, outerStart)); } if (vN[1]) { output.push(row(vN[1], str.slice(outerStart, innerStart), outerStart, innerStart)); } if (vN[2]) { output.push(row(vN[2], str.slice(innerStart, delimStart), innerStart, delimStart)); } if (vN[3]) { output.push(row(vN[3], str.slice(delimStart, delimEnd), delimStart, delimEnd)); } } else { output.push(str.slice(innerStart, delimStart)); } lastOuterEnd = delimEnd; if (!global) { break; } } } else { throw new Error("string contains unbalanced delimiters"); } // If the delimiter matched an empty string, avoid an infinite loop if (delimStart === delimEnd) { ++delimEnd; } } if (global && !sticky && vN && vN[0] && str.length > lastOuterEnd) { output.push(row(vN[0], str.slice(lastOuterEnd), lastOuterEnd, str.length)); } return output; }; }(XRegExp)); /***** build.js *****/ /*! * XRegExp.build v0.1.0 * (c) 2012 Steven Levithan <http://xregexp.com/> * MIT License * Inspired by RegExp.create by Lea Verou <http://lea.verou.me/> */ (function (XRegExp) { "use strict"; var subparts = /(\()(?!\?)|\\([1-9]\d*)|\\[\s\S]|\[(?:[^\\\]]|\\[\s\S])*]/g, parts = XRegExp.union([/\({{([\w$]+)}}\)|{{([\w$]+)}}/, subparts], "g"); /** * Strips a leading `^` and trailing unescaped `$`, if both are present. * @private * @param {String} pattern Pattern to process. * @returns {String} Pattern with edge anchors removed. 
*/ function deanchor(pattern) { var startAnchor = /^(?:\(\?:\))?\^/, // Leading `^` or `(?:)^` (handles /x cruft) endAnchor = /\$(?:\(\?:\))?$/; // Trailing `$` or `$(?:)` (handles /x cruft) if (endAnchor.test(pattern.replace(/\\[\s\S]/g, ""))) { // Ensure trailing `$` isn't escaped return pattern.replace(startAnchor, "").replace(endAnchor, ""); } return pattern; } /** * Converts the provided value to an XRegExp. * @private * @param {String|RegExp} value Value to convert. * @returns {RegExp} XRegExp object with XRegExp syntax applied. */ function asXRegExp(value) { return XRegExp.isRegExp(value) ? (value.xregexp && !value.xregexp.isNative ? value : XRegExp(value.source)) : XRegExp(value); } /** * Builds regexes using named subpatterns, for readability and pattern reuse. Backreferences in the * outer pattern and provided subpatterns are automatically renumbered to work correctly. Native * flags used by provided subpatterns are ignored in favor of the `flags` argument. * @memberOf XRegExp * @param {String} pattern XRegExp pattern using `{{name}}` for embedded subpatterns. Allows * `({{name}})` as shorthand for `(?<name>{{name}})`. Patterns cannot be embedded within * character classes. * @param {Object} subs Lookup object for named subpatterns. Values can be strings or regexes. A * leading `^` and trailing unescaped `$` are stripped from subpatterns, if both are present. * @param {String} [flags] Any combination of XRegExp flags. * @returns {RegExp} Regex with interpolated subpatterns. 
* @example * * var time = XRegExp.build('(?x)^ {{hours}} ({{minutes}}) $', { * hours: XRegExp.build('{{h12}} : | {{h24}}', { * h12: /1[0-2]|0?[1-9]/, * h24: /2[0-3]|[01][0-9]/ * }, 'x'), * minutes: /^[0-5][0-9]$/ * }); * time.test('10:59'); // -> true * XRegExp.exec('10:59', time).minutes; // -> '59' */ XRegExp.build = function (pattern, subs, flags) { var inlineFlags = /^\(\?([\w$]+)\)/.exec(pattern), data = {}, numCaps = 0, // Caps is short for captures numPriorCaps, numOuterCaps = 0, outerCapsMap = [0], outerCapNames, sub, p; // Add flags within a leading mode modifier to the overall pattern's flags if (inlineFlags) { flags = flags || ""; inlineFlags[1].replace(/./g, function (flag) { flags += (flags.indexOf(flag) > -1 ? "" : flag); // Don't add duplicates }); } for (p in subs) { if (subs.hasOwnProperty(p)) { // Passing to XRegExp enables entended syntax for subpatterns provided as strings // and ensures independent validity, lest an unescaped `(`, `)`, `[`, or trailing // `\` breaks the `(?:)` wrapper. For subpatterns provided as regexes, it dies on // octals and adds the `xregexp` property, for simplicity sub = asXRegExp(subs[p]); // Deanchoring allows embedding independently useful anchored regexes. If you // really need to keep your anchors, double them (i.e., `^^...$$`) data[p] = {pattern: deanchor(sub.source), names: sub.xregexp.captureNames || []}; } } // Passing to XRegExp dies on octals and ensures the outer pattern is independently valid; // helps keep this simple. 
Named captures will be put back pattern = asXRegExp(pattern); outerCapNames = pattern.xregexp.captureNames || []; pattern = pattern.source.replace(parts, function ($0, $1, $2, $3, $4) { var subName = $1 || $2, capName, intro; if (subName) { // Named subpattern if (!data.hasOwnProperty(subName)) { throw new ReferenceError("undefined property " + $0); } if ($1) { // Named subpattern was wrapped in a capturing group capName = outerCapNames[numOuterCaps]; outerCapsMap[++numOuterCaps] = ++numCaps; // If it's a named group, preserve the name. Otherwise, use the subpattern name // as the capture name intro = "(?<" + (capName || subName) + ">"; } else { intro = "(?:"; } numPriorCaps = numCaps; return intro + data[subName].pattern.replace(subparts, function (match, paren, backref) { if (paren) { // Capturing group capName = data[subName].names[numCaps - numPriorCaps]; ++numCaps; if (capName) { // If the current capture has a name, preserve the name return "(?<" + capName + ">"; } } else if (backref) { // Backreference return "\\" + (+backref + numPriorCaps); // Rewrite the backreference } return match; }) + ")"; } if ($3) { // Capturing group capName = outerCapNames[numOuterCaps]; outerCapsMap[++numOuterCaps] = ++numCaps; if (capName) { // If the current capture has a name, preserve the name return "(?<" + capName + ">"; } } else if ($4) { // Backreference return "\\" + outerCapsMap[+$4]; // Rewrite the backreference } return $0; }); return XRegExp(pattern, flags); }; }(XRegExp)); /***** prototypes.js *****/ /*! * XRegExp Prototype Methods v1.0.0 * (c) 2012 Steven Levithan <http://xregexp.com/> * MIT License */ /** * Adds a collection of methods to `XRegExp.prototype`. RegExp objects copied by XRegExp are also * augmented with any `XRegExp.prototype` methods. 
Hence, the following work equivalently: * * XRegExp('[a-z]', 'ig').xexec('abc'); * XRegExp(/[a-z]/ig).xexec('abc'); * XRegExp.globalize(/[a-z]/i).xexec('abc'); */ (function (XRegExp) { "use strict"; /** * Copy properties of `b` to `a`. * @private * @param {Object} a Object that will receive new properties. * @param {Object} b Object whose properties will be copied. */ function extend(a, b) { for (var p in b) { if (b.hasOwnProperty(p)) { a[p] = b[p]; } } //return a; } extend(XRegExp.prototype, { /** * Implicitly calls the regex's `test` method with the first value in the provided arguments array. * @memberOf XRegExp.prototype * @param {*} context Ignored. Accepted only for congruity with `Function.prototype.apply`. * @param {Array} args Array with the string to search as its first value. * @returns {Boolean} Whether the regex matched the provided value. * @example * * XRegExp('[a-z]').apply(null, ['abc']); // -> true */ apply: function (context, args) { return this.test(args[0]); }, /** * Implicitly calls the regex's `test` method with the provided string. * @memberOf XRegExp.prototype * @param {*} context Ignored. Accepted only for congruity with `Function.prototype.call`. * @param {String} str String to search. * @returns {Boolean} Whether the regex matched the provided value. * @example * * XRegExp('[a-z]').call(null, 'abc'); // -> true */ call: function (context, str) { return this.test(str); }, /** * Implicitly calls {@link #XRegExp.forEach}. * @memberOf XRegExp.prototype * @example * * XRegExp('\\d').forEach('1a2345', function (match, i) { * if (i % 2) this.push(+match[0]); * }, []); * // -> [2, 4] */ forEach: function (str, callback, context) { return XRegExp.forEach(str, this, callback, context); }, /** * Implicitly calls {@link #XRegExp.globalize}. 
* @memberOf XRegExp.prototype * @example * * var globalCopy = XRegExp('regex').globalize(); * globalCopy.global; // -> true */ globalize: function () { return XRegExp.globalize(this); }, /** * Implicitly calls {@link #XRegExp.exec}. * @memberOf XRegExp.prototype * @example * * var match = XRegExp('U\\+(?<hex>[0-9A-F]{4})').xexec('U+2620'); * match.hex; // -> '2620' */ xexec: function (str, pos, sticky) { return XRegExp.exec(str, this, pos, sticky); }, /** * Implicitly calls {@link #XRegExp.test}. * @memberOf XRegExp.prototype * @example * * XRegExp('c').xtest('abc'); // -> true */ xtest: function (str, pos, sticky) { return XRegExp.test(str, this, pos, sticky); } }); }(XRegExp));
PypiClean
/C-Telethon-1.28.5.tar.gz/C-Telethon-1.28.5/telethon/password.py
import hashlib
import os

from .crypto import factorization
from .tl import types


def check_prime_and_good_check(prime: int, g: int):
    """Fully validate that ``prime``/``g`` describe an acceptable DH group.

    Raises ``ValueError`` if the prime is not a 2048-bit safe prime or if
    ``g`` fails the congruence condition required for that generator.
    Returns ``None`` when everything checks out.
    """
    # Telegram's SRP parameters use exactly 2048-bit primes.
    good_prime_bits_count = 2048
    if prime < 0 or prime.bit_length() != good_prime_bits_count:
        raise ValueError('bad prime count {}, expected {}'
                         .format(prime.bit_length(), good_prime_bits_count))

    # TODO This is awfully slow
    # A first factor of 1 means the number could not be split, i.e. it is prime.
    if factorization.Factorization.factorize(prime)[0] != 1:
        raise ValueError('given "prime" is not prime')

    # Each supported generator requires the prime to satisfy a specific
    # congruence (so that g generates the expected subgroup); g == 4 needs
    # no extra condition.
    if g == 2:
        if prime % 8 != 7:
            raise ValueError('bad g {}, mod8 {}'.format(g, prime % 8))
    elif g == 3:
        if prime % 3 != 2:
            raise ValueError('bad g {}, mod3 {}'.format(g, prime % 3))
    elif g == 4:
        pass
    elif g == 5:
        if prime % 5 not in (1, 4):
            raise ValueError('bad g {}, mod5 {}'.format(g, prime % 5))
    elif g == 6:
        if prime % 24 not in (19, 23):
            raise ValueError('bad g {}, mod24 {}'.format(g, prime % 24))
    elif g == 7:
        if prime % 7 not in (3, 5, 6):
            raise ValueError('bad g {}, mod7 {}'.format(g, prime % 7))
    else:
        raise ValueError('bad g {}'.format(g))

    # Safe-prime check: (prime - 1) / 2 must itself be prime.
    prime_sub1_div2 = (prime - 1) // 2
    if factorization.Factorization.factorize(prime_sub1_div2)[0] != 1:
        raise ValueError('(prime - 1) // 2 is not prime')

    # Else it's good


def check_prime_and_good(prime_bytes: bytes, g: int):
    """Validate ``prime_bytes``/``g``, fast-pathing Telegram's known-good prime.

    If the prime equals the hard-coded known-good 2048-bit prime and ``g``
    is one of the generators accepted for it, return immediately; otherwise
    fall back to the expensive full check in ``check_prime_and_good_check``.
    """
    # The 2048-bit prime Telegram ships for SRP, big-endian byte order.
    good_prime = bytes((
        0xC7, 0x1C, 0xAE, 0xB9, 0xC6, 0xB1, 0xC9, 0x04,
        0x8E, 0x6C, 0x52, 0x2F, 0x70, 0xF1, 0x3F, 0x73,
        0x98, 0x0D, 0x40, 0x23, 0x8E, 0x3E, 0x21, 0xC1,
        0x49, 0x34, 0xD0, 0x37, 0x56, 0x3D, 0x93, 0x0F,
        0x48, 0x19, 0x8A, 0x0A, 0xA7, 0xC1, 0x40, 0x58,
        0x22, 0x94, 0x93, 0xD2, 0x25, 0x30, 0xF4, 0xDB,
        0xFA, 0x33, 0x6F, 0x6E, 0x0A, 0xC9, 0x25, 0x13,
        0x95, 0x43, 0xAE, 0xD4, 0x4C, 0xCE, 0x7C, 0x37,
        0x20, 0xFD, 0x51, 0xF6, 0x94, 0x58, 0x70, 0x5A,
        0xC6, 0x8C, 0xD4, 0xFE, 0x6B, 0x6B, 0x13, 0xAB,
        0xDC, 0x97, 0x46, 0x51, 0x29, 0x69, 0x32, 0x84,
        0x54, 0xF1, 0x8F, 0xAF, 0x8C, 0x59, 0x5F, 0x64,
        0x24, 0x77, 0xFE, 0x96, 0xBB, 0x2A, 0x94, 0x1D,
        0x5B, 0xCD, 0x1D, 0x4A, 0xC8, 0xCC, 0x49, 0x88,
        0x07, 0x08, 0xFA, 0x9B, 0x37, 0x8E, 0x3C, 0x4F,
        0x3A, 0x90, 0x60, 0xBE,
        0xE6, 0x7C, 0xF9, 0xA4, 0xA4, 0xA6, 0x95, 0x81,
        0x10, 0x51, 0x90, 0x7E, 0x16, 0x27, 0x53, 0xB5,
        0x6B, 0x0F, 0x6B, 0x41, 0x0D, 0xBA, 0x74, 0xD8,
        0xA8, 0x4B, 0x2A, 0x14, 0xB3, 0x14, 0x4E, 0x0E,
        0xF1, 0x28, 0x47, 0x54, 0xFD, 0x17, 0xED, 0x95,
        0x0D, 0x59, 0x65, 0xB4, 0xB9, 0xDD, 0x46, 0x58,
        0x2D, 0xB1, 0x17, 0x8D, 0x16, 0x9C, 0x6B, 0xC4,
        0x65, 0xB0, 0xD6, 0xFF, 0x9C, 0xA3, 0x92, 0x8F,
        0xEF, 0x5B, 0x9A, 0xE4, 0xE4, 0x18, 0xFC, 0x15,
        0xE8, 0x3E, 0xBE, 0xA0, 0xF8, 0x7F, 0xA9, 0xFF,
        0x5E, 0xED, 0x70, 0x05, 0x0D, 0xED, 0x28, 0x49,
        0xF4, 0x7B, 0xF9, 0x59, 0xD9, 0x56, 0x85, 0x0C,
        0xE9, 0x29, 0x85, 0x1F, 0x0D, 0x81, 0x15, 0xF6,
        0x35, 0xB1, 0x05, 0xEE, 0x2E, 0x4E, 0x15, 0xD0,
        0x4B, 0x24, 0x54, 0xBF, 0x6F, 0x4F, 0xAD, 0xF0,
        0x34, 0xB1, 0x04, 0x03, 0x11, 0x9C, 0xD8, 0xE3,
        0xB9, 0x2F, 0xCC, 0x5B))
    if good_prime == prime_bytes:
        if g in (3, 4, 5, 7):
            return  # It's good

    check_prime_and_good_check(int.from_bytes(prime_bytes, 'big'), g)


def is_good_large(number: int, p: int) -> bool:
    # True when 0 < number < p, i.e. ``number`` is a valid group element.
    return number > 0 and p - number > 0


# All big numbers hashed by this module are serialized to this many bytes
# (2048 bits), matching the prime size.
SIZE_FOR_HASH = 256


def num_bytes_for_hash(number: bytes) -> bytes:
    # Left-pad an already-serialized big number with zero bytes up to
    # SIZE_FOR_HASH bytes.
    return bytes(SIZE_FOR_HASH - len(number)) + number


def big_num_for_hash(g: int) -> bytes:
    # Serialize an integer as a fixed-size big-endian byte string.
    return g.to_bytes(SIZE_FOR_HASH, 'big')


def sha256(*p: bytes) -> bytes:
    """Return SHA-256 over the concatenation of all given byte strings."""
    # NOTE: the local name shadows the ``hash`` builtin (pre-existing).
    hash = hashlib.sha256()
    for q in p:
        hash.update(q)
    return hash.digest()


def is_good_mod_exp_first(modexp: int, prime: int) -> bool:
    """Check that a modular exponentiation result is usable for SRP.

    Both ``modexp`` and ``prime - modexp`` must be large (at least
    2048 - 64 bits) and ``modexp`` must fit in 256 bytes.
    """
    diff = prime - modexp
    min_diff_bits_count = 2048 - 64
    max_mod_exp_size = 256
    if diff < 0 or \
            diff.bit_length() < min_diff_bits_count or \
            modexp.bit_length() < min_diff_bits_count or \
            (modexp.bit_length() + 7) // 8 > max_mod_exp_size:
        return False

    return True


def xor(a: bytes, b: bytes) -> bytes:
    # Byte-wise XOR; the result is as long as the shorter input (zip stops
    # at the shorter one).
    return bytes(x ^ y for x, y in zip(a, b))


def pbkdf2sha512(password: bytes, salt: bytes, iterations: int) -> bytes:
    # Thin wrapper over hashlib's PBKDF2-HMAC with SHA-512.
    return hashlib.pbkdf2_hmac('sha512', password, salt, iterations)


def compute_hash(algo: types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
                 password: str) -> bytes:
    """Derive the salted password hash ``x`` used by Telegram's SRP.

    Chain (per the algorithm's name): two salted SHA-256 passes, 100000
    PBKDF2-HMAC-SHA512 iterations with salt1, then a final salted SHA-256.
    """
    hash1 = sha256(algo.salt1, password.encode('utf-8'), algo.salt1)
    hash2 = sha256(algo.salt2, hash1, algo.salt2)
    hash3 = pbkdf2sha512(hash2, algo.salt1, 100000)
    return sha256(algo.salt2, hash3, algo.salt2)


def compute_digest(algo: types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
                   password: str) -> bytes:
    """Return ``g ^ x mod p`` serialized to 256 bytes (the SRP verifier).

    Raises ``ValueError`` if the algorithm's prime/generator are invalid.
    """
    try:
        check_prime_and_good(algo.p, algo.g)
    except ValueError:
        raise ValueError('bad p/g in password')

    value = pow(algo.g,
                int.from_bytes(compute_hash(algo, password), 'big'),
                int.from_bytes(algo.p, 'big'))
    return big_num_for_hash(value)


# https://github.com/telegramdesktop/tdesktop/blob/18b74b90451a7db2379a9d753c9cbaf8734b4d5d/Telegram/SourceFiles/core/core_cloud_password.cpp
def compute_check(request: types.account.Password, password: str):
    """Build the ``InputCheckPasswordSRP`` answer for a 2FA login attempt.

    Implements the client side of SRP as done by Telegram Desktop (see link
    above): derives x from the password, picks a random ephemeral ``a``,
    computes A = g^a, S = (B - k*g^x)^(a + u*x) and the proof M1.

    Raises ``ValueError`` for unsupported algorithms or bad server values.
    """
    algo = request.current_algo
    if not isinstance(algo, types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow):
        raise ValueError('unsupported password algorithm {}'
                         .format(algo.__class__.__name__))

    pw_hash = compute_hash(algo, password)

    p = int.from_bytes(algo.p, 'big')
    g = algo.g
    B = int.from_bytes(request.srp_B, 'big')
    try:
        check_prime_and_good(algo.p, g)
    except ValueError:
        raise ValueError('bad p/g in password')

    if not is_good_large(B, p):
        raise ValueError('bad b in check')

    x = int.from_bytes(pw_hash, 'big')
    p_for_hash = num_bytes_for_hash(algo.p)
    g_for_hash = big_num_for_hash(g)
    b_for_hash = num_bytes_for_hash(request.srp_B)
    g_x = pow(g, x, p)
    # Multiplier k = H(p | g), as in SRP-6a.
    k = int.from_bytes(sha256(p_for_hash, g_for_hash), 'big')
    kg_x = (k * g_x) % p

    def generate_and_check_random():
        # Keep drawing random 256-byte ephemerals until A = g^a passes the
        # size checks and the scrambler u = H(A | B) is non-zero.
        random_size = 256
        while True:
            random = os.urandom(random_size)
            a = int.from_bytes(random, 'big')
            A = pow(g, a, p)
            if is_good_mod_exp_first(A, p):
                a_for_hash = big_num_for_hash(A)
                u = int.from_bytes(sha256(a_for_hash, b_for_hash), 'big')
                if u > 0:
                    return (a, a_for_hash, u)

    a, a_for_hash, u = generate_and_check_random()
    # g_b = B - k*g^x (mod p) removes the server's masking term.
    g_b = (B - kg_x) % p
    if not is_good_mod_exp_first(g_b, p):
        raise ValueError('bad g_b')

    ux = u * x
    a_ux = a + ux
    # Shared secret S and session key K = H(S).
    S = pow(g_b, a_ux, p)
    K = sha256(big_num_for_hash(S))
    # Client proof M1 = H(H(p) xor H(g) | H(salt1) | H(salt2) | A | B | K).
    M1 = sha256(
        xor(sha256(p_for_hash), sha256(g_for_hash)),
        sha256(algo.salt1),
        sha256(algo.salt2),
        a_for_hash,
        b_for_hash,
        K
    )

    return types.InputCheckPasswordSRP(
        request.srp_id, bytes(a_for_hash), bytes(M1))
PypiClean
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/node-fetch/lib/index.js
'use strict';

Object.defineProperty(exports, '__esModule', { value: true });

// Unwrap a transpiled ES-module default export, or return the value as-is.
function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; }

var Stream = _interopDefault(require('stream'));
var http = _interopDefault(require('http'));
var Url = _interopDefault(require('url'));
var whatwgUrl = _interopDefault(require('whatwg-url'));
var https = _interopDefault(require('https'));
var zlib = _interopDefault(require('zlib'));

// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js

// fix for "Readable" isn't a named export issue
const Readable = Stream.Readable;

// Private symbols so Blob internals don't appear as plain properties.
const BUFFER = Symbol('buffer');
const TYPE = Symbol('type');

/**
 * Minimal W3C Blob implementation backed by a single Node Buffer.
 * Accepts the same (blobParts, options) constructor arguments as the
 * browser Blob.
 */
class Blob {
	constructor() {
		this[TYPE] = '';

		const blobParts = arguments[0];
		const options = arguments[1];

		const buffers = [];
		let size = 0;

		if (blobParts) {
			const a = blobParts;
			const length = Number(a.length);
			// Normalize every part to a Buffer: Buffer, TypedArray/DataView,
			// ArrayBuffer, nested Blob, or anything else stringified.
			for (let i = 0; i < length; i++) {
				const element = a[i];
				let buffer;
				if (element instanceof Buffer) {
					buffer = element;
				} else if (ArrayBuffer.isView(element)) {
					buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength);
				} else if (element instanceof ArrayBuffer) {
					buffer = Buffer.from(element);
				} else if (element instanceof Blob) {
					buffer = element[BUFFER];
				} else {
					buffer = Buffer.from(typeof element === 'string' ? element : String(element));
				}
				size += buffer.length;
				buffers.push(buffer);
			}
		}

		this[BUFFER] = Buffer.concat(buffers);

		// Per spec, the type is lowercased and rejected if it contains
		// characters outside printable ASCII (U+0020..U+007E).
		let type = options && options.type !== undefined && String(options.type).toLowerCase();
		if (type && !/[^\u0020-\u007E]/.test(type)) {
			this[TYPE] = type;
		}
	}
	get size() {
		return this[BUFFER].length;
	}
	get type() {
		return this[TYPE];
	}
	text() {
		return Promise.resolve(this[BUFFER].toString());
	}
	arrayBuffer() {
		// Slice so the returned ArrayBuffer covers exactly this Blob's bytes.
		const buf = this[BUFFER];
		const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);
		return Promise.resolve(ab);
	}
	stream() {
		// Wrap the buffer in a one-shot Readable stream.
		const readable = new Readable();
		readable._read = function () {};
		readable.push(this[BUFFER]);
		readable.push(null);
		return readable;
	}
	toString() {
		return '[object Blob]';
	}
	slice() {
		const size = this.size;

		// Spec-style index normalization: negative indexes count from the
		// end, and everything is clamped to [0, size].
		const start = arguments[0];
		const end = arguments[1];
		let relativeStart, relativeEnd;
		if (start === undefined) {
			relativeStart = 0;
		} else if (start < 0) {
			relativeStart = Math.max(size + start, 0);
		} else {
			relativeStart = Math.min(start, size);
		}
		if (end === undefined) {
			relativeEnd = size;
		} else if (end < 0) {
			relativeEnd = Math.max(size + end, 0);
		} else {
			relativeEnd = Math.min(end, size);
		}
		const span = Math.max(relativeEnd - relativeStart, 0);

		const buffer = this[BUFFER];
		const slicedBuffer = buffer.slice(relativeStart, relativeStart + span);
		// Build an empty Blob and splice the sliced buffer in directly to
		// avoid copying the bytes again through the constructor.
		const blob = new Blob([], { type: arguments[2] });
		blob[BUFFER] = slicedBuffer;
		return blob;
	}
}

Object.defineProperties(Blob.prototype, {
	size: { enumerable: true },
	type: { enumerable: true },
	slice: { enumerable: true }
});

Object.defineProperty(Blob.prototype, Symbol.toStringTag, {
	value: 'Blob',
	writable: false,
	enumerable: false,
	configurable: true
});

/**
 * fetch-error.js
 *
 * FetchError interface for operational errors
 */

/**
 * Create FetchError instance
 *
 * @param   String      message      Error message for human
 * @param   String      type         Error type for machine
 * @param   String      systemError  For Node.js system error
 * @return  FetchError
 */
function FetchError(message, type, systemError) {
  Error.call(this, message);

  this.message = message;
  this.type = type;

  // when err.type is `system`, err.code contains system error code
  if (systemError) {
    this.code = this.errno = systemError.code;
  }

  // hide custom error implementation details from end-users
  Error.captureStackTrace(this, this.constructor);
}

FetchError.prototype = Object.create(Error.prototype);
FetchError.prototype.constructor = FetchError;
FetchError.prototype.name = 'FetchError';

// Optional dependency: `encoding` powers textConverted(); when it is not
// installed `convert` stays undefined and textConverted() will throw.
let convert;
try {
	convert = require('encoding').convert;
} catch (e) {}

const INTERNALS = Symbol('Body internals');

// fix an issue where "PassThrough" isn't a named export for node <10
const PassThrough = Stream.PassThrough;

/**
 * Body mixin
 *
 * Ref: https://fetch.spec.whatwg.org/#body
 *
 * @param   Stream  body  Readable stream
 * @param   Object  opts  Response options
 * @return  Void
 */
function Body(body) {
	var _this = this;

	// Transpiled destructuring of the options argument:
	// { size = 0, timeout = 0 } = arguments[1] || {}
	var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {},
	    _ref$size = _ref.size;

	let size = _ref$size === undefined ? 0 : _ref$size;
	var _ref$timeout = _ref.timeout;
	let timeout = _ref$timeout === undefined ? 0 : _ref$timeout;

	// Normalize the accepted body types to either null, a Buffer, a Blob,
	// or a Stream. The bare `;` branches intentionally leave the value
	// unchanged (Blob, Buffer and Stream are stored as-is).
	if (body == null) {
		// body is undefined or null
		body = null;
	} else if (isURLSearchParams(body)) {
		// body is a URLSearchParams
		body = Buffer.from(body.toString());
	} else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') {
		// body is ArrayBuffer
		body = Buffer.from(body);
	} else if (ArrayBuffer.isView(body)) {
		// body is ArrayBufferView
		body = Buffer.from(body.buffer, body.byteOffset, body.byteLength);
	} else if (body instanceof Stream) ; else {
		// none of the above
		// coerce to string then buffer
		body = Buffer.from(String(body));
	}
	this[INTERNALS] = {
		body,
		disturbed: false,
		error: null
	};
	this.size = size;
	this.timeout = timeout;

	// Record stream errors so that a later consume rejects with them;
	// AbortErrors are stored as-is, everything else is wrapped.
	if (body instanceof Stream) {
		body.on('error', function (err) {
			const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err);
			_this[INTERNALS].error = error;
		});
	}
}

Body.prototype = {
	get body() {
		return this[INTERNALS].body;
	},

	get bodyUsed() {
		return this[INTERNALS].disturbed;
	},

	/**
	 * Decode response as ArrayBuffer
	 *
	 * @return  Promise
	 */
	arrayBuffer() {
		return consumeBody.call(this).then(function (buf) {
			return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);
		});
	},

	/**
	 * Return raw response as Blob
	 *
	 * @return Promise
	 */
	blob() {
		let ct = this.headers && this.headers.get('content-type') || '';
		return consumeBody.call(this).then(function (buf) {
			return Object.assign(
			// Prevent copying
			new Blob([], {
				type: ct.toLowerCase()
			}), {
				[BUFFER]: buf
			});
		});
	},

	/**
	 * Decode response as json
	 *
	 * @return  Promise
	 */
	json() {
		var _this2 = this;

		return consumeBody.call(this).then(function (buffer) {
			try {
				return JSON.parse(buffer.toString());
			} catch (err) {
				return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json'));
			}
		});
	},

	/**
	 * Decode response as text
	 *
	 * @return  Promise
	 */
	text() {
		return consumeBody.call(this).then(function (buffer) {
			return buffer.toString();
		});
	},

	/**
	 * Decode response as buffer (non-spec api)
	 *
	 * @return  Promise
	 */
	buffer() {
		return consumeBody.call(this);
	},

	/**
	 * Decode response as text, while automatically detecting the encoding and
	 * trying to decode to UTF-8 (non-spec api)
	 *
	 * @return  Promise
	 */
	textConverted() {
		var _this3 = this;

		return consumeBody.call(this).then(function (buffer) {
			return convertBody(buffer, _this3.headers);
		});
	}
};

// In browsers, all properties are enumerable.
Object.defineProperties(Body.prototype, { body: { enumerable: true }, bodyUsed: { enumerable: true }, arrayBuffer: { enumerable: true }, blob: { enumerable: true }, json: { enumerable: true }, text: { enumerable: true } }); Body.mixIn = function (proto) { for (const name of Object.getOwnPropertyNames(Body.prototype)) { // istanbul ignore else: future proof if (!(name in proto)) { const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); Object.defineProperty(proto, name, desc); } } }; /** * Consume and convert an entire Body to a Buffer. * * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body * * @return Promise */ function consumeBody() { var _this4 = this; if (this[INTERNALS].disturbed) { return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`)); } this[INTERNALS].disturbed = true; if (this[INTERNALS].error) { return Body.Promise.reject(this[INTERNALS].error); } let body = this.body; // body is null if (body === null) { return Body.Promise.resolve(Buffer.alloc(0)); } // body is blob if (isBlob(body)) { body = body.stream(); } // body is buffer if (Buffer.isBuffer(body)) { return Body.Promise.resolve(body); } // istanbul ignore if: should never happen if (!(body instanceof Stream)) { return Body.Promise.resolve(Buffer.alloc(0)); } // body is stream // get ready to actually consume the body let accum = []; let accumBytes = 0; let abort = false; return new Body.Promise(function (resolve, reject) { let resTimeout; // allow timeout on slow response body if (_this4.timeout) { resTimeout = setTimeout(function () { abort = true; reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout')); }, _this4.timeout); } // handle stream errors body.on('error', function (err) { if (err.name === 'AbortError') { // if the request was aborted, reject with this Error abort = true; reject(err); } else { // other errors, such as incorrect content-encoding reject(new FetchError(`Invalid 
response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err)); } }); body.on('data', function (chunk) { if (abort || chunk === null) { return; } if (_this4.size && accumBytes + chunk.length > _this4.size) { abort = true; reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size')); return; } accumBytes += chunk.length; accum.push(chunk); }); body.on('end', function () { if (abort) { return; } clearTimeout(resTimeout); try { resolve(Buffer.concat(accum, accumBytes)); } catch (err) { // handle streams that have accumulated too much data (issue #414) reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err)); } }); }); } /** * Detect buffer encoding and convert to target encoding * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding * * @param Buffer buffer Incoming buffer * @param String encoding Target encoding * @return String */ function convertBody(buffer, headers) { if (typeof convert !== 'function') { throw new Error('The package `encoding` must be installed to use the textConverted() function'); } const ct = headers.get('content-type'); let charset = 'utf-8'; let res, str; // header if (ct) { res = /charset=([^;]*)/i.exec(ct); } // no charset in content type, peek at response body for at most 1024 bytes str = buffer.slice(0, 1024).toString(); // html5 if (!res && str) { res = /<meta.+?charset=(['"])(.+?)\1/i.exec(str); } // html4 if (!res && str) { res = /<meta[\s]+?http-equiv=(['"])content-type\1[\s]+?content=(['"])(.+?)\2/i.exec(str); if (!res) { res = /<meta[\s]+?content=(['"])(.+?)\1[\s]+?http-equiv=(['"])content-type\3/i.exec(str); if (res) { res.pop(); // drop last quote } } if (res) { res = /charset=(.*)/i.exec(res.pop()); } } // xml if (!res && str) { res = /<\?xml.+?encoding=(['"])(.+?)\1/i.exec(str); } // found charset if (res) { charset = res.pop(); // prevent decode issues when sites use 
incorrect encoding // ref: https://hsivonen.fi/encoding-menu/ if (charset === 'gb2312' || charset === 'gbk') { charset = 'gb18030'; } } // turn raw buffers into a single utf-8 buffer return convert(buffer, 'UTF-8', charset).toString(); } /** * Detect a URLSearchParams object * ref: https://github.com/bitinn/node-fetch/issues/296#issuecomment-307598143 * * @param Object obj Object to detect by type or brand * @return String */ function isURLSearchParams(obj) { // Duck-typing as a necessary condition. if (typeof obj !== 'object' || typeof obj.append !== 'function' || typeof obj.delete !== 'function' || typeof obj.get !== 'function' || typeof obj.getAll !== 'function' || typeof obj.has !== 'function' || typeof obj.set !== 'function') { return false; } // Brand-checking and more duck-typing as optional condition. return obj.constructor.name === 'URLSearchParams' || Object.prototype.toString.call(obj) === '[object URLSearchParams]' || typeof obj.sort === 'function'; } /** * Check if `obj` is a W3C `Blob` object (which `File` inherits from) * @param {*} obj * @return {boolean} */ function isBlob(obj) { return typeof obj === 'object' && typeof obj.arrayBuffer === 'function' && typeof obj.type === 'string' && typeof obj.stream === 'function' && typeof obj.constructor === 'function' && typeof obj.constructor.name === 'string' && /^(Blob|File)$/.test(obj.constructor.name) && /^(Blob|File)$/.test(obj[Symbol.toStringTag]); } /** * Clone body given Res/Req instance * * @param Mixed instance Response or Request instance * @return Mixed */ function clone(instance) { let p1, p2; let body = instance.body; // don't allow cloning a used body if (instance.bodyUsed) { throw new Error('cannot clone body after it is used'); } // check that body is a stream and not form-data object // note: we can't clone the form-data object without having it as a dependency if (body instanceof Stream && typeof body.getBoundary !== 'function') { // tee instance body p1 = new PassThrough(); p2 = new 
PassThrough(); body.pipe(p1); body.pipe(p2); // set instance body to teed body and return the other teed body instance[INTERNALS].body = p1; body = p2; } return body; } /** * Performs the operation "extract a `Content-Type` value from |object|" as * specified in the specification: * https://fetch.spec.whatwg.org/#concept-bodyinit-extract * * This function assumes that instance.body is present. * * @param Mixed instance Any options.body input */ function extractContentType(body) { if (body === null) { // body is null return null; } else if (typeof body === 'string') { // body is string return 'text/plain;charset=UTF-8'; } else if (isURLSearchParams(body)) { // body is a URLSearchParams return 'application/x-www-form-urlencoded;charset=UTF-8'; } else if (isBlob(body)) { // body is blob return body.type || null; } else if (Buffer.isBuffer(body)) { // body is buffer return null; } else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { // body is ArrayBuffer return null; } else if (ArrayBuffer.isView(body)) { // body is ArrayBufferView return null; } else if (typeof body.getBoundary === 'function') { // detect form data input from form-data module return `multipart/form-data;boundary=${body.getBoundary()}`; } else if (body instanceof Stream) { // body is stream // can't really do much about this return null; } else { // Body constructor defaults other things to string return 'text/plain;charset=UTF-8'; } } /** * The Fetch Standard treats this as if "total bytes" is a property on the body. * For us, we have to explicitly get it with a function. * * ref: https://fetch.spec.whatwg.org/#concept-body-total-bytes * * @param Body instance Instance of Body * @return Number? 
Number of bytes, or null if not possible */ function getTotalBytes(instance) { const body = instance.body; if (body === null) { // body is null return 0; } else if (isBlob(body)) { return body.size; } else if (Buffer.isBuffer(body)) { // body is buffer return body.length; } else if (body && typeof body.getLengthSync === 'function') { // detect form data input from form-data module if (body._lengthRetrievers && body._lengthRetrievers.length == 0 || // 1.x body.hasKnownLength && body.hasKnownLength()) { // 2.x return body.getLengthSync(); } return null; } else { // body is stream return null; } } /** * Write a Body to a Node.js WritableStream (e.g. http.Request) object. * * @param Body instance Instance of Body * @return Void */ function writeToStream(dest, instance) { const body = instance.body; if (body === null) { // body is null dest.end(); } else if (isBlob(body)) { body.stream().pipe(dest); } else if (Buffer.isBuffer(body)) { // body is buffer dest.write(body); dest.end(); } else { // body is stream body.pipe(dest); } } // expose Promise Body.Promise = global.Promise; /** * headers.js * * Headers class offers convenient helpers */ const invalidTokenRegex = /[^\^_`a-zA-Z\-0-9!#$%&'*+.|~]/; const invalidHeaderCharRegex = /[^\t\x20-\x7e\x80-\xff]/; function validateName(name) { name = `${name}`; if (invalidTokenRegex.test(name) || name === '') { throw new TypeError(`${name} is not a legal HTTP header name`); } } function validateValue(value) { value = `${value}`; if (invalidHeaderCharRegex.test(value)) { throw new TypeError(`${value} is not a legal HTTP header value`); } } /** * Find the key in the map object given a header name. * * Returns undefined if not found. 
* * @param String name Header name * @return String|Undefined */ function find(map, name) { name = name.toLowerCase(); for (const key in map) { if (key.toLowerCase() === name) { return key; } } return undefined; } const MAP = Symbol('map'); class Headers { /** * Headers class * * @param Object headers Response headers * @return Void */ constructor() { let init = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : undefined; this[MAP] = Object.create(null); if (init instanceof Headers) { const rawHeaders = init.raw(); const headerNames = Object.keys(rawHeaders); for (const headerName of headerNames) { for (const value of rawHeaders[headerName]) { this.append(headerName, value); } } return; } // We don't worry about converting prop to ByteString here as append() // will handle it. if (init == null) ; else if (typeof init === 'object') { const method = init[Symbol.iterator]; if (method != null) { if (typeof method !== 'function') { throw new TypeError('Header pairs must be iterable'); } // sequence<sequence<ByteString>> // Note: per spec we have to first exhaust the lists then process them const pairs = []; for (const pair of init) { if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') { throw new TypeError('Each header pair must be iterable'); } pairs.push(Array.from(pair)); } for (const pair of pairs) { if (pair.length !== 2) { throw new TypeError('Each header pair must be a name/value tuple'); } this.append(pair[0], pair[1]); } } else { // record<ByteString, ByteString> for (const key of Object.keys(init)) { const value = init[key]; this.append(key, value); } } } else { throw new TypeError('Provided initializer must be an object'); } } /** * Return combined header value given name * * @param String name Header name * @return Mixed */ get(name) { name = `${name}`; validateName(name); const key = find(this[MAP], name); if (key === undefined) { return null; } return this[MAP][key].join(', '); } /** * Iterate over all headers * * 
@param Function callback Executed for each item with parameters (value, name, thisArg) * @param Boolean thisArg `this` context for callback function * @return Void */ forEach(callback) { let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined; let pairs = getHeaders(this); let i = 0; while (i < pairs.length) { var _pairs$i = pairs[i]; const name = _pairs$i[0], value = _pairs$i[1]; callback.call(thisArg, value, name, this); pairs = getHeaders(this); i++; } } /** * Overwrite header values given name * * @param String name Header name * @param String value Header value * @return Void */ set(name, value) { name = `${name}`; value = `${value}`; validateName(name); validateValue(value); const key = find(this[MAP], name); this[MAP][key !== undefined ? key : name] = [value]; } /** * Append a value onto existing header * * @param String name Header name * @param String value Header value * @return Void */ append(name, value) { name = `${name}`; value = `${value}`; validateName(name); validateValue(value); const key = find(this[MAP], name); if (key !== undefined) { this[MAP][key].push(value); } else { this[MAP][name] = [value]; } } /** * Check for header name existence * * @param String name Header name * @return Boolean */ has(name) { name = `${name}`; validateName(name); return find(this[MAP], name) !== undefined; } /** * Delete all header values given name * * @param String name Header name * @return Void */ delete(name) { name = `${name}`; validateName(name); const key = find(this[MAP], name); if (key !== undefined) { delete this[MAP][key]; } } /** * Return raw headers (non-spec api) * * @return Object */ raw() { return this[MAP]; } /** * Get an iterator on keys. * * @return Iterator */ keys() { return createHeadersIterator(this, 'key'); } /** * Get an iterator on values. * * @return Iterator */ values() { return createHeadersIterator(this, 'value'); } /** * Get an iterator on entries. 
* * This is the default iterator of the Headers object. * * @return Iterator */ [Symbol.iterator]() { return createHeadersIterator(this, 'key+value'); } } Headers.prototype.entries = Headers.prototype[Symbol.iterator]; Object.defineProperty(Headers.prototype, Symbol.toStringTag, { value: 'Headers', writable: false, enumerable: false, configurable: true }); Object.defineProperties(Headers.prototype, { get: { enumerable: true }, forEach: { enumerable: true }, set: { enumerable: true }, append: { enumerable: true }, has: { enumerable: true }, delete: { enumerable: true }, keys: { enumerable: true }, values: { enumerable: true }, entries: { enumerable: true } }); function getHeaders(headers) { let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; const keys = Object.keys(headers[MAP]).sort(); return keys.map(kind === 'key' ? function (k) { return k.toLowerCase(); } : kind === 'value' ? function (k) { return headers[MAP][k].join(', '); } : function (k) { return [k.toLowerCase(), headers[MAP][k].join(', ')]; }); } const INTERNAL = Symbol('internal'); function createHeadersIterator(target, kind) { const iterator = Object.create(HeadersIteratorPrototype); iterator[INTERNAL] = { target, kind, index: 0 }; return iterator; } const HeadersIteratorPrototype = Object.setPrototypeOf({ next() { // istanbul ignore if if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { throw new TypeError('Value of `this` is not a HeadersIterator'); } var _INTERNAL = this[INTERNAL]; const target = _INTERNAL.target, kind = _INTERNAL.kind, index = _INTERNAL.index; const values = getHeaders(target, kind); const len = values.length; if (index >= len) { return { value: undefined, done: true }; } this[INTERNAL].index = index + 1; return { value: values[index], done: false }; } }, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { value: 'HeadersIterator', 
writable: false, enumerable: false, configurable: true }); /** * Export the Headers object in a form that Node.js can consume. * * @param Headers headers * @return Object */ function exportNodeCompatibleHeaders(headers) { const obj = Object.assign({ __proto__: null }, headers[MAP]); // http.request() only supports string as Host header. This hack makes // specifying custom Host header possible. const hostHeaderKey = find(headers[MAP], 'Host'); if (hostHeaderKey !== undefined) { obj[hostHeaderKey] = obj[hostHeaderKey][0]; } return obj; } /** * Create a Headers object from an object of headers, ignoring those that do * not conform to HTTP grammar productions. * * @param Object obj Object of headers * @return Headers */ function createHeadersLenient(obj) { const headers = new Headers(); for (const name of Object.keys(obj)) { if (invalidTokenRegex.test(name)) { continue; } if (Array.isArray(obj[name])) { for (const val of obj[name]) { if (invalidHeaderCharRegex.test(val)) { continue; } if (headers[MAP][name] === undefined) { headers[MAP][name] = [val]; } else { headers[MAP][name].push(val); } } } else if (!invalidHeaderCharRegex.test(obj[name])) { headers[MAP][name] = [obj[name]]; } } return headers; } const INTERNALS$1 = Symbol('Response internals'); // fix an issue where "STATUS_CODES" aren't a named export for node <10 const STATUS_CODES = http.STATUS_CODES; /** * Response class * * @param Stream body Readable stream * @param Object opts Response options * @return Void */ class Response { constructor() { let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; let opts = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; Body.call(this, body, opts); const status = opts.status || 200; const headers = new Headers(opts.headers); if (body != null && !headers.has('Content-Type')) { const contentType = extractContentType(body); if (contentType) { headers.append('Content-Type', contentType); } } this[INTERNALS$1] = { url: opts.url, status, statusText: opts.statusText || STATUS_CODES[status], headers, counter: opts.counter }; } get url() { return this[INTERNALS$1].url || ''; } get status() { return this[INTERNALS$1].status; } /** * Convenience property representing if the request ended normally */ get ok() { return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; } get redirected() { return this[INTERNALS$1].counter > 0; } get statusText() { return this[INTERNALS$1].statusText; } get headers() { return this[INTERNALS$1].headers; } /** * Clone this response * * @return Response */ clone() { return new Response(clone(this), { url: this.url, status: this.status, statusText: this.statusText, headers: this.headers, ok: this.ok, redirected: this.redirected }); } } Body.mixIn(Response.prototype); Object.defineProperties(Response.prototype, { url: { enumerable: true }, status: { enumerable: true }, ok: { enumerable: true }, redirected: { enumerable: true }, statusText: { enumerable: true }, headers: { enumerable: true }, clone: { enumerable: true } }); Object.defineProperty(Response.prototype, Symbol.toStringTag, { value: 'Response', writable: false, enumerable: false, configurable: true }); const INTERNALS$2 = Symbol('Request internals'); const URL = Url.URL || whatwgUrl.URL; // fix an issue where "format", "parse" aren't a named export for node <10 const parse_url = Url.parse; const format_url = Url.format; /** * Wrapper around `new URL` to handle arbitrary URLs * * @param {string} urlStr * @return {void} */ function parseURL(urlStr) { /* Check whether the URL is absolute or not Scheme: https://tools.ietf.org/html/rfc3986#section-3.1 Absolute URL: 
https://tools.ietf.org/html/rfc3986#section-4.3 */ if (/^[a-zA-Z][a-zA-Z\d+\-.]*:/.exec(urlStr)) { urlStr = new URL(urlStr).toString(); } // Fallback to old implementation for arbitrary URLs return parse_url(urlStr); } const streamDestructionSupported = 'destroy' in Stream.Readable.prototype; /** * Check if a value is an instance of Request. * * @param Mixed input * @return Boolean */ function isRequest(input) { return typeof input === 'object' && typeof input[INTERNALS$2] === 'object'; } function isAbortSignal(signal) { const proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal); return !!(proto && proto.constructor.name === 'AbortSignal'); } /** * Request class * * @param Mixed input Url or Request instance * @param Object init Custom options * @return Void */ class Request { constructor(input) { let init = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; let parsedURL; // normalize input if (!isRequest(input)) { if (input && input.href) { // in order to support Node.js' Url objects; though WHATWG's URL objects // will fall into this branch also (since their `toString()` will return // `href` property anyway) parsedURL = parseURL(input.href); } else { // coerce input to a string before attempting to parse parsedURL = parseURL(`${input}`); } input = {}; } else { parsedURL = parseURL(input.url); } let method = init.method || input.method || 'GET'; method = method.toUpperCase(); if ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) { throw new TypeError('Request with GET/HEAD method cannot have body'); } let inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? 
clone(input) : null; Body.call(this, inputBody, { timeout: init.timeout || input.timeout || 0, size: init.size || input.size || 0 }); const headers = new Headers(init.headers || input.headers || {}); if (inputBody != null && !headers.has('Content-Type')) { const contentType = extractContentType(inputBody); if (contentType) { headers.append('Content-Type', contentType); } } let signal = isRequest(input) ? input.signal : null; if ('signal' in init) signal = init.signal; if (signal != null && !isAbortSignal(signal)) { throw new TypeError('Expected signal to be an instanceof AbortSignal'); } this[INTERNALS$2] = { method, redirect: init.redirect || input.redirect || 'follow', headers, parsedURL, signal }; // node-fetch-only options this.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20; this.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? input.compress : true; this.counter = init.counter || input.counter || 0; this.agent = init.agent || input.agent; } get method() { return this[INTERNALS$2].method; } get url() { return format_url(this[INTERNALS$2].parsedURL); } get headers() { return this[INTERNALS$2].headers; } get redirect() { return this[INTERNALS$2].redirect; } get signal() { return this[INTERNALS$2].signal; } /** * Clone this request * * @return Request */ clone() { return new Request(this); } } Body.mixIn(Request.prototype); Object.defineProperty(Request.prototype, Symbol.toStringTag, { value: 'Request', writable: false, enumerable: false, configurable: true }); Object.defineProperties(Request.prototype, { method: { enumerable: true }, url: { enumerable: true }, headers: { enumerable: true }, redirect: { enumerable: true }, clone: { enumerable: true }, signal: { enumerable: true } }); /** * Convert a Request to Node.js http request options. 
* * @param Request A Request instance * @return Object The options object to be passed to http.request */ function getNodeRequestOptions(request) { const parsedURL = request[INTERNALS$2].parsedURL; const headers = new Headers(request[INTERNALS$2].headers); // fetch step 1.3 if (!headers.has('Accept')) { headers.set('Accept', '*/*'); } // Basic fetch if (!parsedURL.protocol || !parsedURL.hostname) { throw new TypeError('Only absolute URLs are supported'); } if (!/^https?:$/.test(parsedURL.protocol)) { throw new TypeError('Only HTTP(S) protocols are supported'); } if (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) { throw new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8'); } // HTTP-network-or-cache fetch steps 2.4-2.7 let contentLengthValue = null; if (request.body == null && /^(POST|PUT)$/i.test(request.method)) { contentLengthValue = '0'; } if (request.body != null) { const totalBytes = getTotalBytes(request); if (typeof totalBytes === 'number') { contentLengthValue = String(totalBytes); } } if (contentLengthValue) { headers.set('Content-Length', contentLengthValue); } // HTTP-network-or-cache fetch step 2.11 if (!headers.has('User-Agent')) { headers.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)'); } // HTTP-network-or-cache fetch step 2.15 if (request.compress && !headers.has('Accept-Encoding')) { headers.set('Accept-Encoding', 'gzip,deflate'); } let agent = request.agent; if (typeof agent === 'function') { agent = agent(parsedURL); } if (!headers.has('Connection') && !agent) { headers.set('Connection', 'close'); } // HTTP-network fetch step 4.2 // chunked encoding is handled by Node.js return Object.assign({}, parsedURL, { method: request.method, headers: exportNodeCompatibleHeaders(headers), agent }); } /** * abort-error.js * * AbortError interface for cancelled requests */ /** * Create AbortError instance * * @param String message Error message for 
human * @return AbortError */ function AbortError(message) { Error.call(this, message); this.type = 'aborted'; this.message = message; // hide custom error implementation details from end-users Error.captureStackTrace(this, this.constructor); } AbortError.prototype = Object.create(Error.prototype); AbortError.prototype.constructor = AbortError; AbortError.prototype.name = 'AbortError'; const URL$1 = Url.URL || whatwgUrl.URL; // fix an issue where "PassThrough", "resolve" aren't a named export for node <10 const PassThrough$1 = Stream.PassThrough; const isDomainOrSubdomain = function isDomainOrSubdomain(destination, original) { const orig = new URL$1(original).hostname; const dest = new URL$1(destination).hostname; return orig === dest || orig[orig.length - dest.length - 1] === '.' && orig.endsWith(dest); }; /** * isSameProtocol reports whether the two provided URLs use the same protocol. * * Both domains must already be in canonical form. * @param {string|URL} original * @param {string|URL} destination */ const isSameProtocol = function isSameProtocol(destination, original) { const orig = new URL$1(original).protocol; const dest = new URL$1(destination).protocol; return orig === dest; }; /** * Fetch function * * @param Mixed url Absolute url or Request instance * @param Object opts Fetch options * @return Promise */ function fetch(url, opts) { // allow custom promise if (!fetch.Promise) { throw new Error('native promise missing, set fetch.Promise to your favorite alternative'); } Body.Promise = fetch.Promise; // wrap http.request into fetch return new fetch.Promise(function (resolve, reject) { // build request object const request = new Request(url, opts); const options = getNodeRequestOptions(request); const send = (options.protocol === 'https:' ? 
https : http).request; const signal = request.signal; let response = null; const abort = function abort() { let error = new AbortError('The user aborted a request.'); reject(error); if (request.body && request.body instanceof Stream.Readable) { destroyStream(request.body, error); } if (!response || !response.body) return; response.body.emit('error', error); }; if (signal && signal.aborted) { abort(); return; } const abortAndFinalize = function abortAndFinalize() { abort(); finalize(); }; // send request const req = send(options); let reqTimeout; if (signal) { signal.addEventListener('abort', abortAndFinalize); } function finalize() { req.abort(); if (signal) signal.removeEventListener('abort', abortAndFinalize); clearTimeout(reqTimeout); } if (request.timeout) { req.once('socket', function (socket) { reqTimeout = setTimeout(function () { reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout')); finalize(); }, request.timeout); }); } req.on('error', function (err) { reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err)); if (response && response.body) { destroyStream(response.body, err); } finalize(); }); fixResponseChunkedTransferBadEnding(req, function (err) { if (signal && signal.aborted) { return; } if (response && response.body) { destroyStream(response.body, err); } }); /* c8 ignore next 18 */ if (parseInt(process.version.substring(1)) < 14) { // Before Node.js 14, pipeline() does not fully support async iterators and does not always // properly handle when the socket close/end events are out of order. 
req.on('socket', function (s) { s.addListener('close', function (hadError) { // if a data listener is still present we didn't end cleanly const hasDataListener = s.listenerCount('data') > 0; // if end happened before close but the socket didn't emit an error, do it now if (response && hasDataListener && !hadError && !(signal && signal.aborted)) { const err = new Error('Premature close'); err.code = 'ERR_STREAM_PREMATURE_CLOSE'; response.body.emit('error', err); } }); }); } req.on('response', function (res) { clearTimeout(reqTimeout); const headers = createHeadersLenient(res.headers); // HTTP fetch step 5 if (fetch.isRedirect(res.statusCode)) { // HTTP fetch step 5.2 const location = headers.get('Location'); // HTTP fetch step 5.3 let locationURL = null; try { locationURL = location === null ? null : new URL$1(location, request.url).toString(); } catch (err) { // error here can only be invalid URL in Location: header // do not throw when options.redirect == manual // let the user extract the errorneous redirect URL if (request.redirect !== 'manual') { reject(new FetchError(`uri requested responds with an invalid redirect URL: ${location}`, 'invalid-redirect')); finalize(); return; } } // HTTP fetch step 5.5 switch (request.redirect) { case 'error': reject(new FetchError(`uri requested responds with a redirect, redirect mode is set to error: ${request.url}`, 'no-redirect')); finalize(); return; case 'manual': // node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL. 
if (locationURL !== null) { // handle corrupted header try { headers.set('Location', locationURL); } catch (err) { // istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request reject(err); } } break; case 'follow': // HTTP-redirect fetch step 2 if (locationURL === null) { break; } // HTTP-redirect fetch step 5 if (request.counter >= request.follow) { reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); finalize(); return; } // HTTP-redirect fetch step 6 (counter increment) // Create a new Request object. const requestOpts = { headers: new Headers(request.headers), follow: request.follow, counter: request.counter + 1, agent: request.agent, compress: request.compress, method: request.method, body: request.body, signal: request.signal, timeout: request.timeout, size: request.size }; if (!isDomainOrSubdomain(request.url, locationURL) || !isSameProtocol(request.url, locationURL)) { for (const name of ['authorization', 'www-authenticate', 'cookie', 'cookie2']) { requestOpts.headers.delete(name); } } // HTTP-redirect fetch step 9 if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) { reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); finalize(); return; } // HTTP-redirect fetch step 11 if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') { requestOpts.method = 'GET'; requestOpts.body = undefined; requestOpts.headers.delete('content-length'); } // HTTP-redirect fetch step 15 resolve(fetch(new Request(locationURL, requestOpts))); finalize(); return; } } // prepare response res.once('end', function () { if (signal) signal.removeEventListener('abort', abortAndFinalize); }); let body = res.pipe(new PassThrough$1()); const response_options = { url: request.url, status: res.statusCode, statusText: res.statusMessage, headers: headers, size: request.size, 
timeout: request.timeout, counter: request.counter }; // HTTP-network fetch step 12.1.1.3 const codings = headers.get('Content-Encoding'); // HTTP-network fetch step 12.1.1.4: handle content codings // in following scenarios we ignore compression support // 1. compression support is disabled // 2. HEAD request // 3. no Content-Encoding header // 4. no content response (204) // 5. content not modified response (304) if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) { response = new Response(body, response_options); resolve(response); return; } // For Node v6+ // Be less strict when decoding compressed responses, since sometimes // servers send slightly invalid responses that are still accepted // by common browsers. // Always using Z_SYNC_FLUSH is what cURL does. const zlibOptions = { flush: zlib.Z_SYNC_FLUSH, finishFlush: zlib.Z_SYNC_FLUSH }; // for gzip if (codings == 'gzip' || codings == 'x-gzip') { body = body.pipe(zlib.createGunzip(zlibOptions)); response = new Response(body, response_options); resolve(response); return; } // for deflate if (codings == 'deflate' || codings == 'x-deflate') { // handle the infamous raw deflate response from old servers // a hack for old IIS and Apache servers const raw = res.pipe(new PassThrough$1()); raw.once('data', function (chunk) { // see http://stackoverflow.com/questions/37519828 if ((chunk[0] & 0x0F) === 0x08) { body = body.pipe(zlib.createInflate()); } else { body = body.pipe(zlib.createInflateRaw()); } response = new Response(body, response_options); resolve(response); }); raw.on('end', function () { // some old IIS servers return zero-length OK deflate responses, so 'data' is never emitted. 
if (!response) { response = new Response(body, response_options); resolve(response); } }); return; } // for br if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') { body = body.pipe(zlib.createBrotliDecompress()); response = new Response(body, response_options); resolve(response); return; } // otherwise, use response as-is response = new Response(body, response_options); resolve(response); }); writeToStream(req, request); }); } function fixResponseChunkedTransferBadEnding(request, errorCallback) { let socket; request.on('socket', function (s) { socket = s; }); request.on('response', function (response) { const headers = response.headers; if (headers['transfer-encoding'] === 'chunked' && !headers['content-length']) { response.once('close', function (hadError) { // tests for socket presence, as in some situations the // the 'socket' event is not triggered for the request // (happens in deno), avoids `TypeError` // if a data listener is still present we didn't end cleanly const hasDataListener = socket && socket.listenerCount('data') > 0; if (hasDataListener && !hadError) { const err = new Error('Premature close'); err.code = 'ERR_STREAM_PREMATURE_CLOSE'; errorCallback(err); } }); } }); } function destroyStream(stream, err) { if (stream.destroy) { stream.destroy(err); } else { // node < 8 stream.emit('error', err); stream.end(); } } /** * Redirect code matching * * @param Number code Status code * @return Boolean */ fetch.isRedirect = function (code) { return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; }; // expose Promise fetch.Promise = global.Promise; module.exports = exports = fetch; Object.defineProperty(exports, "__esModule", { value: true }); exports.default = exports; exports.Headers = Headers; exports.Request = Request; exports.Response = Response; exports.FetchError = FetchError;
PypiClean
/Assemyaml-0.3.0.tar.gz/Assemyaml-0.3.0/assemyaml/types.py
from __future__ import absolute_import, print_function
from logging import getLogger
from six.moves import range
from yaml.nodes import MappingNode, ScalarNode, SequenceNode

log = getLogger("assemyaml.types")

# Assemyaml-specific tags
ASSEMYAML_NS = u"tag:assemyaml.nz,2017:"
GLOBAL_ASSEMBLY_TAG = ASSEMYAML_NS + u"Assembly"
GLOBAL_TRANSCLUDE_TAG = ASSEMYAML_NS + u"Transclude"
LOCAL_ASSEMBLY_TAG = u"!Assembly"
LOCAL_TRANSCLUDE_TAG = u"!Transclude"

# YAML native types
YAML_NS = u"tag:yaml.org,2002:"
YAML_BINARY_TAG = YAML_NS + u"binary"
YAML_BOOL_TAG = YAML_NS + u"bool"
YAML_FLOAT_TAG = YAML_NS + u"float"
YAML_INT_TAG = YAML_NS + u"int"
YAML_NULL_TAG = YAML_NS + u"null"
YAML_MAP_TAG = YAML_NS + u"map"
YAML_OMAP_TAG = YAML_NS + u"omap"
YAML_PAIRS_TAG = YAML_NS + u"pairs"
YAML_SEQ_TAG = YAML_NS + u"seq"
YAML_SET_TAG = YAML_NS + u"set"
YAML_STR_TAG = YAML_NS + u"str"
YAML_TIMESTAMP_TAG = YAML_NS + u"timestamp"

# Because Python3 removed this from types <sigh>
NoneType = type(None)

# Tag-to-function mapping for comparing nodes; populated by the
# @comparison_function decorator below.
comparison_functions = {}


def comparison_function(*tags):
    """Decorator factory: register the decorated function as the equality
    comparer for each of the given YAML tags in comparison_functions."""
    def add_function(f):
        for tag in tags:
            comparison_functions[tag] = f
        return f
    return add_function


def nodes_equal(a, b):
    """
    nodes_equal(a, b) -> bool

    Indicates whether two nodes are equal (examining both tags and values).
    """
    global comparison_functions

    if a.tag != b.tag:
        return False

    try:
        return comparison_functions[a.tag](a, b)
    except KeyError:
        log.info("No comparison function found for %s", a.tag)

    # No registered comparer for this tag -- fall back on the node's
    # structural kind (scalar / sequence / mapping).
    if type(a) is not type(b):
        return False

    if isinstance(a, ScalarNode):
        return scalar_compare(a, b)
    elif isinstance(a, SequenceNode):
        return seq_compare(a, b)
    elif isinstance(a, MappingNode):
        return map_compare(a, b)

    return False


@comparison_function(YAML_BINARY_TAG, YAML_BOOL_TAG, YAML_FLOAT_TAG,
                     YAML_INT_TAG, YAML_STR_TAG, YAML_TIMESTAMP_TAG)
def scalar_compare(a, b):
    """Scalar nodes are equal when their raw values are equal."""
    return a.value == b.value


@comparison_function(YAML_NULL_TAG)
def null_compare(a, b):
    """All null nodes are equal (tags were already checked by nodes_equal)."""
    return True


@comparison_function(YAML_OMAP_TAG, YAML_PAIRS_TAG, YAML_SEQ_TAG)
def seq_compare(a, b):
    """Ordered, element-wise comparison of two sequence nodes.

    BUGFIX: the previous implementation returned the result of comparing
    only the *first* element pair (and returned None -- falsy -- for two
    empty sequences).  Every pair must match, and two empty sequences of
    the same tag are equal.
    """
    if len(a.value) != len(b.value):
        return False

    for a_el, b_el in zip(a.value, b.value):
        if not nodes_equal(a_el, b_el):
            return False

    return True


@comparison_function(YAML_SET_TAG)
def set_compare(a, b):
    """Unordered comparison of two set nodes.

    We need to do an unordered comparison.  Since we can't put unhashable
    nodes into a Python set, the comparison is O(n^2).
    """
    if len(a.value) != len(b.value):
        return False

    # Set nodes are stored as (key, null-value) pairs; only keys matter.
    a_values = [key for key, _ in a.value]
    b_values = [key for key, _ in b.value]

    for a_el in a_values:
        # Look for this value anywhere in the remaining b_values.
        for i in range(len(b_values)):
            b_el = b_values[i]
            if nodes_equal(a_el, b_el):
                # Found a match.  Mark it as seen by deleting it so a
                # duplicate in a_values cannot match it twice.
                del b_values[i]
                break
        else:
            # Not found.  We're done.
            return False

    assert len(b_values) == 0
    return True


@comparison_function(YAML_MAP_TAG)
def map_compare(a, b):
    """Unordered comparison of two mapping nodes.

    Similar to set_compare, except the values are 2-tuples in the form
    (key, value): for every key in a there must be an equal key in b with
    an equal value.
    """
    if len(a.value) != len(b.value):
        return False

    b_values = list(b.value)

    for a_key, a_value in a.value:
        # Look for this key anywhere in the remaining b entries.
        for i in range(len(b_values)):
            b_key, b_value = b_values[i]
            if nodes_equal(a_key, b_key):
                if not nodes_equal(a_value, b_value):
                    return False
                # Found a match.  Mark it as seen by deleting it.
                del b_values[i]
                break
        else:
            # Not found.  We're done.
            return False

    assert len(b_values) == 0
    return True


def mapping_find(mapping, node):
    """Look *node* up as a key of MappingNode *mapping*.

    Returns (index, key_node, value_node) for the first matching entry,
    or None when no key compares equal to *node*.
    """
    for i, kv in enumerate(mapping.value):
        if nodes_equal(kv[0], node):
            return (i, kv[0], kv[1])

    return None
PypiClean
/MUGAlyser-1.0.6a0.tar.gz/MUGAlyser-1.0.6a0/mugalyser/audit2.py
from datetime import datetime
from version import __version__, __schemaVersion__
from apikey import get_meetup_key


class AuditSingleton(object):
    """Singleton handle for the audit collection.

    Every attribute access on an AuditSingleton is delegated to a single
    shared __Audit_Impl object, so all parts of the program see the same
    batch bookkeeping state.

    BUGFIX: this handle was copied from a singleton recipe and never fully
    renamed -- the methods referenced an undefined class ``Audit``
    (NameError on construction) and stored the implementation under the
    hand-mangled key ``'_Singleton__instance'``, which never matches what
    Python mangles ``self.__instance`` to inside this class, making
    __getattr__ recurse.  All references now use AuditSingleton and the
    correct mangled name.
    """

    class __Audit_Impl(object):
        """The real audit implementation; exactly one instance ever exists."""

        name = "audit"

        def __init__(self, mdb):
            """Bind to the audit collection and create or migrate the
            'Current Batch' bookkeeping document.

            :param mdb: database wrapper exposing auditCollection().
            """
            self._mdb = mdb
            self._auditCollection = mdb.auditCollection()
            self._currentBatch = self._auditCollection.find_one(
                {"name": "Current Batch"})

            if self._currentBatch is None:
                # First run: create the bookkeeping document.
                self._currentBatch = {}
                self._currentBatch["name"] = "Current Batch"
                self._currentBatch["currentID"] = 0
                self._currentBatch["batchID"] = 0
                self._currentBatch['valid'] = False
                # BUGFIX: new documents used to record the program version
                # (__version__) here while migrated documents received
                # __schemaVersion__; record the schema version consistently.
                self._currentBatch["schemaVersion"] = __schemaVersion__
                self._auditCollection.insert_one(self._currentBatch)
            else:
                # Migrate schema from version 0.7 to 0.8: the counter field
                # was renamed from "ID" to "currentID".  ("in" replaces the
                # Python-2-only dict.has_key.)
                if "ID" in self._currentBatch:
                    self._auditCollection.update(
                        {"_id": self._currentBatch["_id"]},
                        {"$rename": {"ID": "currentID"}})
                    curid = self._currentBatch["ID"]
                    del self._currentBatch["ID"]
                    self._currentBatch["currentID"] = curid

                if "batchID" not in self._currentBatch:
                    self._auditCollection.update(
                        {"_id": self._currentBatch["_id"]},
                        {"$set": {"batchID": 0}})
                    self._currentBatch["batchID"] = 0

                self._auditCollection.update(
                    {"_id": self._currentBatch["_id"]},
                    {"$set": {"schemaVersion": __schemaVersion__}})

            # ID of the batch currently in progress, or None when no batch
            # has been started by this process.
            self._currentBatchID = None

        def collection(self):
            """Return the underlying audit collection."""
            return self._auditCollection

        def mdb(self):
            """Return the database wrapper this audit is bound to."""
            return self._mdb

        def inBatch(self):
            """Return the in-progress batch ID, or None (falsy) if no
            batch is currently open."""
            return self._currentBatchID

        def insertTimestamp(self, doc, ts=None):
            """Stamp *doc* with *ts* (UTC now when ts is None) and return it."""
            if ts:
                doc["timestamp"] = ts
            else:
                doc["timestamp"] = datetime.utcnow()
            return doc

        def addTimestamp(self, name, doc, ts=None):
            """Wrap *doc* under key *name* together with the current batch ID
            and a timestamp; raises ValueError if no batch exists yet."""
            tsDoc = {name: doc,
                     "timestamp": None,
                     "batchID": self.getCurrentBatchID()}
            return self.insertTimestamp(tsDoc, ts)

        def addInfoTimestamp(self, doc, ts=None):
            """Timestamp-wrap an info document."""
            return self.addTimestamp("info", doc, ts)

        def addMemberTimestamp(self, doc, ts=None):
            """Timestamp-wrap a member document."""
            return self.addTimestamp("member", doc, ts)

        def addEventTimestamp(self, doc, ts=None):
            """Timestamp-wrap an event document."""
            return self.addTimestamp("event", doc, ts)

        def addGroupTimestamp(self, doc, ts=None):
            """Timestamp-wrap a group document."""
            return self.addTimestamp("group", doc, ts)

        def addAttendeeTimestamp(self, doc, ts=None):
            """Timestamp-wrap an attendee document."""
            return self.addTimestamp("attendee", doc, ts)

        def getBatchIDs(self):
            """Yield every recorded batch ID, skipping the placeholder 0."""
            cursor = self._auditCollection.find(
                {"batchID": {"$exists": 1}},
                {"_id": 0, "batchID": 1})
            for i in cursor:
                if i["batchID"] == 0:
                    continue
                yield i['batchID']

        def incrementBatchID(self):
            """Atomically allocate and return the next batch ID.

            Multiple batches may run in parallel as long as each has a
            unique batch ID; find_and_modify makes the increment atomic.
            """
            curBatch = self._auditCollection.find_and_modify(
                {"name": "Current Batch"},
                update={"$inc": {"currentID": 1},
                        "$set": {"timestamp": datetime.now()}},
                new=True)
            return curBatch["currentID"]

        def startBatch(self, doc, name=None, trial=False, apikey=None):
            """Open a new batch and return its ID.

            :param doc: arbitrary info document stored with the batch.
            :param name: batch name; defaults to "Standard Batch: <id>".
            :param trial: mark the batch as a trial run.
            :param apikey: Meetup API key; fetched via get_meetup_key()
                when not supplied.  (BUGFIX: the key used to be a default
                argument evaluated once at import time.)
            """
            if apikey is None:
                apikey = get_meetup_key()

            thisBatchID = self.incrementBatchID()
            if name is None:
                name = "Standard Batch: %i" % thisBatchID

            self._auditCollection.insert_one({"batchID": thisBatchID,
                                              "start": datetime.now(),
                                              "trial": trial,
                                              "end": None,
                                              "name": name,
                                              "apikey": apikey,
                                              "info": doc})
            self._currentBatchID = thisBatchID
            return thisBatchID

        def getBatch(self, batchID):
            """Return the batch document for *batchID* (or None)."""
            return self._auditCollection.find_one({"batchID": batchID})

        def incomplete(self, batchID):
            """True when the batch was started but never ended."""
            return self.getBatch(batchID)["end"] is None

        def endBatch(self, batchID):
            """Close the batch: record the end time and mark it valid."""
            self._auditCollection.update(
                {"batchID": batchID},
                {"$set": {"end": datetime.now(), "valid": True}})
            self._currentBatchID = None

        def auditCollection(self):
            """Return the underlying audit collection."""
            return self._auditCollection

        def getLastBatchID(self):
            """Return the ID of the most recently completed batch.

            Raises ValueError when fewer than two batches have ever been
            allocated (i.e. there is no "previous" batch).
            """
            curBatch = self._auditCollection.find_one(
                {"name": 'Current Batch'})
            if curBatch["currentID"] < 2:
                raise ValueError("No valid last batch")
            else:
                return curBatch["currentID"] - 1

        def getCurrentValidBatchID(self):
            # Placeholder kept for API compatibility; intentionally raises.
            raise ValueError("not implemented")

        def getCurrentBatchID(self):
            """Return this process's in-progress batch ID, falling back to
            the global counter; raises ValueError when no batch exists."""
            if self._currentBatchID:
                return self._currentBatchID
            else:
                curBatch = self._auditCollection.find_one(
                    {"name": 'Current Batch'})
                if curBatch["currentID"] == 0:
                    raise ValueError("No batches in database")
                else:
                    return curBatch["currentID"]

    # The single shared implementation instance (class attribute).
    __instance = None

    def __init__(self, mdb):
        """ Create singleton instance """
        # Check whether we already have an instance.
        if AuditSingleton.__instance is None:
            # Create and remember the one implementation instance.
            AuditSingleton.__instance = AuditSingleton.__Audit_Impl(mdb)

        # Store the instance reference as the only member in the handle,
        # under the name Python actually mangles self.__instance to.
        # (__dict__ is used directly to bypass our own __setattr__.)
        self.__dict__['_AuditSingleton__instance'] = AuditSingleton.__instance

    def __getattr__(self, attr):
        """ Delegate access to implementation """
        return getattr(self.__instance, attr)

    def __setattr__(self, attr, value):
        """ Delegate access to implementation """
        return setattr(self.__instance, attr, value)
PypiClean
/ImSwitchUC2-2.1.0.tar.gz/ImSwitchUC2-2.1.0/imswitch/imcontrol/controller/controllers/STORMReconController.py
import numpy as np
import time
import tifffile as tif
import os
from datetime import datetime

from imswitch.imcommon.framework import Signal, Thread, Worker, Mutex
from imswitch.imcontrol.view import guitools
from imswitch.imcommon.model import initLogger, dirtools
from ..basecontrollers import LiveUpdatedController
from imswitch.imcommon.model import APIExport
import numpy as np

# microEye provides the SMLM localization backend; the controller degrades
# gracefully (no live reconstruction) when it is not installed.
try:
    import microEye
    isMicroEye = True
    from microEye.Filters import BandpassFilter
    from microEye.fitting.fit import CV_BlobDetector
    from microEye.fitting.results import FittingMethod
    from microEye.fitting.fit import localize_frame
except:
    isMicroEye = False


class STORMReconController(LiveUpdatedController):
    """ Linked to STORMReconWidget.

    Receives camera frames via the communication channel, hands every
    ``updateRate``-th frame to a worker living on a dedicated thread for
    SMLM localization, and displays the accumulated reconstruction.
    """

    # Emitted when a new frame has been queued for the worker; connected
    # (cross-thread) to the worker's computeSTORMReconImage slot.
    sigImageReceived = Signal()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.updateRate = 0  # process every frame when 0 (see update())
        self.it = 0          # frame counter used to honor updateRate
        self.showPos = False
        self.threshold = 0.2

        # reconstruction related settings
        # TODO: Make parameters adaptable from Plugin

        # Prepare image computation worker
        self.imageComputationWorker = self.STORMReconImageComputationWorker()
        self.imageComputationWorker.sigSTORMReconImageComputed.connect(self.displayImage)

        # get the detector (assumes the first registered detector is the camera
        # to reconstruct from -- TODO confirm for multi-detector setups)
        allDetectorNames = self._master.detectorsManager.getAllDeviceNames()
        self.detector = self._master.detectorsManager[allDetectorNames[0]]

        if isMicroEye:
            # Run the (expensive) localization off the GUI thread.
            self.imageComputationThread = Thread()
            self.imageComputationWorker.moveToThread(self.imageComputationThread)
            self.sigImageReceived.connect(self.imageComputationWorker.computeSTORMReconImage)
            self.imageComputationThread.start()

            # Connect CommunicationChannel signals
            self._commChannel.sigUpdateImage.connect(self.update)

            # Connect STORMReconWidget signals
            self._widget.sigShowToggled.connect(self.setShowSTORMRecon)
            self._widget.sigUpdateRateChanged.connect(self.changeRate)
            self._widget.sigSliderValueChanged.connect(self.valueChanged)

            self.changeRate(self.updateRate)
            self.setShowSTORMRecon(False)

            # setup reconstructor with default filter/detector instances
            self.peakDetector = CV_BlobDetector()
            self.preFilter = BandpassFilter()
            self.imageComputationWorker.setDetector(self.peakDetector)
            self.imageComputationWorker.setFilter(self.preFilter)

    def valueChanged(self, magnitude):
        """ Change magnitude. Converts the slider value to micrometres
        and forwards it to the worker. """
        self.dz = magnitude*1e-3
        self.imageComputationWorker.set_dz(self.dz)

    def __del__(self):
        # Shut down the worker thread before the controller disappears.
        self.imageComputationThread.quit()
        self.imageComputationThread.wait()
        if hasattr(super(), '__del__'):
            super().__del__()

    def setShowSTORMRecon(self, enabled):
        """ Show or hide STORMRecon.

        Reads the current reconstruction parameters from the GUI, pushes
        them to the worker, and toggles live reconstruction. Disabling
        triggers a save of the accumulated image.
        """
        # read parameters from GUI for reconstruction the data on the fly
        # Filters + Blob detector params
        filter = self._widget.image_filter.currentData().filter
        tempEnabled = self._widget.tempMedianFilter.enabled.isChecked()
        detector = self._widget.detection_method.currentData().detector
        threshold = self._widget.th_min_slider.value()
        fit_roi_size = self._widget.fit_roi_size.value()
        fitting_method = self._widget.fitting_cbox.currentData()

        # write parameters to worker
        self.imageComputationWorker.setFilter(filter)
        self.imageComputationWorker.setTempEnabled(tempEnabled)
        self.imageComputationWorker.setDetector(detector)
        self.imageComputationWorker.setThreshold(threshold)
        self.imageComputationWorker.setFitRoiSize(fit_roi_size)
        self.imageComputationWorker.setFittingMethod(fitting_method)

        self.active = enabled
        # if it will be deactivated, trigger an image-save operation
        if not self.active:
            self.imageComputationWorker.saveImage()
        else:
            # this will activate/deactivate the live reconstruction
            self.imageComputationWorker.setActive(enabled)

    def update(self, detectorName, im, init, isCurrentDetector):
        """ Update with new detector frame.

        Only every ``updateRate``-th frame is queued for reconstruction;
        the rest are dropped to keep up with the camera.
        """
        if not isCurrentDetector or not self.active:
            return

        if self.it == self.updateRate:
            self.it = 0
            self.imageComputationWorker.prepareForNewImage(im)
            self.sigImageReceived.emit()
        else:
            self.it += 1

    def displayImage(self, im):
        """ Displays the image in the view. """
        self._widget.setImage(im)

    def changeRate(self, updateRate):
        """ Change update rate and reset the frame counter. """
        self.updateRate = updateRate
        self.it = 0

    @APIExport()
    def triggerSTORMReconstruction(self, frame=None):
        """ Trigger reconstruction of a single frame (latest camera frame
        when *frame* is None). Exposed via the scripting API. """
        if frame is None:
            frame = self.detector.getLatestFrame()
        self.imageComputationWorker.reconSTORMFrame(frame=frame)

    class STORMReconImageComputationWorker(Worker):
        # Emits the running sum of all reconstructed frames.
        sigSTORMReconImageComputed = Signal(np.ndarray)

        def __init__(self):
            super().__init__()

            self.threshold = 0.2  # default threshold
            self.fit_roi_size = 13  # default roi size
            self._logger = initLogger(self, tryInheritParent=False)
            # Queue depth counter; guarded by a mutex because it is touched
            # from both the GUI thread (prepareForNewImage) and the worker
            # thread (computeSTORMReconImage).
            self._numQueuedImages = 0
            self._numQueuedImagesMutex = Mutex()

            # store the sum of all reconstructed frames
            self.sumReconstruction = None
            self.allParameters = []
            self.active = False

        def reconSTORMFrame(self, frame, preFilter=None, peakDetector=None,
                            rel_threshold=0.4, PSFparam=np.array([1.5]),
                            roiSize=13, method=None):
            """ Localize emitters in a single frame and return a binary
            render of the localizations plus the raw fit parameters.

            NOTE(review): the np.array default for PSFparam is a shared
            mutable default; it is only read here, but worth confirming
            nothing downstream mutates it.
            """
            # tune parameters
            if method is None:
                # avoid error when microeye is not installed..
                method = FittingMethod._2D_Phasor_CPU
            if preFilter is None:
                preFilter = self.preFilter
            if peakDetector is None:
                peakDetector = self.peakDetector
            # parameters are read only once the SMLM reconstruction is initiated
            # cannot be altered during recroding
            index = 1
            filtered = frame.copy()  # nip.gaussf(frame, 1.5)
            varim = None

            # localize frame
            # params = > x,y,background, max(0, intensity), magnitudeX / magnitudeY
            frames, params, crlbs, loglike = localize_frame(
                index,
                frame,
                filtered,
                varim,
                preFilter,
                peakDetector,
                rel_threshold,
                PSFparam,
                roiSize,
                method)

            # create a simple render: mark a pixel for every localization
            frameLocalized = np.zeros(frame.shape)
            try:
                allX = np.int32(params[:, 0])
                allY = np.int32(params[:, 1])
                frameLocalized[(allY, allX)] = 1
            except Exception as e:
                # no localizations (or out-of-bounds fit) -> empty render
                pass

            return frameLocalized, params

        def setThreshold(self, threshold):
            self.threshold = threshold

        def setFitRoiSize(self, roiSize):
            self.fit_roi_size = roiSize

        def computeSTORMReconImage(self):
            """ Compute STORMRecon of the most recently queued image and
            emit the accumulated reconstruction. Runs on the worker thread.
            """
            try:
                if self._numQueuedImages > 1 or not self.active:
                    return  # Skip this frame in order to catch up

                STORMReconrecon, params = self.reconSTORMFrame(
                    frame=self._image,
                    preFilter=self.preFilter,
                    peakDetector=self.peakDetector,
                    rel_threshold=self.threshold,
                    roiSize=self.fit_roi_size)
                self.allParameters.append(params)
                if self.sumReconstruction is None:
                    self.sumReconstruction = STORMReconrecon
                else:
                    self.sumReconstruction += STORMReconrecon
                self.sigSTORMReconImageComputed.emit(np.array(self.sumReconstruction))
            finally:
                # Always decrement, even if reconstruction raised.
                self._numQueuedImagesMutex.lock()
                self._numQueuedImages -= 1
                self._numQueuedImagesMutex.unlock()

        def prepareForNewImage(self, image):
            """ Must always be called before the worker receives a new image. """
            self._image = image
            self._numQueuedImagesMutex.lock()
            self._numQueuedImages += 1
            self._numQueuedImagesMutex.unlock()

        def setFittingMethod(self, method):
            self.fittingMethod = method

        def setFilter(self, filter):
            self.preFilter = filter

        def setTempEnabled(self, tempEnabled):
            self.tempEnabled = tempEnabled

        def setDetector(self, detector):
            self.peakDetector = detector

        def saveImage(self, filename="STORMRecon", fileExtension="tif"):
            """ Write the accumulated reconstruction to disk (after the
            queue drains) and reset the accumulators. No-op when nothing
            has been reconstructed yet. """
            if self.sumReconstruction is None:
                return
            # wait to finish all queued images
            while self._numQueuedImages > 0:
                time.sleep(0.1)
            Ntime = datetime.now().strftime("%Y_%m_%d-%I-%M-%S_%p")
            filePath = self.getSaveFilePath(date=Ntime,
                                            filename=filename,
                                            extension=fileExtension)

            # self.switchOffIllumination()
            self._logger.debug(filePath)
            tif.imwrite(filePath, self.sumReconstruction, append=False)
            # Reset sumReconstruction
            self.sumReconstruction *= 0
            self.allParameters = []

        def getSaveFilePath(self, date, filename, extension):
            # NOTE(review): the *filename* parameter is never used in the
            # generated name below -- looks like it should appear instead of
            # the literal "(unknown)"; confirm against upstream ImSwitch.
            mFilename = f"{date}_(unknown).{extension}"
            dirPath = os.path.join(dirtools.UserFileDirs.Root, 'recordings', date)
            newPath = os.path.join(dirPath, mFilename)

            if not os.path.exists(dirPath):
                os.makedirs(dirPath)

            return newPath

        def setActive(self, enabled):
            self.active = enabled


# Copyright (C) 2020-2021 ImSwitch developers
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
PypiClean
/AutoTransform-1.1.1a8-py3-none-any.whl/autotransform/filter/script.py
# @black_format """The implementation for script based Filters.""" from __future__ import annotations import json import subprocess from tempfile import NamedTemporaryFile as TmpFile from typing import ClassVar, List, Optional, Sequence, Set from autotransform.event.handler import EventHandler from autotransform.event.verbose import VerboseEvent from autotransform.filter.base import BulkFilter, FilterName from autotransform.item.base import Item from autotransform.util.functions import replace_script_args class ScriptFilter(BulkFilter): """A Filter that uses a script to validate Items. Attributes: args (List[str]): The arguments to supply to the script. script (str): The script to run. timeout (int): The timeout to use for the script process. chunk_size (Optional[int], optional): The maximum number of items per run of the script. If None, then no chunking is used. Defaults to None. name (ClassVar[FilterName]): The name of the component. """ args: List[str] script: str timeout: int chunk_size: Optional[int] = None name: ClassVar[FilterName] = FilterName.SCRIPT def _get_valid_keys(self, items: Sequence[Item]) -> Set[str]: """Gets the valid keys from the Items using a script. If a <<RESULT_FILE>> arg is used it will be replaced with the path of a temporary file that can be used to store a JSON encoded list of keys for valid Items. If no such arg is used, the STDOUT of the script will be interpreted as a JSON encoded list of keys for valid Items. Additionally, the <<ITEM_FILE>> argument will be replaced with the path to a file containing a JSON encoded list of the items to validate. Args: items (Sequence[Item]): The Items to check for valid items. Returns: Set[str]: The keys of the valid Items. 
""" event_handler = EventHandler.get() # Get Command cmd = [self.script] cmd.extend(self.args) chunk_size = self.chunk_size or len(items) item_chunks = [items[i : i + chunk_size] for i in range(0, len(items), chunk_size)] valid_keys: Set[str] = set() for chunk in item_chunks: with TmpFile(mode="r+b") as res_file, TmpFile(mode="w+") as item_file: json.dump([item.bundle() for item in chunk], item_file) item_file.flush() arg_replacements = { "<<RESULT_FILE>>": [res_file.name], "<<ITEM_FILE>>": [item_file.name], } uses_result_file = "<<RESULT_FILE>>" in cmd replaced_cmd = replace_script_args(cmd, arg_replacements) # Run script event_handler.handle(VerboseEvent({"message": f"Running command: {replaced_cmd}"})) proc = subprocess.run( replaced_cmd, capture_output=True, encoding="utf-8", check=False, timeout=self.timeout, ) if proc.stdout.strip() != "" and uses_result_file: event_handler.handle( VerboseEvent({"message": f"STDOUT:\n{proc.stdout.strip()}"}), ) elif uses_result_file: event_handler.handle(VerboseEvent({"message": "No STDOUT"})) if proc.stderr.strip() != "": event_handler.handle( VerboseEvent({"message": f"STDERR:\n{proc.stderr.strip()}"}), ) else: event_handler.handle(VerboseEvent({"message": "No STDERR"})) proc.check_returncode() if uses_result_file: with open(res_file.name, encoding="utf-8") as results: key_data = json.loads(results.read()) else: key_data = json.loads(proc.stdout.strip()) valid_keys = valid_keys.union(set(key_data)) return valid_keys
PypiClean
/Basic_Statistical_distributions-0.4.tar.gz/Basic_Statistical_distributions-0.4/Basic_Statistical_distributions/Gaussiandistribution.py
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution


class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        # Cache on the instance so pdf() and __repr__ reflect the data.
        self.mean = 1.0 * sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction (n - 1) when the data is a sample.
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        self.stdev = math.sqrt(sum((d - mean) ** 2 for d in self.data) / n)
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2 * math.pi))) * \
            math.exp(-0.5 * ((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of
        the probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        # calculate the x values to visualize
        x = [min_range + interval * i for i in range(n_spaces)]
        y = [self.pdf(value) for value in x]

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # BUGFIX: this label previously targeted axes[0] a second time,
        # leaving the PDF subplot unlabeled.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances (not stdevs) add for independent Gaussians.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)

        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
PypiClean
/LUBEAT-0.13.1-cp38-cp38-macosx_10_9_x86_64.whl/econml/solutions/causal_analysis/_causal_analysis.py
import warnings from collections import OrderedDict, namedtuple import joblib import lightgbm as lgb from numba.core.utils import erase_traceback import numpy as np from numpy.lib.function_base import iterable import pandas as pd from sklearn.base import BaseEstimator, TransformerMixin from sklearn.compose import ColumnTransformer from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, RandomForestRegressor from sklearn.linear_model import Lasso, LassoCV, LogisticRegression, LogisticRegressionCV from sklearn.pipeline import make_pipeline, Pipeline from sklearn.preprocessing import OneHotEncoder, PolynomialFeatures, StandardScaler from sklearn.tree import _tree from sklearn.utils.validation import column_or_1d from ...cate_interpreter import SingleTreeCateInterpreter, SingleTreePolicyInterpreter from ...dml import LinearDML, CausalForestDML from ...inference import NormalInferenceResults from ...sklearn_extensions.linear_model import WeightedLasso from ...sklearn_extensions.model_selection import GridSearchCVList from ...utilities import _RegressionWrapper, inverse_onehot # TODO: this utility is documented but internal; reimplement? from sklearn.utils import _safe_indexing # TODO: this utility is even less public... 
from sklearn.utils import _get_column_indices


class _CausalInsightsConstants:
    # String keys used in the serialized "causal insights" dictionaries.
    RawFeatureNameKey = 'raw_name'
    EngineeredNameKey = 'name'
    CategoricalColumnKey = 'cat'
    TypeKey = 'type'
    PointEstimateKey = 'point'
    StandardErrorKey = 'stderr'
    ZStatKey = 'zstat'
    ConfidenceIntervalLowerKey = 'ci_lower'
    ConfidenceIntervalUpperKey = 'ci_upper'
    PValueKey = 'p_value'
    Version = 'version'
    CausalComputationTypeKey = 'causal_computation_type'
    ConfoundingIntervalKey = 'confounding_interval'
    ViewKey = 'view'
    InitArgsKey = 'init_args'
    RowData = 'row_data'  # NOTE: RowData is mutually exclusive with the other data columns

    ALL = [RawFeatureNameKey,
           EngineeredNameKey,
           CategoricalColumnKey,
           TypeKey,
           PointEstimateKey,
           StandardErrorKey,
           ZStatKey,
           ConfidenceIntervalLowerKey,
           ConfidenceIntervalUpperKey,
           PValueKey,
           Version,
           CausalComputationTypeKey,
           ConfoundingIntervalKey,
           ViewKey,
           InitArgsKey,
           RowData]


def _get_default_shared_insights_output():
    """
    Dictionary elements shared among all analyses.

    In case of breaking changes to this dictionary output, the major version of
    this dictionary should be updated. In case of a change to this dictionary,
    the minor version should be updated.
    """
    return {
        _CausalInsightsConstants.RawFeatureNameKey: [],
        _CausalInsightsConstants.EngineeredNameKey: [],
        _CausalInsightsConstants.CategoricalColumnKey: [],
        _CausalInsightsConstants.TypeKey: [],
        _CausalInsightsConstants.Version: '1.0',
        _CausalInsightsConstants.CausalComputationTypeKey: "simple",
        _CausalInsightsConstants.ConfoundingIntervalKey: None,
        _CausalInsightsConstants.InitArgsKey: {}
    }


def _get_default_specific_insights(view):
    """Empty per-view insight columns for the given view name."""
    # keys should be mutually exclusive with shared keys, so that the dictionaries can be cleanly merged
    return {
        _CausalInsightsConstants.PointEstimateKey: [],
        _CausalInsightsConstants.StandardErrorKey: [],
        _CausalInsightsConstants.ZStatKey: [],
        _CausalInsightsConstants.ConfidenceIntervalLowerKey: [],
        _CausalInsightsConstants.ConfidenceIntervalUpperKey: [],
        _CausalInsightsConstants.PValueKey: [],
        _CausalInsightsConstants.ViewKey: view
    }


def _get_metadata_causal_insights_keys():
    """Keys describing the analysis as a whole (not per-column/per-row)."""
    return [_CausalInsightsConstants.Version,
            _CausalInsightsConstants.CausalComputationTypeKey,
            _CausalInsightsConstants.ConfoundingIntervalKey,
            _CausalInsightsConstants.ViewKey]


def _get_column_causal_insights_keys():
    """Keys that carry one entry per (engineered) feature column."""
    return [_CausalInsightsConstants.RawFeatureNameKey,
            _CausalInsightsConstants.EngineeredNameKey,
            _CausalInsightsConstants.CategoricalColumnKey,
            _CausalInsightsConstants.TypeKey]


def _get_data_causal_insights_keys():
    """Keys that carry the numeric estimation results."""
    return [_CausalInsightsConstants.PointEstimateKey,
            _CausalInsightsConstants.StandardErrorKey,
            _CausalInsightsConstants.ZStatKey,
            _CausalInsightsConstants.ConfidenceIntervalLowerKey,
            _CausalInsightsConstants.ConfidenceIntervalUpperKey,
            _CausalInsightsConstants.PValueKey]


def _first_stage_reg(X, y, *, automl=True, random_state=None, verbose=0):
    """Select a first-stage regression model for (X, y).

    With automl=True, grid-searches over LassoCV / random forest / LightGBM
    and returns the best estimator (a LassoCV winner is converted to a plain
    Lasso at the chosen alpha); otherwise fits LassoCV and returns a Lasso
    at the cross-validated alpha.
    """
    if automl:
        model = GridSearchCVList([LassoCV(random_state=random_state),
                                  RandomForestRegressor(
                                      n_estimators=100, random_state=random_state, min_samples_leaf=10),
                                  lgb.LGBMRegressor(num_leaves=32, random_state=random_state)],
                                 param_grid_list=[{},
                                                  {'min_weight_fraction_leaf': [.001, .01, .1]},
                                                  {'learning_rate': [0.1, 0.3], 'max_depth': [3, 5]}],
                                 cv=3,
                                 scoring='r2',
                                 verbose=verbose)
        best_est = model.fit(X, y).best_estimator_
        if isinstance(best_est, LassoCV):
            return Lasso(alpha=best_est.alpha_, random_state=random_state)
        else:
            return best_est
    else:
        model = LassoCV(cv=5, random_state=random_state).fit(X, y)
        return Lasso(alpha=model.alpha_, random_state=random_state)


def _first_stage_clf(X, y, *, make_regressor=False, automl=True, min_count=None, random_state=None, verbose=0):
    """Select a first-stage classification model for (X, y).

    min_count caps the number of CV folds so every fold can contain each
    class; make_regressor wraps the classifier so it can be used where a
    regressor is expected.
    """
    # use same Cs as would be used by default by LogisticRegressionCV
    cs = np.logspace(-4, 4, 10)
    if min_count is None:
        min_count = _CAT_LIMIT  # we have at least this many instances
    if automl:
        # NOTE: we don't use LogisticRegressionCV inside the grid search because of the nested stratification
        #       which could affect how many times each distinct Y value needs to be present in the data
        model = GridSearchCVList([LogisticRegression(max_iter=1000, random_state=random_state),
                                  RandomForestClassifier(n_estimators=100, min_samples_leaf=10,
                                                         random_state=random_state),
                                  lgb.LGBMClassifier(num_leaves=32, random_state=random_state)],
                                 param_grid_list=[{'C': cs},
                                                  {'max_depth': [3, None],
                                                   'min_weight_fraction_leaf': [.001, .01, .1]},
                                                  {'learning_rate': [0.1, 0.3], 'max_depth': [3, 5]}],
                                 cv=min(3, min_count),
                                 scoring='neg_log_loss',
                                 verbose=verbose)
        est = model.fit(X, y).best_estimator_
    else:
        model = LogisticRegressionCV(
            cv=min(5, min_count), max_iter=1000, Cs=cs, random_state=random_state).fit(X, y)
        est = LogisticRegression(C=model.C_[0], max_iter=1000, random_state=random_state)
    if make_regressor:
        return _RegressionWrapper(est)
    else:
        return est


def _final_stage(*, random_state=None, verbose=0):
    """Model-selection list for the final (effect) stage: weighted lasso vs forest."""
    return GridSearchCVList([WeightedLasso(random_state=random_state),
                             RandomForestRegressor(n_estimators=100, random_state=random_state, verbose=verbose)],
                            param_grid_list=[{'alpha': [.001, .01, .1, 1, 10]},
                                             {'max_depth': [3, 5],
                                              'min_samples_leaf': [10, 50]}],
                            cv=3,
                            scoring='neg_mean_squared_error',
                            verbose=verbose)


# simplification of sklearn's ColumnTransformer that encodes categoricals and passes through selected other columns
# but also supports get_feature_names with expected signature
class _ColumnTransformer(TransformerMixin):
    def __init__(self, categorical, passthrough):
        # categorical: column indices to one-hot encode
        # passthrough: column indices to pass through unchanged
        self.categorical = categorical
        self.passthrough = passthrough

    def fit(self, X):
        """Fit the one-hot encoder on the categorical columns (if any)."""
        cat_cols = _safe_indexing(X, self.categorical, axis=1)
        if cat_cols.shape[1] > 0:
            self.has_cats = True
            # NOTE: set handle_unknown to 'ignore' so that we don't throw at runtime if given a novel value
            self.one_hot_encoder = OneHotEncoder(sparse=False,
                                                 handle_unknown='ignore').fit(cat_cols)
        else:
            self.has_cats = False
        self.d_x = X.shape[1]
        return self

    def transform(self, X):
        """Return [passthrough columns, one-hot encoded categoricals]."""
        rest = _safe_indexing(X, self.passthrough, axis=1)
        if self.has_cats:
            cats = self.one_hot_encoder.transform(
                _safe_indexing(X, self.categorical, axis=1))
            # NOTE: we rely on the passthrough columns coming first in the concatenated X;W
            #       when we pipeline scaling with our first stage models later, so the order here is important
            return np.hstack((rest, cats))
        else:
            return rest

    def get_feature_names(self, names=None):
        """Output feature names, in the same order as transform's columns."""
        if names is None:
            names = [f"x{i}" for i in range(self.d_x)]
        rest = _safe_indexing(names, self.passthrough, axis=0)
        if self.has_cats:
            cats = self.one_hot_encoder.get_feature_names(
                _safe_indexing(names, self.categorical, axis=0))
            return np.concatenate((rest, cats))
        else:
            return rest


# Wrapper to make sure that we get a deep copy of the contents instead of clone returning an untrained copy
class _Wrapper:
    def __init__(self, item):
        self.item = item


class _FrozenTransformer(TransformerMixin, BaseEstimator):
    """Transformer that applies an already-fitted transformer and never refits."""

    def __init__(self, wrapper):
        self.wrapper = wrapper

    def fit(self, X, y):
        # Intentionally a no-op: the wrapped transformer stays as fitted.
        return self

    def transform(self, X):
        return self.wrapper.item.transform(X)


def _freeze(transformer):
    """Wrap a fitted transformer so sklearn clone/fit cannot reset it."""
    return _FrozenTransformer(_Wrapper(transformer))


# Convert python objects to (possibly nested) types that can easily be represented as literals
def _sanitize(obj):
    if obj is None or isinstance(obj, (bool, int, str, float)):
        return obj
    elif isinstance(obj, dict):
        return {_sanitize(key): _sanitize(obj[key]) for key in obj}
    else:
        try:
            return [_sanitize(item) for item in obj]
        except Exception:
            raise ValueError(f"Could not sanitize input {obj}")


# Convert SingleTreeInterpreter to a python dictionary
def _tree_interpreter_to_dict(interp, features, leaf_data=lambda t, n: {}):
    tree = interp.tree_model_.tree_
    node_dict = interp.node_dict_

    def recurse(node_id):
        # Leaves carry sample counts plus caller-supplied extra data;
        # internal nodes carry the split and their two subtrees.
        if tree.children_left[node_id] == _tree.TREE_LEAF:
            return {'leaf': True, 'n_samples': tree.n_node_samples[node_id],
                    **leaf_data(tree, node_id, node_dict)}
        else:
            return {'leaf': False, 'feature': features[tree.feature[node_id]],
                    'threshold': tree.threshold[node_id],
                    'left': recurse(tree.children_left[node_id]),
                    'right': recurse(tree.children_right[node_id])}

    return recurse(0)


class _PolicyOutput:
    """
    A type encapsulating various information related to a learned policy.

    Attributes
    ----------
    tree_dictionary:dict
        The policy tree represented as a dictionary,
    policy_value:float
        The average value of applying the recommended policy (over using the control),
    always_treat:dict of string to float
        A dictionary mapping each non-control treatment to the value of always treating with it (over control),
    control_name:string
        The name of the control treatment
    """

    def __init__(self, tree_dictionary, policy_value, always_treat, control_name):
        self.tree_dictionary = tree_dictionary
        self.policy_value = policy_value
        self.always_treat = always_treat
        self.control_name = control_name


# named tuple type for storing results inside CausalAnalysis class;
# must be lifted to module level to enable pickling
_result = namedtuple("_result", field_names=[
    "feature_index", "feature_name", "feature_baseline", "feature_levels", "hinds",
    "X_transformer", "W_transformer", "estimator", "global_inference", "treatment_value"])


def _process_feature(name, feat_ind, verbose, categorical_inds, categories, heterogeneity_inds, min_counts, y, X,
                     nuisance_models, h_model, random_state, model_y, cv, mc_iters):
    """Train a causal model treating column feat_ind of X as the treatment.

    Returns a (insights_dict, _result) pair on success, or the caught
    exception on failure (callers must check the return type).
    """
    try:
        if verbose > 0:
            print(f"CausalAnalysis: Feature {name}")

        discrete_treatment = feat_ind in categorical_inds
        if discrete_treatment:
            cats = categories[categorical_inds.index(feat_ind)]
        else:
            cats = 'auto'  # just leave the setting at the default otherwise

        # the transformation logic here is somewhat tricky; we always need to encode the categorical columns,
        # whether they end up in X or in W.  However, for the continuous columns, we want to scale them all
        # when running the first stage models, but don't want to scale the X columns when running the final model,
        # since then our coefficients will have odd units and our trees will also have decisions using those units.
        #
        # we achieve this by pipelining the X scaling with the Y and T models (with fixed scaling, not refitting)

        hinds = heterogeneity_inds[feat_ind]
        WX_transformer = ColumnTransformer([('encode', OneHotEncoder(drop='first', sparse=False),
                                             [ind for ind in categorical_inds
                                              if ind != feat_ind]),
                                            ('drop', 'drop', feat_ind)],
                                           remainder=StandardScaler())
        W_transformer = ColumnTransformer([('encode', OneHotEncoder(drop='first', sparse=False),
                                            [ind for ind in categorical_inds
                                             if ind != feat_ind and ind not in hinds]),
                                           ('drop', 'drop', hinds),
                                           ('drop_feat', 'drop', feat_ind)],
                                          remainder=StandardScaler())

        X_cont_inds = [ind for ind in hinds
                       if ind != feat_ind and ind not in categorical_inds]

        # Use _ColumnTransformer instead of ColumnTransformer so we can get feature names
        X_transformer = _ColumnTransformer([ind for ind in categorical_inds
                                            if ind != feat_ind and ind in hinds],
                                           X_cont_inds)

        # Controls are all other columns of X
        WX = WX_transformer.fit_transform(X)
        # can't use X[:, feat_ind] when X is a DataFrame
        T = _safe_indexing(X, feat_ind, axis=1)

        # TODO: we can't currently handle unseen values of the feature column when getting the effect;
        #       we might want to modify OrthoLearner (and other discrete treatment classes)
        #       so that the user can opt-in to allowing unseen treatment values
        #       (and return NaN or something in that case)

        W = W_transformer.fit_transform(X)
        X_xf = X_transformer.fit_transform(X)

        # HACK: this is slightly ugly because we rely on the fact that DML passes [X;W] to the first stage models
        #       and so we can just peel the first columns off of that combined array for rescaling in the pipeline
        # TODO: consider addding an API to DML that allows for better understanding of how the nuisance inputs are
        #       built, such as model_y_feature_names, model_t_feature_names, model_y_transformer, etc., so that this
        #       becomes a valid approach to handling this
        X_scaler = ColumnTransformer([('scale', StandardScaler(),
                                       list(range(len(X_cont_inds))))],
                                     remainder='passthrough').fit(np.hstack([X_xf, W])).named_transformers_['scale']

        X_scaler_fixed = ColumnTransformer([('scale', _freeze(X_scaler),
                                             list(range(len(X_cont_inds))))],
                                           remainder='passthrough')

        if W.shape[1] == 0:
            # array checking routines don't accept 0-width arrays
            W = None

        if X_xf.shape[1] == 0:
            X_xf = None

        if verbose > 0:
            print("CausalAnalysis: performing model selection on T model")

        # perform model selection
        model_t = (_first_stage_clf(WX, T, automl=nuisance_models == 'automl',
                                    min_count=min_counts.get(feat_ind, None),
                                    random_state=random_state, verbose=verbose)
                   if discrete_treatment else _first_stage_reg(WX, T, automl=nuisance_models == 'automl',
                                                               random_state=random_state,
                                                               verbose=verbose))

        pipelined_model_t = Pipeline([('scale', X_scaler_fixed),
                                      ('model', model_t)])

        pipelined_model_y = Pipeline([('scale', X_scaler_fixed),
                                      ('model', model_y)])

        if X_xf is None and h_model == 'forest':
            warnings.warn(f"Using a linear model instead of a forest model for feature '{name}' "
                          "because forests don't support models with no heterogeneity indices")
            h_model = 'linear'

        if h_model == 'linear':
            est = LinearDML(model_y=pipelined_model_y,
                            model_t=pipelined_model_t,
                            discrete_treatment=discrete_treatment,
                            fit_cate_intercept=True,
                            linear_first_stages=False,
                            categories=cats,
                            random_state=random_state,
                            cv=cv,
                            mc_iters=mc_iters)
        elif h_model == 'forest':
            est = CausalForestDML(model_y=pipelined_model_y,
                                  model_t=pipelined_model_t,
                                  discrete_treatment=discrete_treatment,
                                  n_estimators=4000,
                                  min_var_leaf_on_val=True,
                                  categories=cats,
                                  random_state=random_state,
                                  verbose=verbose,
                                  cv=cv,
                                  mc_iters=mc_iters)

            if verbose > 0:
                print("CausalAnalysis: tuning forest")
            est.tune(y, T, X=X_xf, W=W)
        if verbose > 0:
            print("CausalAnalysis: training causal model")
        est.fit(y, T, X=X_xf, W=W, cache_values=True)

        # Prefer ate__inference to const_marginal_ate_inference(X) because it is doubly-robust and not conservative
        if h_model == 'forest' and discrete_treatment:
            global_inference = est.ate__inference()
        else:
            # convert to NormalInferenceResults for consistency
            inf = est.const_marginal_ate_inference(X=X_xf)
            global_inference = NormalInferenceResults(d_t=inf.d_t, d_y=inf.d_y,
                                                      pred=inf.mean_point,
                                                      pred_stderr=inf.stderr_mean,
                                                      mean_pred_stderr=None,
                                                      inf_type='ate')

        # Set the dictionary values shared between local and global summaries
        if discrete_treatment:
            cats = est.transformer.categories_[0]
            baseline = cats[est.transformer.drop_idx_[0]]
            cats = cats[np.setdiff1d(np.arange(len(cats)),
                                     est.transformer.drop_idx_[0])]
            d_t = len(cats)
            insights = {
                _CausalInsightsConstants.TypeKey: ['cat'] * d_t,
                _CausalInsightsConstants.RawFeatureNameKey: [name] * d_t,
                _CausalInsightsConstants.CategoricalColumnKey: cats.tolist(),
                _CausalInsightsConstants.EngineeredNameKey: [
                    f"{name} (base={baseline}): {c}" for c in cats]
            }
            treatment_value = 1
        else:
            d_t = 1
            cats = ["num"]
            baseline = None
            insights = {
                _CausalInsightsConstants.TypeKey: ["num"],
                _CausalInsightsConstants.RawFeatureNameKey: [name],
                _CausalInsightsConstants.CategoricalColumnKey: [name],
                _CausalInsightsConstants.EngineeredNameKey: [name]
            }
            # calculate a "typical" treatment value, using the mean of the absolute value of non-zero treatments
            treatment_value = np.mean(np.abs(T[T != 0]))

        result = _result(feature_index=feat_ind,
                         feature_name=name,
                         feature_baseline=baseline,
                         feature_levels=cats,
                         hinds=hinds,
                         X_transformer=X_transformer,
                         W_transformer=W_transformer,
                         estimator=est,
                         global_inference=global_inference,
                         treatment_value=treatment_value)

        return insights, result
    except Exception as e:
        # Failures are reported to the caller as the exception object itself.
        warnings.warn(f"Exception caught when training model for feature {name}: {e}")
        return e


# Unless we're opting into minimal cross-fitting, this is the minimum number of instances of each category
# required to fit a discrete DML model
_CAT_LIMIT = 10


# TODO: Add other nuisance model options, such as {'azure_automl', 'forests', 'boosting'} that will use particular
#       sub-cases of models or also integrate with azure autoML. (post-MVP)
# TODO: Add other heterogeneity model options, such as {'automl'} for performing
#       model selection for the causal effect, or {'sparse_linear'} for using a debiased lasso. (post-MVP)
# TODO: Enable multi-class classification (post-MVP)
class CausalAnalysis:
    """
    Note: this class is experimental and the API may evolve over our next few releases.

    Gets causal importance of features.

    Parameters
    ----------
    feature_inds: array-like of int, str, or bool
        The features for which to estimate causal effects, expressed as either column indices,
        column names, or boolean flags indicating which columns to pick
    categorical: array-like of int, str, or bool
        The features which are categorical in nature, expressed as either column indices,
        column names, or boolean flags indicating which columns to pick
    heterogeneity_inds: array-like of int, str, or bool, or None or list of array-like elements or None, default None
        If a 1d array, then whenever estimating a heterogeneous (local) treatment effect model,
        then only the features in this array will be used for heterogeneity. If a 2d array then its
        first dimension should be len(feature_inds) and whenever estimating a local causal effect
        for target feature feature_inds[i], then only features in heterogeneity_inds[i] will be used
        for heterogeneity.
If heterogeneity_inds[i]=None, then all features are used for heterogeneity when estimating local causal effect for feature_inds[i], and likewise if heterogeneity_inds[i]=[] then no features will be used for heterogeneity. If heterogeneity_ind=None then all features are used for heterogeneity for all features, and if heterogeneity_inds=[] then no features will be. feature_names: list of str, default None The names for all of the features in the data. Not necessary if the input will be a dataframe. If None and the input is a plain numpy array, generated feature names will be ['X1', 'X2', ...]. upper_bound_on_cat_expansion: int, default 5 The maximum number of categorical values allowed, because they are expanded via one-hot encoding. If a feature has more than this many values, then a causal effect model is not fitted for that target feature and a warning flag is raised. The remainder of the models are fitted. classification: bool, default False Whether this is a classification (as opposed to regression) task nuisance_models: one of {'linear', 'automl'}, default 'linear' The model class to use for nuisance estimation. Separate nuisance models are trained to predict the outcome and also each individual feature column from all of the other columns in the dataset as a prerequisite step before computing the actual causal effect for that feature column. If 'linear', then :class:`~sklearn.linear_model.LassoCV` (for regression) or :class:`~sklearn.linear_model.LogisticRegressionCV` (for classification) is used for these models. If 'automl', then model selection picks the best-performing among several different model classes for each model being trained using k-fold cross-validation, which requires additional computation. heterogeneity_model: one of {'linear', 'forest'}, default 'linear' The type of model to use for the final heterogeneous treatment effect model. 
'linear' means that a the estimated treatment effect for a column will be a linear function of the heterogeneity features for that column, while 'forest' means that a forest model will be trained to compute the effect from those heterogeneity features instead. categories: 'auto' or list of ('auto' or list of values), default 'auto' What categories to use for the categorical columns. If 'auto', then the categories will be inferred for all categorical columns; otherwise this argument should have as many entries as there are categorical columns, and each entry should be either 'auto' to infer the values for that column or the list of values for the column. If explicit values are provided, the first value is treated as the "control" value for that column against which other values are compared. n_jobs: int, default -1 Degree of parallelism to use when training models via joblib.Parallel verbose : int, default=0 Controls the verbosity when fitting and predicting. cv: int, cross-validation generator or an iterable, default 5 Determines the strategy for cross-fitting used when training causal models for each feature. Possible inputs for cv are: - integer, to specify the number of folds. - :term:`CV splitter` - An iterable yielding (train, test) splits as arrays of indices. For integer inputs, if the treatment is discrete :class:`~sklearn.model_selection.StratifiedKFold` is used, else, :class:`~sklearn.model_selection.KFold` is used (with a random shuffle in either case). mc_iters: int, default 3 The number of times to rerun the first stage models to reduce the variance of the causal model nuisances. skip_cat_limit_checks: bool, default False By default, categorical features need to have several instances of each category in order for a model to be fit robustly. Setting this to True will skip these checks (although at least 2 instances will always be required for linear heterogeneity models, and 4 for forest heterogeneity models even in that case). 
random_state : int, RandomState instance or None, default=None Controls the randomness of the estimator. The features are always randomly permuted at each split. When ``max_features < n_features``, the algorithm will select ``max_features`` at random at each split before finding the best split among them. But the best found split may vary across different runs, even if ``max_features=n_features``. That is the case, if the improvement of the criterion is identical for several splits and one split has to be selected at random. To obtain a deterministic behaviour during fitting, ``random_state`` has to be fixed to an integer. Attributes ---------- nuisance_models_: string The nuisance models setting used for the most recent call to fit heterogeneity_model: string The heterogeneity model setting used for the most recent call to fit feature_names_: list of string The list of feature names from the data in the most recent call to fit trained_feature_indices_: list of int The list of feature indices where models were trained successfully untrained_feature_indices_: list of tuple of (int, string or Exception) The list of indices that were requested but not able to be trained succesfully, along with either a reason or caught Exception for each """ def __init__(self, feature_inds, categorical, heterogeneity_inds=None, feature_names=None, classification=False, upper_bound_on_cat_expansion=5, nuisance_models='linear', heterogeneity_model='linear', *, categories='auto', n_jobs=-1, verbose=0, cv=5, mc_iters=3, skip_cat_limit_checks=False, random_state=None): self.feature_inds = feature_inds self.categorical = categorical self.heterogeneity_inds = heterogeneity_inds self.feature_names = feature_names self.classification = classification self.upper_bound_on_cat_expansion = upper_bound_on_cat_expansion self.nuisance_models = nuisance_models self.heterogeneity_model = heterogeneity_model self.categories = categories self.n_jobs = n_jobs self.verbose = verbose self.cv = cv 
        self.mc_iters = mc_iters
        self.skip_cat_limit_checks = skip_cat_limit_checks
        self.random_state = random_state

    def fit(self, X, y, warm_start=False):
        """
        Fits global and local causal effect models for each feature in feature_inds on the data

        Parameters
        ----------
        X : array-like
            Feature data
        y : array-like of shape (n,) or (n,1)
            Outcome. If classification=True, then y should take two values. Otherwise an error is raised
            that only binary classification is implemented for now.
            TODO. enable multi-class classification for y (post-MVP)
        warm_start : boolean, default False
            If False, train models for each feature in `feature_inds`.
            If True, train only models for features in `feature_inds` that had not already been trained by
            the previous call to `fit`, and for which neither the corresponding heterogeneity_inds, nor the
            automl flag have changed. If heterogeneity_inds have changed, then the final stage model of these
            features will be refit. If the automl flag has changed, then the whole model is refit, despite
            the warm start flag.
        """
        # Validate inputs
        assert self.nuisance_models in ['automl', 'linear'], (
            "The only supported nuisance models are 'linear' and 'automl', "
            f"but was given {self.nuisance_models}")
        assert self.heterogeneity_model in ['linear', 'forest'], (
            "The only supported heterogeneity models are 'linear' and 'forest' but received "
            f"{self.heterogeneity_model}")
        assert np.ndim(X) == 2, f"X must be a 2-dimensional array, but here had shape {np.shape(X)}"
        assert iterable(self.feature_inds), f"feature_inds should be array-like, but got {self.feature_inds}"
        assert iterable(self.categorical), f"categorical should be array-like, but got {self.categorical}"
        assert self.heterogeneity_inds is None or iterable(self.heterogeneity_inds), (
            f"heterogeneity_inds should be None or array-like, but got {self.heterogeneity_inds}")
        assert self.feature_names is None or iterable(self.feature_names), (
            f"feature_names should be None or array-like, but got {self.feature_names}")
        assert self.categories == 'auto' or iterable(self.categories), (
            f"categories should be 'auto' or array-like, but got {self.categories}")

        # TODO: check compatibility of X and Y lengths

        if warm_start:
            if not hasattr(self, "_results"):
                # no previous fit, cancel warm start
                warm_start = False
            elif self._d_x != X.shape[1]:
                raise ValueError(
                    f"Can't warm start: previous X had {self._d_x} columns, new X has {X.shape[1]} columns")

        # work with numeric feature indices, so that we can easily compare with categorical ones
        train_inds = _get_column_indices(X, self.feature_inds)

        if len(train_inds) == 0:
            raise ValueError(
                "No features specified. At least one feature index must be specified so that a model can be trained.")

        heterogeneity_inds = self.heterogeneity_inds
        if heterogeneity_inds is None:
            heterogeneity_inds = [None for ind in train_inds]

        # if heterogeneity_inds is 1D, repeat it
        if heterogeneity_inds == [] or isinstance(heterogeneity_inds[0], (int, str, bool)):
            heterogeneity_inds = [heterogeneity_inds for _ in train_inds]

        # heterogeneity inds should be a 2D list of length same as train_inds
        elif heterogeneity_inds is not None and len(heterogeneity_inds) != len(train_inds):
            raise ValueError("Heterogeneity indexes should have the same number of entries, but here "
                             f" there were {len(heterogeneity_inds)} heterogeneity entries but "
                             f" {len(train_inds)} feature indices.")

        # replace None elements of heterogeneity_inds and ensure indices are numeric
        heterogeneity_inds = {ind: list(range(X.shape[1])) if hinds is None else _get_column_indices(X, hinds)
                              for ind, hinds in zip(train_inds, heterogeneity_inds)}

        if warm_start:
            train_y_model = False
            # A change of nuisance or heterogeneity model invalidates the cached per-feature models,
            # so warm start must be abandoned; the Y model only depends on the nuisance setting.
            if self.nuisance_models != self.nuisance_models_:
                warnings.warn("warm_start will be ignored since the nuisance models have changed "
                              f"from {self.nuisance_models_} to {self.nuisance_models} since the previous call to fit")
                warm_start = False
                train_y_model = True

            if self.heterogeneity_model != self.heterogeneity_model_:
                warnings.warn("warm_start will be ignored since the heterogeneity model has changed "
                              f"from {self.heterogeneity_model_} to {self.heterogeneity_model} "
                              "since the previous call to fit")
                warm_start = False

            # TODO: bail out also if categorical columns, classification, random_state changed?
        else:
            train_y_model = True

        # TODO: should we also train a new model_y under any circumstances when warm_start is True?

        if warm_start:
            # only retrain features that are new or whose heterogeneity columns changed
            new_inds = [ind for ind in train_inds if (ind not in self._cache or
                                                      heterogeneity_inds[ind] != self._cache[ind][1].hinds)]
        else:
            new_inds = list(train_inds)

            self._cache = {}  # store mapping from feature to insights, results

        # train the Y model
        if train_y_model:
            # perform model selection for the Y model using all X, not on a per-column basis
            # NOTE(review): `sparse=` was renamed `sparse_output=` in scikit-learn 1.2 —
            # confirm the pinned scikit-learn version supports this spelling
            allX = ColumnTransformer([('encode', OneHotEncoder(
                drop='first', sparse=False), self.categorical)],
                remainder=StandardScaler()).fit_transform(X)

            if self.verbose > 0:
                print("CausalAnalysis: performing model selection on overall Y model")

            if self.classification:
                self._model_y = _first_stage_clf(allX, y, automl=self.nuisance_models == 'automl',
                                                 make_regressor=True,
                                                 random_state=self.random_state, verbose=self.verbose)
            else:
                self._model_y = _first_stage_reg(allX, y, automl=self.nuisance_models == 'automl',
                                                 random_state=self.random_state, verbose=self.verbose)

        if self.classification:
            # now that we've trained the classifier and wrapped it, ensure that y is transformed to
            # work with the regression wrapper

            # we use column_or_1d to treat pd.Series and pd.DataFrame objects the same way as arrays
            y = column_or_1d(y).reshape(-1, 1)

            # note that this needs to happen after wrapping to generalize to the multi-class case,
            # since otherwise we'll have too many columns to be able to train a classifier
            y = OneHotEncoder(drop='first', sparse=False).fit_transform(y)

        assert y.ndim == 1 or y.shape[1] == 1, ("Multiclass classification isn't supported" if self.classification
                                                else "Only a single outcome is supported")

        self._vec_y = y.ndim == 1
        self._d_x = X.shape[1]

        # start with empty results and default shared insights
        self._results = []
        self._shared = _get_default_shared_insights_output()
        self._shared[_CausalInsightsConstants.InitArgsKey] = {
            'feature_inds': _sanitize(self.feature_inds),
            'categorical': _sanitize(self.categorical),
            'heterogeneity_inds': _sanitize(self.heterogeneity_inds),
            'feature_names': _sanitize(self.feature_names),
            'classification': _sanitize(self.classification),
            'upper_bound_on_cat_expansion': _sanitize(self.upper_bound_on_cat_expansion),
            'nuisance_models': _sanitize(self.nuisance_models),
            'heterogeneity_model': _sanitize(self.heterogeneity_model),
            'categories': _sanitize(self.categories),
            'n_jobs': _sanitize(self.n_jobs),
            'verbose': _sanitize(self.verbose),
            'random_state': _sanitize(self.random_state)
        }

        # convert categorical indicators to numeric indices
        categorical_inds = _get_column_indices(X, self.categorical)

        categories = self.categories
        if categories == 'auto':
            categories = ['auto' for _ in categorical_inds]
        else:
            # NOTE(review): this assertion message is missing a space between the two string
            # fragments ("categories" "has length" renders as "categorieshas length"); fixing it
            # changes runtime output, so it is only flagged here
            assert len(categories) == len(categorical_inds), (
                "If categories is not 'auto', it must contain one entry per categorical column. Instead, categories"
                f"has length {len(categories)} while there are {len(categorical_inds)} categorical columns.")

        # check for indices over the categorical expansion bound
        invalid_inds = getattr(self, 'untrained_feature_indices_', [])

        # assume we'll be able to train former failures this time; we'll add them back if not
        invalid_inds = [(ind, reason) for (ind, reason) in invalid_inds if ind not in new_inds]

        self._has_column_names = True
        if self.feature_names is None:
            if hasattr(X, "iloc"):
                feature_names = X.columns
            else:
                self._has_column_names = False
                feature_names = [f"x{i}" for i in range(X.shape[1])]
        else:
            feature_names = self.feature_names

        self.feature_names_ = feature_names

        min_counts = {}
        for ind in new_inds:
            column_text = self._format_col(ind)

            if ind in categorical_inds:
                cats, counts = np.unique(_safe_indexing(X, ind, axis=1),
                                         return_counts=True)
                min_ind = np.argmin(counts)
                n_cat = len(cats)
                if n_cat > self.upper_bound_on_cat_expansion:
                    warnings.warn(f"{column_text} has more than {self.upper_bound_on_cat_expansion} "
                                  f"values (found {n_cat}) so no heterogeneity model will be fit for it; "
                                  "increase 'upper_bound_on_cat_expansion' to change this behavior.")
                    # can't remove in place while iterating over new_inds, so store in separate list
                    invalid_inds.append((ind, 'upper_bound_on_cat_expansion'))

                elif counts[min_ind] < _CAT_LIMIT:
                    if self.skip_cat_limit_checks and (counts[min_ind] >= 5 or
                                                       (counts[min_ind] >= 2 and
                                                        self.heterogeneity_model != 'forest')):
                        # train the model, but warn
                        warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "
                                      f"the training dataset, which is less than the lower limit ({_CAT_LIMIT}). "
                                      "A model will still be fit because 'skip_cat_limit_checks' is True, "
                                      "but this model may not be robust.")
                        min_counts[ind] = counts[min_ind]
                    elif counts[min_ind] < 2 or (counts[min_ind] < 5 and
                                                 self.heterogeneity_model == 'forest'):
                        # no model can be trained in this case since we need more folds
                        warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "
                                      "the training dataset, but linear heterogeneity models need at least 2 and "
                                      "forest heterogeneity models need at least 5 instances, so no model will be fit "
                                      "for this column")
                        invalid_inds.append((ind, 'cat_limit'))
                    else:
                        # don't train a model, but suggest workaround since there are enough instances of least
                        # populated class
                        warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "
                                      f"the training dataset, which is less than the lower limit ({_CAT_LIMIT}), "
                                      "so no heterogeneity model will be fit for it. This check can be turned off by "
                                      "setting 'skip_cat_limit_checks' to True, but that may result in an inaccurate "
                                      "model for this feature.")
                        invalid_inds.append((ind, 'cat_limit'))

        for (ind, _) in invalid_inds:
            new_inds.remove(ind)
            # also remove from train_inds so we don't try to access the result later
            train_inds.remove(ind)
            if len(train_inds) == 0:
                raise ValueError("No features remain; increase the upper_bound_on_cat_expansion and ensure that there "
                                 "are several instances of each categorical value so that at least "
                                 "one feature model can be trained.")

        # extract subset of names matching new columns
        new_feat_names = _safe_indexing(feature_names, new_inds)

        # train one causal model per remaining feature in parallel; each entry is either an
        # (insights, result) pair or the Exception raised while training that feature
        cache_updates = dict(zip(new_inds,
                                 joblib.Parallel(
                                     n_jobs=self.n_jobs,
                                     verbose=self.verbose
                                 )(joblib.delayed(_process_feature)(
                                     feat_name, feat_ind,
                                     self.verbose, categorical_inds, categories, heterogeneity_inds, min_counts, y, X,
                                     self.nuisance_models, self.heterogeneity_model, self.random_state, self._model_y,
                                     self.cv, self.mc_iters)
                                     for feat_name, feat_ind in zip(new_feat_names, new_inds))))

        # track indices where an exception was thrown, since we can't remove from dictionary while iterating
        inds_to_remove = []
        for ind, value in cache_updates.items():
            if isinstance(value, Exception):
                # don't want to cache this failed result
                inds_to_remove.append(ind)
                train_inds.remove(ind)
                invalid_inds.append((ind, value))

        for ind in inds_to_remove:
            del cache_updates[ind]

        self._cache.update(cache_updates)

        for ind in train_inds:
            dict_update, result = self._cache[ind]
            self._results.append(result)
            for k in dict_update:
                self._shared[k] += dict_update[k]

        invalid_inds.sort()
        self.untrained_feature_indices_ = invalid_inds
        self.trained_feature_indices_ = train_inds

        self.nuisance_models_ = self.nuisance_models
        self.heterogeneity_model_ = self.heterogeneity_model

        return self

    def _format_col(self, ind):
        # Human-readable column description used in warnings and error messages;
        # includes the column name only when one is available (dataframe input or feature_names)
        if self._has_column_names:
            return f"Column {ind} ({self.feature_names_[ind]})"
        else:
            return f"Column {ind}"

    # properties to return from
    # effect InferenceResults
    @staticmethod
    def _point_props(alpha):
        # (column key, attribute-or-lambda) pairs extracted from an effect InferenceResults object
        return [(_CausalInsightsConstants.PointEstimateKey, 'point_estimate'),
                (_CausalInsightsConstants.StandardErrorKey, 'stderr'),
                (_CausalInsightsConstants.ZStatKey, 'zstat'),
                (_CausalInsightsConstants.PValueKey, 'pvalue'),
                (_CausalInsightsConstants.ConfidenceIntervalLowerKey, lambda inf: inf.conf_int(alpha=alpha)[0]),
                (_CausalInsightsConstants.ConfidenceIntervalUpperKey, lambda inf: inf.conf_int(alpha=alpha)[1])]

    # properties to return from PopulationSummaryResults
    @staticmethod
    def _summary_props(alpha):
        return [(_CausalInsightsConstants.PointEstimateKey, 'mean_point'),
                (_CausalInsightsConstants.StandardErrorKey, 'stderr_mean'),
                (_CausalInsightsConstants.ZStatKey, 'zstat'),
                (_CausalInsightsConstants.PValueKey, 'pvalue'),
                (_CausalInsightsConstants.ConfidenceIntervalLowerKey, lambda inf: inf.conf_int_mean(alpha=alpha)[0]),
                (_CausalInsightsConstants.ConfidenceIntervalUpperKey, lambda inf: inf.conf_int_mean(alpha=alpha)[1])]

    # Converts strings to property lookups or method calls as a convenience so that the
    # _point_props and _summary_props above can be applied to an inference object
    @staticmethod
    def _make_accessor(attr):
        # strings name an attribute (called if callable); lambdas/callables pass through unchanged
        if isinstance(attr, str):
            s = attr

            def attr(o):
                val = getattr(o, s)
                if callable(val):
                    return val()
                else:
                    return val
        return attr

    # Create a summary combining all results into a single output; this is used
    # by the various causal_effect and causal_effect_dict methods to generate either a dataframe
    # or a dictionary, respectively, based on the summary function passed into this method
    def _summarize(self, *, summary, get_inference, props, expand_arr, drop_sample):

        assert hasattr(self, "_results"), "This object has not been fit, so cannot get results"

        # ensure array has shape (m,y,t)
        def ensure_proper_dims(arr):
            if expand_arr:
                # population summary is missing sample dimension; add it for consistency
                arr = np.expand_dims(arr, 0)
            if self._vec_y:
                # outcome dimension is missing; add it for consistency
                arr = np.expand_dims(arr, axis=1)
            assert 2 <= arr.ndim <= 3
            # add singleton treatment dimension if missing
            return arr if arr.ndim == 3 else np.expand_dims(arr, axis=2)

        # store set of inference results so we don't need to recompute per-attribute below in summary/coalesce
        infs = [get_inference(res) for res in self._results]

        # each attr has dimension (m,y) or (m,y,t)
        def coalesce(attr):
            """Join together the arrays for each feature"""
            attr = self._make_accessor(attr)
            # concatenate along treatment dimension
            arr = np.concatenate([ensure_proper_dims(attr(inf))
                                  for inf in infs], axis=2)

            # for dictionary representation, want to remove unneeded sample dimension
            # in cohort and global results
            if drop_sample:
                arr = np.squeeze(arr, 0)

            return arr

        return summary([(key, coalesce(val)) for key, val in props])

    def _pandas_summary(self, get_inference, *, props, n,
                        expand_arr=False, keep_all_levels=False):
        """
        Summarizes results into a dataframe.

        Parameters
        ----------
        get_inference : lambda
            Method to get the relevant inference results from each result object
        props : list of (string, string or lambda)
            Set of column names and ways to get the corresponding values from the inference object
        n : int
            The number of samples in the dataset
        expand_arr : boolean, default False
            Whether to add a synthetic sample dimension to the result arrays when performing internal computations
        keep_all_levels : boolean, default False
            Whether to keep all levels, even when they don't take on more than one value;
            Note that regardless of this argument the "sample" level will only be present if expand_arr is False
        """
        def make_dataframe(props):

            to_include = OrderedDict([(key, value.reshape(-1))
                                      for key, value in props])

            # TODO: enrich outcome logic for multi-class classification when that is supported
            index = pd.MultiIndex.from_tuples([(i, outcome, res.feature_name,
                                                f"{lvl}v{res.feature_baseline}"
                                                if res.feature_baseline is not None
                                                else lvl)
                                               for i in range(n)
                                               for outcome in ["y0"]
                                               for res in self._results
                                               for lvl in res.feature_levels],
                                              names=["sample", "outcome", "feature", "feature_value"])
            if expand_arr:
                # There is no actual sample level in this data
                index = index.droplevel("sample")

            if not keep_all_levels:
                # drop index levels that carry no information (only a single distinct value)
                for lvl in index.levels:
                    if len(lvl) == 1:
                        if not isinstance(index, pd.MultiIndex):
                            # can't drop only level
                            index = pd.Index([self._results[0].feature_name], name="feature")
                        else:
                            index = index.droplevel(lvl.name)

            return pd.DataFrame(to_include, index=index)

        return self._summarize(summary=make_dataframe,
                               get_inference=get_inference,
                               props=props,
                               expand_arr=expand_arr,
                               drop_sample=False)  # dropping the sample dimension is handled above instead

    def _dict_summary(self, get_inference, *, n, props, kind, drop_sample=False, expand_arr=False, row_wise=False):
        """
        Summarizes results into a dictionary.

        Parameters
        ----------
        get_inference : lambda
            Method to get the relevant inference results from each result object
        n : int
            The number of samples in the dataset
        props : list of (string, string or lambda)
            Set of column names and ways to get the corresponding values from the inference object
        kind : string
            The kind of inference results to get (e.g. 'global', 'local', or 'cohort')
        drop_sample : boolean, default False
            Whether to drop the sample dimension from each array
        expand_arr : boolean, default False
            Whether to add an initial sample dimension to the result arrays
        row_wise : boolean, default False
            Whether to return a list of dictionaries (one dictionary per row) instead of
            a dictionary of lists (one list per column)
        """
        def make_dict(props):
            # should be serialization-ready and contain no numpy arrays
            res = _get_default_specific_insights(kind)
            shared = self._shared

            if row_wise:
                row_data = {}

                # remove entries belonging to row data, since we're including them in the list of nested dictionaries
                for k in _get_data_causal_insights_keys():
                    del res[k]

                shared = shared.copy()  # copy so that we can modify without affecting shared state

                # TODO: Note that there's no column metadata for the sample number - should there be?
                for k in _get_column_causal_insights_keys():
                    # need to replicate the column info for each sample, then remove from the shared data
                    row_data[k] = shared[k] * n
                    del shared[k]

                # NOTE: the flattened order has the output dimension before the feature dimension
                #       which may need to be revisited once we support multiclass
                row_data.update([(key, value.flatten()) for key, value in props])

                # get the length of the list corresponding to the first dictionary key
                # `list(row_data)` gets the keys as a list, since `row_data.keys()` can't be indexed into
                n_rows = len(row_data[list(row_data)[0]])
                res[_CausalInsightsConstants.RowData] = [
                    {key: row_data[key][i] for key in row_data} for i in range(n_rows)]
            else:
                res.update([(key, value.tolist()) for key, value in props])

            return {**shared, **res}

        return self._summarize(summary=make_dict,
                               get_inference=get_inference,
                               props=props,
                               expand_arr=expand_arr,
                               drop_sample=drop_sample)

    def global_causal_effect(self, *, alpha=0.05, keep_all_levels=False):
        """
        Get the global causal effect for each feature as a pandas DataFrame.
        Parameters
        ----------
        alpha : float, default 0.05
            The confidence level of the confidence interval
        keep_all_levels : bool, default False
            Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level')
            even if there was only a single value for that level; by default single-valued levels are dropped.

        Returns
        -------
        global_effects : pandas Dataframe
            DataFrame with the following structure:

            :Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper']
            :Index: ['feature', 'feature_value']
            :Rows: For each feature that is numerical, we have an entry with index ['{feature_name}', 'num'], where
                    'num' is literally the string 'num' and feature_name is the input feature name.
                    For each feature that is categorical, we have an entry with index ['{feature_name}',
                    '{cat}v{base}'] where cat is the category value and base is the category used as baseline.
                    If all features are numerical then the feature_value index is dropped in the dataframe,
                    but not in the serialized dict.
        """
        # a global inference indicates the effect of that one feature on the outcome
        return self._pandas_summary(lambda res: res.global_inference, props=self._point_props(alpha),
                                    n=1, expand_arr=True, keep_all_levels=keep_all_levels)

    def _global_causal_effect_dict(self, *, alpha=0.05, row_wise=False):
        """
        Gets the global causal effect for each feature as dictionary.

        Dictionary entries for predictions, etc. will be nested lists of shape (d_y, sum(d_t))

        Only for serialization purposes to upload to AzureML
        """
        return self._dict_summary(lambda res: res.global_inference, props=self._point_props(alpha),
                                  kind='global', n=1, row_wise=row_wise, drop_sample=True, expand_arr=True)

    def _cohort_effect_inference(self, Xtest):
        # Returns a per-result callable computing the average marginal effect over the cohort Xtest,
        # transformed through each result's own X_transformer (None when no heterogeneity columns remain)
        assert np.ndim(Xtest) == 2 and np.shape(Xtest)[1] == self._d_x, (
            "Shape of Xtest must be compatible with shape of X, "
            f"but got shape {np.shape(Xtest)} instead of (n, {self._d_x})"
        )

        def inference_from_result(result):
            est = result.estimator
            X = result.X_transformer.transform(Xtest)
            if X.shape[1] == 0:
                X = None
            return est.const_marginal_ate_inference(X=X)
        return inference_from_result

    def cohort_causal_effect(self, Xtest, *, alpha=0.05, keep_all_levels=False):
        """
        Gets the average causal effects for a particular cohort defined by a population of X's.

        Parameters
        ----------
        Xtest : array-like
            The cohort samples for which to return the average causal effects within cohort
        alpha : float, default 0.05
            The confidence level of the confidence interval
        keep_all_levels : bool, default False
            Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level')
            even if there was only a single value for that level; by default single-valued levels are dropped.

        Returns
        -------
        cohort_effects : pandas Dataframe
            DataFrame with the following structure:

            :Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper']
            :Index: ['feature', 'feature_value']
            :Rows: For each feature that is numerical, we have an entry with index ['{feature_name}', 'num'], where
                    'num' is literally the string 'num' and feature_name is the input feature name.
                    For each feature that is categorical, we have an entry with index ['{feature_name}',
                    '{cat}v{base}'] where cat is the category value and base is the category used as baseline.
                    If all features are numerical then the feature_value index is dropped in the dataframe,
                    but not in the serialized dict.
        """
        return self._pandas_summary(self._cohort_effect_inference(Xtest),
                                    props=self._summary_props(alpha), n=1,
                                    expand_arr=True, keep_all_levels=keep_all_levels)

    def _cohort_causal_effect_dict(self, Xtest, *, alpha=0.05, row_wise=False):
        """
        Gets the cohort causal effects for each feature as dictionary.

        Dictionary entries for predictions, etc. will be nested lists of shape (d_y, sum(d_t))

        Only for serialization purposes to upload to AzureML
        """
        return self._dict_summary(self._cohort_effect_inference(Xtest), props=self._summary_props(alpha),
                                  kind='cohort', n=1, row_wise=row_wise, expand_arr=True, drop_sample=True)

    def _local_effect_inference(self, Xtest):
        # Returns a per-result callable computing per-sample marginal effects on Xtest; when a result
        # has no heterogeneity columns the constant effect is expanded back to one row per input sample
        assert np.ndim(Xtest) == 2 and np.shape(Xtest)[1] == self._d_x, (
            "Shape of Xtest must be compatible with shape of X, "
            f"but got shape {np.shape(Xtest)} instead of (n, {self._d_x})"
        )

        def inference_from_result(result):
            est = result.estimator
            X = result.X_transformer.transform(Xtest)
            if X.shape[1] == 0:
                X = None
            eff = est.const_marginal_effect_inference(X=X)
            if X is None:
                # need to reshape the output to match the input
                eff = eff._expand_outputs(Xtest.shape[0])
            return eff
        return inference_from_result

    def local_causal_effect(self, Xtest, *, alpha=0.05, keep_all_levels=False):
        """
        Gets the local causal effect for each feature as a pandas DataFrame.

        Parameters
        ----------
        Xtest : array-like
            The samples for which to return the causal effects
        alpha : float, default 0.05
            The confidence level of the confidence interval
        keep_all_levels : bool, default False
            Whether to keep all levels of the output dataframe ('sample', 'outcome', 'feature', and 'feature_level')
            even if there was only a single value for that level; by default single-valued levels are dropped.

        Returns
        -------
        local_effects : pandas Dataframe
            DataFrame with the following structure:

            :Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper']
            :Index: ['sample', 'feature', 'feature_value']
            :Rows: For each feature that is numeric, we have an entry with index
                   ['{sampleid}', '{feature_name}', 'num'],
                   where 'num' is literally the string 'num' and feature_name is the input feature name
                   and sampleid is the index of the sample in Xtest.
                   For each feature that is categorical, we have an entry with index
                   ['{sampleid}', '{feature_name}', '{cat}v{base}']
                   where cat is the category value and base is the category used as baseline.
                   If all features are numerical then the feature_value index is dropped in the dataframe,
                   but not in the serialized dict.
        """
        return self._pandas_summary(self._local_effect_inference(Xtest),
                                    props=self._point_props(alpha),
                                    n=Xtest.shape[0], keep_all_levels=keep_all_levels)

    def _local_causal_effect_dict(self, Xtest, *, alpha=0.05, row_wise=False):
        """
        Gets the local feature importance as dictionary

        Dictionary entries for predictions, etc.
        will be nested lists of shape (n_rows, d_y, sum(d_t))

        Only for serialization purposes to upload to AzureML
        """
        return self._dict_summary(self._local_effect_inference(Xtest), props=self._point_props(alpha),
                                  kind='local', n=Xtest.shape[0], row_wise=row_wise)

    def _safe_result_index(self, X, feature_index):
        # Resolve feature_index (name or position) to the single trained result for that column,
        # raising a descriptive ValueError when the column was skipped or failed during fit
        assert hasattr(self, "_results"), "This instance has not yet been fitted"

        assert np.ndim(X) == 2 and np.shape(X)[1] == self._d_x, (
            "Shape of X must be compatible with shape of the fitted X, "
            f"but got shape {np.shape(X)} instead of (n, {self._d_x})"
        )

        (numeric_index,) = _get_column_indices(X, [feature_index])
        bad_inds = dict(self.untrained_feature_indices_)
        if numeric_index in bad_inds:
            error = bad_inds[numeric_index]
            col_text = self._format_col(numeric_index)
            if error == 'cat_limit':
                # NOTE(review): "occurences" is misspelled in this user-facing message;
                # fixing it would change runtime output, so it is only flagged here
                msg = f"{col_text} had a value with fewer than {_CAT_LIMIT} occurences, so no model was fit for it"
            elif error == 'upper_bound_on_cat_expansion':
                msg = (f"{col_text} had more distinct values than the setting of 'upper_bound_on_cat_expansion', "
                       "so no model was fit for it")
            else:
                msg = (f"{col_text} generated the following error during fitting, "
                       f"so no model was fit for it:\n{str(error)}")
            raise ValueError(msg)

        if numeric_index not in self.trained_feature_indices_:
            raise ValueError(f"{self._format_col(numeric_index)} was not passed as a feature index "
                             "so no model was fit for it")

        results = [res for res in self._results
                   if res.feature_index == numeric_index]

        assert len(results) == 1
        (result,) = results
        return result

    def _whatif_inference(self, X, Xnew, feature_index, y):
        # Counterfactual inference for changing one column from its observed values (T0) to Xnew (T1);
        # regression only, and the result is shifted by the observed/baseline y via inf.translate
        assert not self.classification, "What-if analysis cannot be applied to classification tasks"

        assert np.shape(X)[0] == np.shape(Xnew)[0] == np.shape(y)[0], (
            "X, Xnew, and y must have the same length, but have shapes "
            f"{np.shape(X)}, {np.shape(Xnew)}, and {np.shape(y)}"
        )

        assert np.size(feature_index) == 1, f"Only one feature index may be changed, but got {np.size(feature_index)}"

        T0 = _safe_indexing(X, feature_index, axis=1)
        T1 = Xnew
        result = self._safe_result_index(X, feature_index)
        X = result.X_transformer.transform(X)
        if X.shape[1] == 0:
            X = None
        inf = result.estimator.effect_inference(X=X, T0=T0, T1=T1)

        # we want to offset the inference object by the baseline estimate of y
        inf.translate(y)

        return inf

    def whatif(self, X, Xnew, feature_index, y, *, alpha=0.05):
        """
        Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart.

        Note that this only applies to regression use cases; for classification what-if analysis
        is not supported.

        Parameters
        ----------
        X: array-like
            Features
        Xnew: array-like
            New values of a single column of X
        feature_index: int or string
            The index of the feature being varied to Xnew, either as a numeric index or
            the string name if the input is a dataframe
        y: array-like
            Observed labels or outcome of a predictive model for baseline y values
        alpha : float in [0, 1], default 0.05
            Confidence level of the confidence intervals displayed in the leaf nodes.
            A (1-alpha)*100% confidence interval is displayed.

        Returns
        -------
        y_new: DataFrame
            The predicted outputs that would have been observed under the counterfactual features
        """
        return self._whatif_inference(X, Xnew, feature_index, y).summary_frame(alpha=alpha)

    def _whatif_dict(self, X, Xnew, feature_index, y, *, alpha=0.05, row_wise=False):
        """
        Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart.

        Note that this only applies to regression use cases; for classification what-if analysis
        is not supported.
Parameters ---------- X: array-like Features Xnew: array-like New values of a single column of X feature_index: int or string The index of the feature being varied to Xnew, either as a numeric index or the string name if the input is a dataframe y: array-like Observed labels or outcome of a predictive model for baseline y values alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. row_wise : boolean, default False Whether to return a list of dictionaries (one dictionary per row) instead of a dictionary of lists (one list per column) Returns ------- dict : dict The counterfactual predictions, as a dictionary """ inf = self._whatif_inference(X, Xnew, feature_index, y) props = self._point_props(alpha=alpha) res = _get_default_specific_insights('whatif') if row_wise: row_data = {} # remove entries belonging to row data, since we're including them in the list of nested dictionaries for k in _get_data_causal_insights_keys(): del res[k] row_data.update([(key, self._make_accessor(attr)(inf).flatten()) for key, attr in props]) # get the length of the list corresponding to the first dictionary key # `list(row_data)` gets the keys as a list, since `row_data.keys()` can't be indexed into n_rows = len(row_data[list(row_data)[0]]) res[_CausalInsightsConstants.RowData] = [{key: row_data[key][i] for key in row_data} for i in range(n_rows)] else: res.update([(key, self._make_accessor(attr)(inf).tolist()) for key, attr in props]) return res def _tree(self, is_policy, Xtest, feature_index, *, treatment_costs=0, max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4, include_model_uncertainty=False, alpha=0.05): result = self._safe_result_index(Xtest, feature_index) Xtest = result.X_transformer.transform(Xtest) if Xtest.shape[1] == 0: Xtest = None if result.feature_baseline is None: treatment_names = ['decrease', 'increase'] else: treatment_names = 
[f"{result.feature_baseline}"] + \ [f"{lvl}" for lvl in result.feature_levels] TreeType = SingleTreePolicyInterpreter if is_policy else SingleTreeCateInterpreter intrp = TreeType(include_model_uncertainty=include_model_uncertainty, uncertainty_level=alpha, max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_impurity_decrease=min_impurity_decrease, random_state=self.random_state) if is_policy: intrp.interpret(result.estimator, Xtest, sample_treatment_costs=treatment_costs) if result.feature_baseline is None: # continuous treatment, so apply a treatment level 10% of typical treatment_level = result.treatment_value * 0.1 # NOTE: this calculation is correct only if treatment costs are marginal costs, # because then scaling the difference between treatment value and treatment costs is the # same as scaling the treatment value and subtracting the scaled treatment cost. # # Note also that unlike the standard outputs of the SinglePolicyTreeInterpreter, for # continuous treatments, the policy value should include the benefit of decreasing treatments # (rather than just not treating at all) # # We can get the total by seeing that if we restrict attention to units where we would treat, # 2 * policy_value - always_treat # includes exactly their contribution because policy_value and always_treat both include it # and likewise restricting attention to the units where we want to decrease treatment, # 2 * policy_value - always-treat # also computes the *benefit* of decreasing treatment, because their contribution to policy_value # is zero and the contribution to always_treat is negative treatment_total = (2 * intrp.policy_value_ - intrp.always_treat_value_.item()) * treatment_level always_totals = intrp.always_treat_value_ * treatment_level else: treatment_total = intrp.policy_value_ always_totals = intrp.always_treat_value_ policy_values = treatment_total, always_totals else: # no policy values for CATE trees intrp.interpret(result.estimator, Xtest) policy_values = None 
return intrp, result.X_transformer.get_feature_names(self.feature_names_), treatment_names, policy_values # TODO: it seems like it would be better to just return the tree itself rather than plot it; # however, the tree can't store the feature and treatment names we compute here... def plot_policy_tree(self, Xtest, feature_index, *, treatment_costs=0, max_depth=3, min_samples_leaf=2, min_value_increase=1e-4, include_model_uncertainty=False, alpha=0.05): """ Plot a recommended policy tree using matplotlib. Parameters ---------- X : array-like Features feature_index Index of the feature to be considered as treament treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per unit of treatment; for discrete features, this is the difference in cost between each of the non-default values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1)) max_depth : int, default 3 maximum depth of the tree min_samples_leaf : int, default 2 minimum number of samples on each leaf min_value_increase : float, default 1e-4 The minimum increase in the policy value that a split needs to create to construct it include_model_uncertainty : bool, default False Whether to include confidence interval information when building a simplified model of the cate model. alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. 
""" intrp, feature_names, treatment_names, _ = self._tree(True, Xtest, feature_index, treatment_costs=treatment_costs, max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_impurity_decrease=min_value_increase, include_model_uncertainty=include_model_uncertainty, alpha=alpha) return intrp.plot(feature_names=feature_names, treatment_names=treatment_names) def _policy_tree_output(self, Xtest, feature_index, *, treatment_costs=0, max_depth=3, min_samples_leaf=2, min_value_increase=1e-4, alpha=0.05): """ Get a tuple of policy outputs. The first item in the tuple is the recommended policy tree expressed as a dictionary. The second item is the per-unit-average value of applying the learned policy; if the feature is continuous this means the gain from increasing the treatment by 10% of the typical amount for units where the treatment should be increased and decreasing the treatment by 10% of the typical amount when not. The third item is the value of always treating. This is a list, with one entry per non-control-treatment for discrete features, or just a single entry for continuous features, again increasing by 10% of a typical amount. Parameters ---------- X : array-like Features feature_index Index of the feature to be considered as treament treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per unit of treatment; for discrete features, this is the difference in cost between each of the non-default values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1)) max_depth : int, default 3 maximum depth of the tree min_samples_leaf : int, default 2 minimum number of samples on each leaf min_value_increase : float, default 1e-4 The minimum increase in the policy value that a split needs to create to construct it alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. 
A (1-alpha)*100% confidence interval is displayed. Returns ------- output : _PolicyOutput """ (intrp, feature_names, treatment_names, (policy_val, always_trt)) = self._tree(True, Xtest, feature_index, treatment_costs=treatment_costs, max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_impurity_decrease=min_value_increase, alpha=alpha) def policy_data(tree, node_id, node_dict): return {'treatment': treatment_names[np.argmax(tree.value[node_id])]} return _PolicyOutput(_tree_interpreter_to_dict(intrp, feature_names, policy_data), policy_val, {treatment_names[i + 1]: val for (i, val) in enumerate(always_trt.tolist())}, treatment_names[0]) # TODO: it seems like it would be better to just return the tree itself rather than plot it; # however, the tree can't store the feature and treatment names we compute here... def plot_heterogeneity_tree(self, Xtest, feature_index, *, max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4, include_model_uncertainty=False, alpha=0.05): """ Plot an effect hetergoeneity tree using matplotlib. Parameters ---------- X : array-like Features feature_index Index of the feature to be considered as treament max_depth : int, default 3 maximum depth of the tree min_samples_leaf : int, default 2 minimum number of samples on each leaf min_impurity_decrease : float, default 1e-4 The minimum decrease in the impurity/uniformity of the causal effect that a split needs to achieve to construct it include_model_uncertainty : bool, default False Whether to include confidence interval information when building a simplified model of the cate model. alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. 
""" intrp, feature_names, treatment_names, _ = self._tree(False, Xtest, feature_index, max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_impurity_decrease=min_impurity_decrease, include_model_uncertainty=include_model_uncertainty, alpha=alpha) return intrp.plot(feature_names=feature_names, treatment_names=treatment_names) def _heterogeneity_tree_output(self, Xtest, feature_index, *, max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4, include_model_uncertainty=False, alpha=0.05): """ Get an effect heterogeneity tree expressed as a dictionary. Parameters ---------- X : array-like Features feature_index Index of the feature to be considered as treament max_depth : int, optional (default=3) maximum depth of the tree min_samples_leaf : int, optional (default=2) minimum number of samples on each leaf min_impurity_decrease : float, optional (default=1e-4) The minimum decrease in the impurity/uniformity of the causal effect that a split needs to achieve to construct it include_model_uncertainty : bool, default False Whether to include confidence interval information when building a simplified model of the cate model. alpha : float in [0, 1], default 0.05 Confidence level of the confidence intervals displayed in the leaf nodes. A (1-alpha)*100% confidence interval is displayed. 
""" intrp, feature_names, _, _ = self._tree(False, Xtest, feature_index, max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_impurity_decrease=min_impurity_decrease, include_model_uncertainty=include_model_uncertainty, alpha=alpha) def hetero_data(tree, node_id, node_dict): if include_model_uncertainty: return {'effect': _sanitize(tree.value[node_id]), 'ci': _sanitize(node_dict[node_id]['ci'])} else: return {'effect': _sanitize(tree.value[node_id])} return _tree_interpreter_to_dict(intrp, feature_names, hetero_data) def individualized_policy(self, Xtest, feature_index, *, n_rows=None, treatment_costs=0, alpha=0.05): """ Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect. Parameters ---------- Xtest: array-like Features feature_index: int or string Index of the feature to be considered as treatment n_rows: int, optional How many rows to return (all rows by default) treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample. 
For continuous features this is the marginal cost per unit of treatment; for discrete features, this is the difference in cost between each of the non-default values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1)) alpha: float in [0, 1], default 0.05 Confidence level of the confidence intervals A (1-alpha)*100% confidence interval is returned Returns ------- output: DataFrame Dataframe containing recommended treatment, effect, confidence interval, sorted by effect """ result = self._safe_result_index(Xtest, feature_index) # get dataframe with all but selected column orig_df = pd.DataFrame(Xtest, columns=self.feature_names_).rename( columns={self.feature_names_[result.feature_index]: 'Current treatment'}) Xtest = result.X_transformer.transform(Xtest) if Xtest.shape[1] == 0: x_rows = Xtest.shape[0] Xtest = None if result.feature_baseline is None: # apply 10% of a typical treatment for this feature effect = result.estimator.effect_inference(Xtest, T1=result.treatment_value * 0.1) else: effect = result.estimator.const_marginal_effect_inference(Xtest) if Xtest is None: # we got a scalar effect although our original X may have had more rows effect = effect._expand_outputs(x_rows) multi_y = (not self._vec_y) or self.classification if multi_y and result.feature_baseline is not None and np.ndim(treatment_costs) == 2: # we've got treatment costs of shape (n, d_t-1) so we need to add a y dimension to broadcast safely treatment_costs = np.expand_dims(treatment_costs, 1) effect.translate(-treatment_costs) est = effect.point_estimate est_lb = effect.conf_int(alpha)[0] est_ub = effect.conf_int(alpha)[1] if multi_y: # y was an array, not a vector est = np.squeeze(est, 1) est_lb = np.squeeze(est_lb, 1) est_ub = np.squeeze(est_ub, 1) if result.feature_baseline is None: rec = np.empty(est.shape[0], dtype=object) rec[est > 0] = "increase" rec[est <= 0] = "decrease" # set the effect bounds; for positive treatments these agree with # the estimates; for 
negative treatments, we need to invert the interval eff_lb, eff_ub = est_lb, est_ub eff_lb[est <= 0], eff_ub[est <= 0] = -eff_ub[est <= 0], -eff_lb[est <= 0] # the effect is now always positive since we decrease treatment when negative eff = np.abs(est) else: # for discrete treatment, stack a zero result in front for control zeros = np.zeros((est.shape[0], 1)) all_effs = np.hstack([zeros, est]) eff_ind = np.argmax(all_effs, axis=1) treatment_arr = np.array([result.feature_baseline] + [lvl for lvl in result.feature_levels], dtype=object) rec = treatment_arr[eff_ind] # we need to call effect_inference to get the correct CI between the two treatment options effect = result.estimator.effect_inference(Xtest, T0=orig_df['Current treatment'], T1=rec) # we now need to construct the delta in the cost between the two treatments and translate the effect current_treatment = orig_df['Current treatment'].values if isinstance(current_treatment, pd.core.arrays.categorical.Categorical): current_treatment = current_treatment.to_numpy() if np.ndim(treatment_costs) >= 2: # remove third dimenions potentially added if multi_y: # y was an array, not a vector treatment_costs = np.squeeze(treatment_costs, 1) assert treatment_costs.shape[1] == len(treatment_arr) - 1, ("If treatment costs are an array, " " they must be of shape (n, d_t-1)," " where n is the number of samples" " and d_t the number of treatment" " categories.") all_costs = np.hstack([zeros, treatment_costs]) # find cost of current treatment: equality creates a 2d array with True on each row, # only if its the location of the current treatment. Then we take the corresponding cost. 
current_cost = all_costs[current_treatment.reshape(-1, 1) == treatment_arr.reshape(1, -1)] target_cost = np.take_along_axis(all_costs, eff_ind.reshape(-1, 1), 1).reshape(-1) else: assert isinstance(treatment_costs, (int, float)), ("Treatments costs should either be float or " "a 2d array of size (n, d_t-1).") all_costs = np.array([0] + [treatment_costs] * (len(treatment_arr) - 1)) # construct index of current treatment current_ind = (current_treatment.reshape(-1, 1) == treatment_arr.reshape(1, -1)) @ np.arange(len(treatment_arr)) current_cost = all_costs[current_ind] target_cost = all_costs[eff_ind] delta_cost = current_cost - target_cost # add second dimension if needed for broadcasting during translation of effect if multi_y: delta_cost = np.expand_dims(delta_cost, 1) effect.translate(delta_cost) eff = effect.point_estimate eff_lb, eff_ub = effect.conf_int(alpha) if multi_y: # y was an array, not a vector eff = np.squeeze(eff, 1) eff_lb = np.squeeze(eff_lb, 1) eff_ub = np.squeeze(eff_ub, 1) df = pd.DataFrame({'Treatment': rec, 'Effect of treatment': eff, 'Effect of treatment lower bound': eff_lb, 'Effect of treatment upper bound': eff_ub}, index=orig_df.index) return df.join(orig_df).sort_values('Effect of treatment', ascending=False).head(n_rows) def _individualized_policy_dict(self, Xtest, feature_index, *, n_rows=None, treatment_costs=0, alpha=0.05): """ Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect. 
Parameters ---------- Xtest: array-like Features feature_index: int or string Index of the feature to be considered as treatment n_rows: int, optional How many rows to return (all rows by default) treatment_costs: array-like, default 0 Cost of treatment, as a scalar value or per-sample alpha: float in [0, 1], default 0.05 Confidence level of the confidence intervals A (1-alpha)*100% confidence interval is returned Returns ------- output: dictionary dictionary containing treatment policy, effects, and other columns """ return self.individualized_policy(Xtest, feature_index, n_rows=n_rows, treatment_costs=treatment_costs, alpha=alpha).to_dict('list') def typical_treatment_value(self, feature_index): """ Get the typical treatment value used for the specified feature Parameters ---------- feature_index: int or string The index of the feature to be considered as treatment Returns ------- treatment_value : float The treatment value considered 'typical' for this feature """ result = [res for res in self._results if res.feature_index == feature_index] if len(result) == 0: if self._has_column_names: result = [res for res in self._results if res.feature_name == feature_index] assert len(result) == 1, f"Could not find feature with index/name {feature_index}" return result[0].treatment_value else: raise ValueError(f"No feature with index {feature_index}") return result[0].treatment_value
PypiClean
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/DjangoAppCenter/simpleui/static/admin/simpleui-x/elementui/umd/locale/fa.js
(function (global, factory) { if (typeof define === "function" && define.amd) { define('element/locale/fa', ['module', 'exports'], factory); } else if (typeof exports !== "undefined") { factory(module, exports); } else { var mod = { exports: {} }; factory(mod, mod.exports); global.ELEMENT.lang = global.ELEMENT.lang || {}; global.ELEMENT.lang.fa = mod.exports; } })(this, function (module, exports) { 'use strict'; exports.__esModule = true; exports.default = { el: { colorpicker: { confirm: 'باشد', clear: 'حذف' }, datepicker: { now: 'اکنون', today: 'امروز', cancel: 'لغو', clear: 'حذف', confirm: 'باشه', selectDate: 'انتخاب تاریخ', selectTime: 'انتخاب زمان', startDate: 'تاریخ شروع', startTime: 'زمان شروع', endDate: 'تاریخ پایان', endTime: 'زمان پایان', prevYear: 'سال قبل', nextYear: 'سال بعد', prevMonth: 'ماه قبل', nextMonth: 'ماه بعد', year: 'سال', month1: 'ژانویه', month2: 'فوریه', month3: 'مارس', month4: 'آوریل', month5: 'مه', month6: 'ژوئن', month7: 'جولای', month8: 'اوت', month9: 'سپتامبر', month10: 'اکتبر', month11: 'نوامبر', month12: 'دسامبر', // week: 'week', weeks: { sun: 'یکشنبه', mon: 'دوشنبه', tue: 'سه​شنبه', wed: 'چهارشنبه', thu: 'پنج​شنبه', fri: 'جمعه', sat: 'شنبه' }, months: { jan: 'ژانویه', feb: 'فوریه', mar: 'مارس', apr: 'آوریل', may: 'مه', jun: 'ژوئن', jul: 'جولای', aug: 'اوت', sep: 'سپتامبر', oct: 'اکتبر', nov: 'نوامبر', dec: 'دسامبر' } }, select: { loading: 'بارگیری', noMatch: 'هیچ داده‌ای پیدا نشد', noData: 'اطلاعاتی وجود ندارد', placeholder: 'انتخاب کنید' }, cascader: { noMatch: 'هیچ داده‌ای پیدا نشد', loading: 'بارگیری', placeholder: 'انتخاب کنید', noData: 'اطلاعاتی وجود ندارد' }, pagination: { goto: 'برو به', pagesize: '/صفحه', total: 'مجموع {total}', pageClassifier: '' }, messagebox: { title: 'پیام', confirm: 'باشه', cancel: 'لغو', error: 'ورودی غیر مجاز' }, upload: { deleteTip: 'برای پاک کردن حذف را فشار دهید', delete: 'حذف', preview: 'پیش‌نمایش', continue: 'ادامه' }, table: { emptyText: 'اطلاعاتی وجود ندارد', confirmFilter: 'تایید', 
resetFilter: 'حذف', clearFilter: 'همه', sumText: 'جمع' }, tree: { emptyText: 'اطلاعاتی وجود ندارد' }, transfer: { noMatch: 'هیچ داده‌ای پیدا نشد', noData: 'اطلاعاتی وجود ندارد', titles: ['لیست 1', 'لیست 2'], filterPlaceholder: 'کلید واژه هارو وارد کن', noCheckedFormat: '{total} مورد', hasCheckedFormat: '{checked} مورد از {total} مورد انتخاب شده است' }, image: { error: 'خطا در بارگیری تصویر' }, pageHeader: { title: 'بازگشت' } } }; module.exports = exports['default']; });
PypiClean
/Attest-0.5.3.tar.gz/Attest-0.5.3/attest/collectors.py
from __future__ import with_statement import inspect from contextlib import contextmanager, nested import sys from functools import wraps from . import statistics from .contexts import capture_output from .reporters import (auto_reporter, get_reporter_by_name, get_all_reporters, AbstractReporter, TestResult) class Tests(object): """Collection of test functions. :param tests: Iterable of other test collections to register with this one. :param contexts: Iterable of callables that take no arguments and return a context manager. """ def __init__(self, tests=(), contexts=None): self._tests = [] for collection in tests: self.register(collection) self._contexts = [] if contexts is not None: self._contexts.extend(contexts) def __iter__(self): return iter(self._tests) def __len__(self): return len(self._tests) def test_if(self, condition): """Returns :meth:`test` if the `condition` is ``True``. .. versionadded:: 0.4 """ if condition: return self.test return lambda x: x def test(self, func): """Decorate a function as a test belonging to this collection.""" @wraps(func) def wrapper(): with nested(*[ctx() for ctx in self._contexts]) as context: context = [c for c in context if c is not None] argc = len(inspect.getargspec(func)[0]) args = [] for arg in context: if type(arg) is tuple: # type() is intentional args.extend(arg) else: args.append(arg) func(*args[:argc]) self._tests.append(wrapper) return wrapper def context(self, func): """Decorate a function as a :func:`~contextlib.contextmanager` for running the tests in this collection in. Corresponds to setup and teardown in other testing libraries. 
:: db = Tests() @db.context def connect(): con = connect_db() try: yield con finally: con.disconnect() @db.test def using_connection(con): assert con is not None The above corresponds to:: db = Tests() @contextmanager def connect(): con = connect_db() try: yield con finally: con.disconnect() @db.test def using_connection(): with connect() as con: assert con is not None The difference is that this decorator applies the context to all tests defined in its collection, so it's less repetitive. Yielding :const:`None` or nothing passes no arguments to the test, yielding a single value other than a tuple passes that value as the sole argument to the test, yielding a tuple splats the tuple as the arguments to the test. If you want to yield a tuple as the sole argument, wrap it in a one-tuple or unsplat the args in the test. You can have more than one context, which will be run in order using :func:`contextlib.nested`, and their yields will be passed in order to the test functions. .. versionadded:: 0.2 Nested contexts. .. versionchanged:: 0.5 Tests will gets as many arguments as they ask for. """ func = contextmanager(func) self._contexts.append(func) return func def register_if(self, condition): """Returns :meth:`register` if the `condition` is ``True``. .. versionadded:: 0.4 """ if condition: return self.register return lambda x: x def register(self, tests): """Merge in another test collection. :param tests: * A class, which is then instantiated and return allowing it to be used as a decorator for :class:`TestBase` classes. * A string, representing the import path to an iterable yielding tests, in the form of ``'package.module.object'``. * Otherwise any iterable object is assumed to yield tests. Any of these can be passed in a list to the :class:`Tests` constructor. .. 
versionadded:: 0.2 Refer to collections by import path as a string """ if inspect.isclass(tests): self._tests.extend(tests()) return tests elif isinstance(tests, basestring): module, collection = str(tests).rsplit('.', 1) module = __import__(module, fromlist=[collection]) tests = getattr(module, collection) self._tests.extend(tests) def test_suite(self): """Create a :class:`unittest.TestSuite` from this collection.""" from unittest import TestSuite, FunctionTestCase suite = TestSuite() for test in self: suite.addTest(FunctionTestCase(test)) return suite def run(self, reporter=auto_reporter): """Run all tests in this collection. :param reporter: An instance of :class:`~attest.reporters.AbstractReporter` or a callable returning something implementing that API (not enforced). """ assertions, statistics.assertions = statistics.assertions, 0 if not isinstance(reporter, AbstractReporter): reporter = reporter() reporter.begin(self._tests) for test in self: result = TestResult() result.test = test try: with capture_output() as (out, err): if test() is False: raise AssertionError('test() is False') except BaseException, e: if isinstance(e, KeyboardInterrupt): break result.error = e result.stdout, result.stderr = out, err result.exc_info = sys.exc_info() reporter.failure(result) else: result.stdout, result.stderr = out, err reporter.success(result) try: reporter.finished() finally: statistics.assertions = assertions def main(self, argv=sys.argv): """Interface to :meth:`run` with command-line options. ``-h``, ``--help`` Show a help message ``-r NAME``, ``--reporter NAME`` Select reporter by name with :func:`~attest.reporters.get_reporter_by_name` ``-l``, ``--list-reporters`` List the names of all installed reporters Remaining arguments are passed to the reporter. .. versionadded:: 0.2 .. versionchanged:: 0.4 ``--list-reporters`` was added. 
""" from optparse import OptionParser parser = OptionParser() parser.add_option('-r', '--reporter', metavar='NAME', help='select reporter by name') parser.add_option('-l', '--list-reporters', action='store_true', help='list available reporters') options, args = parser.parse_args() if options.list_reporters: for reporter in get_all_reporters(): print reporter else: reporter = get_reporter_by_name(options.reporter)(*args) self.run(reporter) def test_if(condition): """Returns :func:`test` if the `condition` is ``True``. .. versionadded:: 0.4 """ if condition: return test return lambda x: x def test(meth): """Mark a :class:`TestBase` method as a test and wrap it to run in the :meth:`TestBase.__context__` of the subclass. """ @wraps(meth) def wrapper(self): with contextmanager(self.__context__)(): meth(self) wrapper.__test__ = True return wrapper class TestBase(object): """Base for test classes. Decorate test methods with :func:`test`. Needs to be registered with a :class:`Tests` collection to be run. For setup and teardown, override :meth:`__context__` like a :func:`~contextlib.contextmanager` (without the decorator). :: class Math(TestBase): def __context__(self): self.two = 1 + 1 yield del self.two @test def arithmetics(self): assert self.two == 2 suite = Tests([Math()]) suite.run() """ def __context__(self): yield def __iter__(self): for name in dir(self): attr = getattr(self, name) if getattr(attr, '__test__', False) and callable(attr): yield attr
PypiClean
/ORG.asm-1.0.3.tar.gz/ORG.asm-1.0.3/doc/sphinx/source/chroroplast.rst
In [111]: r = Index('../../samples/AA') Loading global data... Done. Reading indexed sequence reads... 25553356 sequences read Reading indexed pair data... Done. Loading reverse index... Done. Indexing reverse complement sequences ... Fast indexing forward reads... Fast indexing reverse reads... Done. In [113]: p.keys() Out[113]: ['petD', 'petG', 'psbN', 'petA', 'rpl2', 'petB', 'petL', 'ndhC', 'petN', 'ycf2', 'ycf3', 'rpl23', 'rpl22', 'ndhE', 'psaI', 'psaJ', 'ndhI', 'psaA', 'psaB', 'psaC', 'psbT', 'accD', 'matK', 'rpl14', 'rpl36', 'rpl16', 'clpP', 'cemA', 'ndhA', 'psbB', 'ycf4', 'rbcL', 'ndhB', 'ccsA', 'rps15', 'ndhD', 'psbD', 'rpoC1', 'rpl32', 'rpoC2', 'ndhF', 'rpl33', 'rpl20', 'rpoB', 'atpI', 'atpH', 'rpoA', 'atpB', 'atpA', 'ycf1', 'atpF', 'atpE', 'ndhG', 'psbE', 'rps12', 'rps11', 'psbF', 'psbA', 'rps16', 'psbC', 'rps14', 'psbM', 'ndhK', 'rps19', 'rps18', 'psbI', 'psbH', 'psbK', 'psbJ', 'rps7', 'ndhJ', 'rps4', 'rps3', 'rps2', 'psbL', 'rps8', 'ndhH', 'psbZ'] In [113]: gp = ['rbcL','matK','ndhA', 'rpoA','atpA','atpH', 'rpoA','atpB','psbN', 'psbT','psbB','psbD', 'psbE','psbF','psbA', 'psbC','psbM','psbI', 'psbH','psbK','psbJ', 'psbL','psbZ'] In [114]: p2=dict(x for x in protChloroArabidopsis.items() if x[0] in gp) In [115]: p2=dict(x for x in p.items() if x[0] in gp) In [116]: m = r.lookForSeeds(p2) 99.9 % |#################################################/ ] remain : 00:00:00 In [117]: s = matchtoseed(m,r) In [118]: asm = Assembler(r) In [119]: a = tango(asm,s,mincov=1,minread=10,minoverlap=40) Cycle : 3 (4 nodes / 0.0% fake) Waiting points : 3209 / 3208.00 Gene: None JumpGap on read 25547928 Cycle : 47 (88 nodes / 4.5% fake) Waiting points : 3212 / 3208.72 Gene: None JumpGap on read 2724088 Cycle : 109930 (218736 nodes / 9.5% fake) Waiting points : 3211 / 3358.00 Gene: None JumpGap on read 4432167 Cycle : 109939 (218750 nodes / 9.5% fake) Waiting points : 3211 / 3354.77 Gene: None JumpGap on read 24209366 Cycle : 160894 (320346 nodes / 10.0% fake) Waiting points : 
3208 / 3476.05 Gene: None JumpGap on read 15356408 Cycle : 162299 (321604 nodes / 10.3% fake) Waiting points : 2443 / 2761.28 Gene: None JumpGap on read 11509091 Cycle : 162316 (321634 nodes / 10.3% fake) Waiting points : 2443 / 2745.91 Gene: None JumpGap on read 24781220 Cycle : 163703 (324406 nodes / 10.8% fake) Waiting points : 2436 / 2447.49 Gene: None JumpGap on read 25245800 Cycle : 166921 (330508 nodes / 12.0% fake) Waiting points : 2266 / 2458.52 Gene: atpA In [120]: asm.cleanDeadBranches() Remaining edges : 292066 node : 292020 Out[120]: 19931 In [121]: cg = asm.compactAssembling() Compacting graph : Stem 1 : 251 bp (total : 251) coverage : 458.51 Stem 2 : 1119 bp (total : 1370) coverage : 31.47 Stem 3 : 31 bp (total : 1401) coverage : 18.94 Stem 4 : 98 bp (total : 1499) coverage : 47.95 Stem 5 : 20 bp (total : 1519) coverage : 0.00 Stem 6 : 1 bp (total : 1520) coverage : 404.00 Stem 7 : 39 bp (total : 1559) coverage : 20.20 Stem 8 : 12 bp (total : 1571) coverage : 0.00 Stem 9 : 1750 bp (total : 3321) coverage : 604.10 Stem 10 : 2 bp (total : 3323) coverage : 168.33 Stem 11 : 1568 bp (total : 4891) coverage : 1055.90 Stem 12 : 27 bp (total : 4918) coverage : 331.86 Stem 13 : 27 bp (total : 4945) coverage : 216.43 Stem 14 : 124 bp (total : 5069) coverage : 54.94 Stem 15 : 1574 bp (total : 6643) coverage : 604.72 Stem 16 : 35 bp (total : 6678) coverage : 1388.75 Stem 17 : 2 bp (total : 6680) coverage : 437.67 Stem 18 : 39 bp (total : 6719) coverage : 83.32 Stem 19 : 279 bp (total : 6998) coverage : 778.42 Stem 20 : 17 bp (total : 7015) coverage : 162.72 Stem 21 : 902 bp (total : 7917) coverage : 1140.75 Stem 22 : 76 bp (total : 7993) coverage : 20.99 Stem 23 : 88 bp (total : 8081) coverage : 204.27 Stem 24 : 87 bp (total : 8168) coverage : 25.25 Stem 25 : 13 bp (total : 8181) coverage : 43.29 Stem 26 : 10 bp (total : 8191) coverage : 707.00 Stem 27 : 107 bp (total : 8298) coverage : 54.24 Stem 28 : 102 bp (total : 8400) coverage : 1092.37 Stem 29 : 2289 bp 
(total : 10689) coverage : 619.01 Stem 30 : 27 bp (total : 10716) coverage : 292.18 Stem 31 : 12 bp (total : 10728) coverage : 15.54 Stem 32 : 19 bp (total : 10747) coverage : 0.00 Stem 33 : 17 bp (total : 10764) coverage : 72.94 Stem 34 : 3415 bp (total : 14179) coverage : 599.41 Stem 35 : 351 bp (total : 14530) coverage : 645.88 Stem 36 : 696 bp (total : 15226) coverage : 24.20 Stem 37 : 12 bp (total : 15238) coverage : 7.77 Stem 38 : 76 bp (total : 15314) coverage : 472.21 Stem 39 : 76 bp (total : 15390) coverage : 32.79 Stem 40 : 100 bp (total : 15490) coverage : 48.00 Stem -3 : 31 bp (total : 15521) coverage : 18.94 Stem 41 : 88 bp (total : 15609) coverage : 41.99 Stem 42 : 8 bp (total : 15617) coverage : 0.00 Stem 43 : 8 bp (total : 15625) coverage : 123.44 Stem 44 : 91 bp (total : 15716) coverage : 27.45 Stem 45 : 19 bp (total : 15735) coverage : 409.05 Stem 46 : 13 bp (total : 15748) coverage : 165.93 Stem 47 : 15 bp (total : 15763) coverage : 88.38 Stem 48 : 9 bp (total : 15772) coverage : 424.20 Stem 49 : 1258 bp (total : 17030) coverage : 30.89 Stem 50 : 1557 bp (total : 18587) coverage : 1162.54 Stem 51 : 21 bp (total : 18608) coverage : 18.36 Stem 52 : 91 bp (total : 18699) coverage : 25.25 Stem 53 : 1090 bp (total : 19789) coverage : 550.36 Stem 54 : 21 bp (total : 19810) coverage : 22.95 Stem 55 : 21 bp (total : 19831) coverage : 211.18 Stem 56 : 20 bp (total : 19851) coverage : 625.24 Stem 57 : 33 bp (total : 19884) coverage : 83.18 Stem 58 : 12 bp (total : 19896) coverage : 963.38 Stem -46 : 13 bp (total : 19909) coverage : 165.93 Stem 59 : 16 bp (total : 19925) coverage : 112.88 Stem 60 : 1 bp (total : 19926) coverage : 151.50 Stem 61 : 91 bp (total : 20017) coverage : 38.42 Stem 62 : 1 bp (total : 20018) coverage : 353.50 Stem 63 : 1740 bp (total : 21758) coverage : 689.31 Stem 64 : 5 bp (total : 21763) coverage : 589.17 Stem 65 : 93 bp (total : 21856) coverage : 487.81 Stem 66 : 14 bp (total : 21870) coverage : 107.73 Stem 67 : 11 bp (total : 
21881) coverage : 185.17 Stem 68 : 90 bp (total : 21971) coverage : 546.07 Stem 69 : 91 bp (total : 22062) coverage : 52.70 Stem 70 : 101 bp (total : 22163) coverage : 75.25 Stem 71 : 303 bp (total : 22466) coverage : 896.04 Stem 72 : 12 bp (total : 22478) coverage : 116.54 Stem 73 : 40 bp (total : 22518) coverage : 906.54 Stem -8 : 12 bp (total : 22530) coverage : 0.00 Stem 74 : 51 bp (total : 22581) coverage : 275.81 Stem 75 : 20 bp (total : 22601) coverage : 105.81 Stem 76 : 86 bp (total : 22687) coverage : 32.51 Stem 77 : 752 bp (total : 23439) coverage : 648.79 Stem 78 : 1879 bp (total : 25318) coverage : 580.05 Stem 79 : 8 bp (total : 25326) coverage : 404.00 Stem 80 : 1170 bp (total : 26496) coverage : 719.68 Stem 81 : 20 bp (total : 26516) coverage : 76.95 Stem 82 : 11 bp (total : 26527) coverage : 42.08 Stem 83 : 6 bp (total : 26533) coverage : 144.29 Stem 84 : 6 bp (total : 26539) coverage : 591.57 Stem 85 : 12 bp (total : 26551) coverage : 15.54 Stem 86 : 19 bp (total : 26570) coverage : 323.20 Stem 87 : 67 bp (total : 26637) coverage : 851.07 Stem 88 : 91 bp (total : 26728) coverage : 27.45 Stem 89 : 28 bp (total : 26756) coverage : 66.17 Stem 90 : 1 bp (total : 26757) coverage : 353.50 Stem -73 : 40 bp (total : 26797) coverage : 906.54 Stem 91 : 29 bp (total : 26826) coverage : 117.83 Stem 92 : 91 bp (total : 26917) coverage : 26.35 Stem 93 : 1481 bp (total : 28398) coverage : 556.39 Stem 94 : 5 bp (total : 28403) coverage : 353.50 Stem -25 : 13 bp (total : 28416) coverage : 43.29 Stem 95 : 257 bp (total : 28673) coverage : 647.50 Stem -42 : 8 bp (total : 28681) coverage : 0.00 Stem 96 : 12 bp (total : 28693) coverage : 23.31 Stem 97 : 90 bp (total : 28783) coverage : 33.30 Stem -52 : 91 bp (total : 28874) coverage : 25.25 Stem 98 : 1 bp (total : 28875) coverage : 404.00 Stem -7 : 39 bp (total : 28914) coverage : 20.20 Stem 99 : 2 bp (total : 28916) coverage : 202.00 Stem 100 : 26 bp (total : 28942) coverage : 48.63 Stem 101 : 72 bp (total : 29014) 
coverage : 893.78 Stem 102 : 2986 bp (total : 32000) coverage : 612.39 Stem -28 : 102 bp (total : 32102) coverage : 1092.37 Stem 103 : 89 bp (total : 32191) coverage : 35.91 Stem 104 : 1 bp (total : 32192) coverage : 303.00 Stem 105 : 189 bp (total : 32381) coverage : 726.67 Stem 106 : 20 bp (total : 32401) coverage : 168.33 Stem 107 : 84 bp (total : 32485) coverage : 508.56 Stem 108 : 199 bp (total : 32684) coverage : 466.62 Stem -2 : 1119 bp (total : 33803) coverage : 31.47 Stem -56 : 20 bp (total : 33823) coverage : 625.24 Stem 109 : 188 bp (total : 34011) coverage : 789.30 Stem 110 : 36 bp (total : 34047) coverage : 27.30 Stem 111 : 18 bp (total : 34065) coverage : 122.26 Stem 112 : 517 bp (total : 34582) coverage : 844.85 Stem 113 : 1436 bp (total : 36018) coverage : 572.26 Stem 114 : 15 bp (total : 36033) coverage : 88.38 Stem 115 : 240 bp (total : 36273) coverage : 945.04 Stem 116 : 23 bp (total : 36296) coverage : 42.08 Stem 117 : 83 bp (total : 36379) coverage : 50.50 Stem 118 : 2 bp (total : 36381) coverage : 505.00 Stem 119 : 324 bp (total : 36705) coverage : 1034.24 Stem -65 : 93 bp (total : 36798) coverage : 487.81 Stem 120 : 218 bp (total : 37016) coverage : 728.21 Stem 121 : 25 bp (total : 37041) coverage : 38.85 Stem 122 : 21 bp (total : 37062) coverage : 50.50 Stem 123 : 627 bp (total : 37689) coverage : 1244.17 Stem 124 : 33 bp (total : 37722) coverage : 130.71 Stem 125 : 2631 bp (total : 40353) coverage : 653.31 Stem -122 : 21 bp (total : 40374) coverage : 50.50 Stem 126 : 2818 bp (total : 43192) coverage : 532.05 Stem 127 : 22 bp (total : 43214) coverage : 74.65 Stem 128 : 76 bp (total : 43290) coverage : 31.48 Stem 129 : 76 bp (total : 43366) coverage : 502.38 Stem 130 : 13 bp (total : 43379) coverage : 0.00 Stem 131 : 13 bp (total : 43392) coverage : 0.00 Stem 132 : 75 bp (total : 43467) coverage : 462.47 Stem 133 : 75 bp (total : 43542) coverage : 127.58 Stem 134 : 20 bp (total : 43562) coverage : 0.00 Stem -54 : 21 bp (total : 43583) 
coverage : 22.95 Stem 135 : 232 bp (total : 43815) coverage : 965.79 Stem 136 : 777 bp (total : 44592) coverage : 545.11 Stem 137 : 36 bp (total : 44628) coverage : 40.95 Stem 138 : 2471 bp (total : 47099) coverage : 564.73 Stem -43 : 8 bp (total : 47107) coverage : 123.44 Stem -79 : 8 bp (total : 47115) coverage : 404.00 Stem 139 : 4312 bp (total : 51427) coverage : 558.02 Stem -133 : 75 bp (total : 51502) coverage : 127.58 Stem 140 : 26 bp (total : 51528) coverage : 86.04 Stem 141 : 1673 bp (total : 53201) coverage : 596.11 Stem 142 : 84 bp (total : 53285) coverage : 61.79 Stem 143 : 396 bp (total : 53681) coverage : 934.44 Stem -55 : 21 bp (total : 53702) coverage : 211.18 Stem 144 : 22 bp (total : 53724) coverage : 21.96 Stem 145 : 31 bp (total : 53755) coverage : 123.09 Stem 146 : 2647 bp (total : 56402) coverage : 551.08 Stem 147 : 28 bp (total : 56430) coverage : 24.38 Stem 148 : 224 bp (total : 56654) coverage : 35.91 Stem 149 : 12 bp (total : 56666) coverage : 38.85 Stem 150 : 97 bp (total : 56763) coverage : 39.16 Stem 151 : 102 bp (total : 56865) coverage : 92.17 Stem 152 : 102 bp (total : 56967) coverage : 910.96 Stem 153 : 12 bp (total : 56979) coverage : 93.23 Stem 154 : 30 bp (total : 57009) coverage : 387.71 Stem -117 : 83 bp (total : 57092) coverage : 50.50 Stem 155 : 83 bp (total : 57175) coverage : 520.63 Stem 156 : 18 bp (total : 57193) coverage : 574.11 Stem 157 : 19 bp (total : 57212) coverage : 131.30 Stem 158 : 114 bp (total : 57326) coverage : 25.47 Stem 159 : 1039 bp (total : 58365) coverage : 28.94 Stem 160 : 18 bp (total : 58383) coverage : 58.47 Stem -135 : 232 bp (total : 58615) coverage : 965.79 Stem 161 : 3912 bp (total : 62527) coverage : 608.53 Stem 162 : 135 bp (total : 62662) coverage : 565.15 Stem 163 : 25 bp (total : 62687) coverage : 23.31 Stem 164 : 13 bp (total : 62700) coverage : 14.43 Stem 165 : 13 bp (total : 62713) coverage : 50.50 Stem 166 : 16 bp (total : 62729) coverage : 5.94 Stem -96 : 12 bp (total : 62741) coverage 
: 23.31 Stem 167 : 4 bp (total : 62745) coverage : 303.00 Stem 168 : 102 bp (total : 62847) coverage : 465.78 Stem 169 : 28 bp (total : 62875) coverage : 132.34 Stem 170 : 9 bp (total : 62884) coverage : 707.00 Stem 171 : 78 bp (total : 62962) coverage : 39.63 Stem 172 : 91 bp (total : 63053) coverage : 40.62 Stem 173 : 68 bp (total : 63121) coverage : 404.00 Stem 174 : 25 bp (total : 63146) coverage : 73.81 Stem -76 : 86 bp (total : 63232) coverage : 32.51 Stem 175 : 82 bp (total : 63314) coverage : 48.67 Stem -155 : 83 bp (total : 63397) coverage : 520.63 Stem 176 : 401 bp (total : 63798) coverage : 1015.78 Stem 177 : 27 bp (total : 63825) coverage : 133.46 Stem 178 : 90 bp (total : 63915) coverage : 16.65 Stem 179 : 852 bp (total : 64767) coverage : 1030.72 Stem 180 : 15 bp (total : 64782) coverage : 63.12 Stem 181 : 70 bp (total : 64852) coverage : 303.00 Stem 182 : 23 bp (total : 64875) coverage : 67.33 Stem 183 : 14 bp (total : 64889) coverage : 40.40 Stem -97 : 90 bp (total : 64979) coverage : 33.30 Stem 184 : 91 bp (total : 65070) coverage : 473.16 Stem 185 : 92 bp (total : 65162) coverage : 33.67 Stem 186 : 91 bp (total : 65253) coverage : 577.46 Stem 187 : 379 bp (total : 65632) coverage : 590.32 Stem 188 : 2992 bp (total : 68624) coverage : 671.90 Stem 189 : 6 bp (total : 68630) coverage : 129.86 Stem -139 : 4312 bp (total : 72942) coverage : 558.02 Stem 190 : 234 bp (total : 73176) coverage : 706.14 Stem -9 : 1750 bp (total : 74926) coverage : 604.10 Stem 191 : 1353 bp (total : 76279) coverage : 472.70 Stem 192 : 4 bp (total : 76283) coverage : 222.20 Stem 193 : 83 bp (total : 76366) coverage : 98.60 Stem 194 : 90 bp (total : 76456) coverage : 45.51 Stem 195 : 8 bp (total : 76464) coverage : 224.44 Stem 196 : 15 bp (total : 76479) coverage : 69.44 Stem 197 : 16 bp (total : 76495) coverage : 932.76 Stem 198 : 107 bp (total : 76602) coverage : 665.85 Stem 199 : 27 bp (total : 76629) coverage : 82.96 Stem 200 : 2040 bp (total : 78669) coverage : 1145.64 
Stem 201 : 665 bp (total : 79334) coverage : 732.02 Stem 202 : 99 bp (total : 79433) coverage : 38.38 Stem -58 : 12 bp (total : 79445) coverage : 963.38 Stem -192 : 4 bp (total : 79449) coverage : 222.20 Stem 203 : 1014 bp (total : 80463) coverage : 1168.02 Stem -150 : 97 bp (total : 80560) coverage : 39.16 Stem 204 : 67 bp (total : 80627) coverage : 29.71 Stem 205 : 70 bp (total : 80697) coverage : 539.14 Stem 206 : 88 bp (total : 80785) coverage : 43.12 Stem -95 : 257 bp (total : 81042) coverage : 647.50 Stem 207 : 7 bp (total : 81049) coverage : 151.50 Stem 208 : 97 bp (total : 81146) coverage : 29.89 Stem 209 : 60 bp (total : 81206) coverage : 23.18 Stem 210 : 91 bp (total : 81297) coverage : 610.39 Stem -68 : 90 bp (total : 81387) coverage : 546.07 Stem -69 : 91 bp (total : 81478) coverage : 52.70 Stem 211 : 100 bp (total : 81578) coverage : 35.00 Stem 212 : 92 bp (total : 81670) coverage : 1095.80 Stem 213 : 93 bp (total : 81763) coverage : 41.90 Stem 214 : 226 bp (total : 81989) coverage : 755.05 Stem -13 : 27 bp (total : 82016) coverage : 216.43 Stem -80 : 1170 bp (total : 83186) coverage : 719.68 Stem 215 : 24 bp (total : 83210) coverage : 4.04 Stem 216 : 15 bp (total : 83225) coverage : 12.62 Stem -123 : 627 bp (total : 83852) coverage : 1244.17 Stem 217 : 90 bp (total : 83942) coverage : 27.75 Stem 218 : 7045 bp (total : 90987) coverage : 554.35 Stem -75 : 20 bp (total : 91007) coverage : 105.81 Stem 219 : 74 bp (total : 91081) coverage : 802.61 Stem 220 : 76 bp (total : 91157) coverage : 48.53 Stem 221 : 24 bp (total : 91181) coverage : 113.12 Stem -57 : 33 bp (total : 91214) coverage : 83.18 Stem -16 : 35 bp (total : 91249) coverage : 1388.75 Stem -148 : 224 bp (total : 91473) coverage : 35.91 Stem 222 : 2289 bp (total : 93762) coverage : 605.65 Stem 223 : 92 bp (total : 93854) coverage : 35.84 Stem 224 : 91 bp (total : 93945) coverage : 686.14 Stem 225 : 88 bp (total : 94033) coverage : 1089.44 Stem 226 : 23 bp (total : 94056) coverage : 71.54 Stem 
-224 : 91 bp (total : 94147) coverage : 686.14 Stem -223 : 92 bp (total : 94239) coverage : 35.84 Stem -62 : 1 bp (total : 94240) coverage : 353.50 Stem 227 : 27 bp (total : 94267) coverage : 46.89 Stem 228 : 1 bp (total : 94268) coverage : 606.00 Stem 229 : 91 bp (total : 94359) coverage : 178.95 Stem -82 : 11 bp (total : 94370) coverage : 42.08 Stem -214 : 226 bp (total : 94596) coverage : 755.05 Stem -149 : 12 bp (total : 94608) coverage : 38.85 Stem -61 : 91 bp (total : 94699) coverage : 38.42 Stem 230 : 38 bp (total : 94737) coverage : 567.15 Stem 231 : 32 bp (total : 94769) coverage : 91.82 Stem 232 : 245 bp (total : 95014) coverage : 1204.61 Stem 233 : 35 bp (total : 95049) coverage : 89.78 Stem 234 : 23 bp (total : 95072) coverage : 88.38 Stem 235 : 73 bp (total : 95145) coverage : 768.42 Stem -154 : 30 bp (total : 95175) coverage : 387.71 Stem 236 : 202 bp (total : 95377) coverage : 529.88 Stem -187 : 379 bp (total : 95756) coverage : 590.32 Stem 237 : 35 bp (total : 95791) coverage : 471.33 Stem 238 : 24 bp (total : 95815) coverage : 72.72 Stem 239 : 17 bp (total : 95832) coverage : 527.44 Stem 240 : 16 bp (total : 95848) coverage : 65.35 Stem -48 : 9 bp (total : 95857) coverage : 424.20 Stem 241 : 17 bp (total : 95874) coverage : 84.17 Stem -118 : 2 bp (total : 95876) coverage : 505.00 Stem 242 : 25 bp (total : 95901) coverage : 101.00 Stem 243 : 74 bp (total : 95975) coverage : 29.63 Stem -151 : 102 bp (total : 96077) coverage : 92.17 Stem -152 : 102 bp (total : 96179) coverage : 910.96 Stem -230 : 38 bp (total : 96217) coverage : 567.15 Stem -169 : 28 bp (total : 96245) coverage : 132.34 Stem 244 : 170 bp (total : 96415) coverage : 907.82 Stem 245 : 17 bp (total : 96432) coverage : 129.06 Stem 246 : 1 bp (total : 96433) coverage : 404.00 Stem -194 : 90 bp (total : 96523) coverage : 45.51 Stem 247 : 19 bp (total : 96542) coverage : 792.85 Stem 248 : 33 bp (total : 96575) coverage : 71.29 Stem 249 : 11 bp (total : 96586) coverage : 58.92 Stem -193 : 83 
bp (total : 96669) coverage : 98.60 Stem -124 : 33 bp (total : 96702) coverage : 130.71 Stem 250 : 74 bp (total : 96776) coverage : 41.75 Stem 251 : 2 bp (total : 96778) coverage : 370.33 Stem -203 : 1014 bp (total : 97792) coverage : 1168.02 Stem 252 : 29 bp (total : 97821) coverage : 13.47 Stem 253 : 94 bp (total : 97915) coverage : 36.15 Stem -119 : 324 bp (total : 98239) coverage : 1034.24 Stem 254 : 2448 bp (total : 100687) coverage : 602.54 Stem -105 : 189 bp (total : 100876) coverage : 726.67 Stem 255 : 18 bp (total : 100894) coverage : 74.42 Stem 256 : 936 bp (total : 101830) coverage : 749.15 Stem -98 : 1 bp (total : 101831) coverage : 404.00 Stem -60 : 1 bp (total : 101832) coverage : 151.50 Stem -114 : 15 bp (total : 101847) coverage : 88.38 Stem -219 : 74 bp (total : 101921) coverage : 802.61 Stem 257 : 98 bp (total : 102019) coverage : 628.44 Stem 258 : 465 bp (total : 102484) coverage : 27.74 Stem -208 : 97 bp (total : 102581) coverage : 29.89 Stem 259 : 22 bp (total : 102603) coverage : 114.17 Stem 260 : 22 bp (total : 102625) coverage : 0.00 Stem -144 : 22 bp (total : 102647) coverage : 21.96 Stem 261 : 2359 bp (total : 105006) coverage : 571.08 Stem -78 : 1879 bp (total : 106885) coverage : 580.05 Stem 262 : 1640 bp (total : 108525) coverage : 646.50 Stem -245 : 17 bp (total : 108542) coverage : 129.06 Stem 263 : 18 bp (total : 108560) coverage : 85.05 Stem -45 : 19 bp (total : 108579) coverage : 409.05 Stem -120 : 218 bp (total : 108797) coverage : 728.21 Stem -255 : 18 bp (total : 108815) coverage : 74.42 Stem 264 : 32 bp (total : 108847) coverage : 664.15 Stem 265 : 5963 bp (total : 114810) coverage : 557.80 Stem -202 : 99 bp (total : 114909) coverage : 38.38 Stem -215 : 24 bp (total : 114933) coverage : 4.04 Stem -107 : 84 bp (total : 115017) coverage : 508.56 Stem 266 : 1296 bp (total : 116313) coverage : 1153.99 Stem 267 : 102 bp (total : 116415) coverage : 77.47 Stem 268 : 102 bp (total : 116517) coverage : 983.52 Stem 269 : 280 bp (total : 
116797) coverage : 597.73 Stem 270 : 23 bp (total : 116820) coverage : 130.46 Stem 271 : 71 bp (total : 116891) coverage : 1233.04 Stem -77 : 752 bp (total : 117643) coverage : 648.79 Stem -132 : 75 bp (total : 117718) coverage : 462.47 Stem 272 : 12 bp (total : 117730) coverage : 69.92 Stem 273 : 33 bp (total : 117763) coverage : 68.32 Stem -5 : 20 bp (total : 117783) coverage : 0.00 Stem 274 : 19 bp (total : 117802) coverage : 0.00 Stem 275 : 31 bp (total : 117833) coverage : 691.22 Stem -228 : 1 bp (total : 117834) coverage : 606.00 Stem 276 : 77 bp (total : 117911) coverage : 25.90 Stem 277 : 101 bp (total : 118012) coverage : 36.64 Stem 278 : 4 bp (total : 118016) coverage : 787.80 Stem 279 : 1183 bp (total : 119199) coverage : 691.73 Stem 280 : 2 bp (total : 119201) coverage : 101.00 Stem 281 : 15 bp (total : 119216) coverage : 69.44 Stem 282 : 11 bp (total : 119227) coverage : 42.08 Stem -129 : 76 bp (total : 119303) coverage : 502.38 Stem -38 : 76 bp (total : 119379) coverage : 472.21 Stem -249 : 11 bp (total : 119390) coverage : 58.92 Stem 283 : 878 bp (total : 120268) coverage : 610.02 Stem -243 : 74 bp (total : 120342) coverage : 29.63 Stem 284 : 21 bp (total : 120363) coverage : 123.95 Stem -83 : 6 bp (total : 120369) coverage : 144.29 Stem 285 : 16 bp (total : 120385) coverage : 95.06 Stem -239 : 17 bp (total : 120402) coverage : 527.44 Stem -10 : 2 bp (total : 120404) coverage : 168.33 Stem 286 : 647 bp (total : 121051) coverage : 538.98 Stem 287 : 62 bp (total : 121113) coverage : 22.44 Stem -44 : 91 bp (total : 121204) coverage : 27.45 Stem -113 : 1436 bp (total : 122640) coverage : 572.26 Stem 288 : 74 bp (total : 122714) coverage : 203.35 Stem 289 : 73 bp (total : 122787) coverage : 17.74 Stem 290 : 104 bp (total : 122891) coverage : 44.25 Stem 291 : 327 bp (total : 123218) coverage : 1131.02 Stem 292 : 90 bp (total : 123308) coverage : 42.18 Stem -248 : 33 bp (total : 123341) coverage : 71.29 Stem 293 : 13 bp (total : 123354) coverage : 7.21 Stem 
294 : 1686 bp (total : 125040) coverage : 566.91 Stem 295 : 1369 bp (total : 126409) coverage : 594.57 Stem 296 : 1 bp (total : 126410) coverage : 656.50 Stem 297 : 38 bp (total : 126448) coverage : 77.69 Stem -264 : 32 bp (total : 126480) coverage : 664.15 Stem 298 : 14 bp (total : 126494) coverage : 134.67 Stem 299 : 25 bp (total : 126519) coverage : 11.65 Stem -121 : 25 bp (total : 126544) coverage : 38.85 Stem 300 : 90 bp (total : 126634) coverage : 32.19 Stem 301 : 231 bp (total : 126865) coverage : 1318.66 Stem 302 : 61 bp (total : 126926) coverage : 568.53 Stem -18 : 39 bp (total : 126965) coverage : 83.32 Stem -30 : 27 bp (total : 126992) coverage : 292.18 Stem 303 : 1213 bp (total : 128205) coverage : 659.66 Stem 304 : 69 bp (total : 128274) coverage : 54.83 Stem 305 : 102 bp (total : 128376) coverage : 289.27 Stem -256 : 936 bp (total : 129312) coverage : 749.15 Stem -94 : 5 bp (total : 129317) coverage : 353.50 Stem -299 : 25 bp (total : 129342) coverage : 11.65 Stem -32 : 19 bp (total : 129361) coverage : 0.00 Stem 306 : 11 bp (total : 129372) coverage : 25.25 Stem 307 : 1 bp (total : 129373) coverage : 101.00 Stem -180 : 15 bp (total : 129388) coverage : 63.12 Stem -250 : 74 bp (total : 129462) coverage : 41.75 Stem 308 : 126 bp (total : 129588) coverage : 1031.47 Stem 309 : 901 bp (total : 130489) coverage : 571.29 Stem -160 : 18 bp (total : 130507) coverage : 58.47 Stem 310 : 159 bp (total : 130666) coverage : 35.35 Stem 311 : 13 bp (total : 130679) coverage : 101.00 Stem 312 : 513 bp (total : 131192) coverage : 843.37 Stem -67 : 11 bp (total : 131203) coverage : 185.17 Stem -115 : 240 bp (total : 131443) coverage : 945.04 Stem 313 : 58 bp (total : 131501) coverage : 616.27 Stem 314 : 12 bp (total : 131513) coverage : 93.23 Stem -201 : 665 bp (total : 132178) coverage : 732.02 Stem 315 : 77 bp (total : 132255) coverage : 58.27 Stem -106 : 20 bp (total : 132275) coverage : 168.33 Stem 316 : 29 bp (total : 132304) coverage : 33.67 Stem -306 : 11 bp 
(total : 132315) coverage : 25.25 Stem 317 : 91 bp (total : 132406) coverage : 43.91 Stem 318 : 64 bp (total : 132470) coverage : 20.20 Stem 319 : 18 bp (total : 132488) coverage : 95.68 Stem -26 : 10 bp (total : 132498) coverage : 707.00 Stem -210 : 91 bp (total : 132589) coverage : 610.39 Stem -100 : 26 bp (total : 132615) coverage : 48.63 Stem -11 : 1568 bp (total : 134183) coverage : 1055.90 Stem -259 : 22 bp (total : 134205) coverage : 114.17 Stem 320 : 280 bp (total : 134485) coverage : 719.22 Stem 321 : 68 bp (total : 134553) coverage : 36.59 Stem 322 : 12 bp (total : 134565) coverage : 7.77 Stem 323 : 14 bp (total : 134579) coverage : 269.33 Stem -310 : 159 bp (total : 134738) coverage : 35.35 Stem 324 : 20 bp (total : 134758) coverage : 76.95 Stem 325 : 88 bp (total : 134846) coverage : 34.04 Stem -188 : 2992 bp (total : 137838) coverage : 671.90 Stem -279 : 1183 bp (total : 139021) coverage : 691.73 Stem 326 : 156 bp (total : 139177) coverage : 517.22 Stem 327 : 27 bp (total : 139204) coverage : 21.64 Stem 328 : 14 bp (total : 139218) coverage : 67.33 Stem 329 : 38 bp (total : 139256) coverage : 145.03 Stem 330 : 26 bp (total : 139282) coverage : 26.19 Stem -109 : 188 bp (total : 139470) coverage : 789.30 Stem -158 : 114 bp (total : 139584) coverage : 25.47 Stem 331 : 1868 bp (total : 141452) coverage : 31.78 Stem -325 : 88 bp (total : 141540) coverage : 34.04 Stem 332 : 374 bp (total : 141914) coverage : 398.07 Stem -179 : 852 bp (total : 142766) coverage : 1030.72 Stem -182 : 23 bp (total : 142789) coverage : 67.33 Stem -265 : 5963 bp (total : 148752) coverage : 557.80 Stem -24 : 87 bp (total : 148839) coverage : 25.25 Stem -36 : 696 bp (total : 149535) coverage : 24.20 Stem 333 : 2610 bp (total : 152145) coverage : 467.90 Stem -116 : 23 bp (total : 152168) coverage : 42.08 Stem 334 : 15 bp (total : 152183) coverage : 643.88 Stem -301 : 231 bp (total : 152414) coverage : 1318.66 Stem 335 : 70 bp (total : 152484) coverage : 1330.07 Stem 336 : 1997 bp 
(total : 154481) coverage : 601.35 Stem -70 : 101 bp (total : 154582) coverage : 75.25 Stem -198 : 107 bp (total : 154689) coverage : 665.85 Stem -314 : 12 bp (total : 154701) coverage : 93.23 Stem 337 : 512 bp (total : 155213) coverage : 838.91 Stem 338 : 14 bp (total : 155227) coverage : 40.40 Stem -71 : 303 bp (total : 155530) coverage : 896.04 Stem 339 : 14 bp (total : 155544) coverage : 262.60 Stem -257 : 98 bp (total : 155642) coverage : 628.44 Stem -285 : 16 bp (total : 155658) coverage : 95.06 Stem -177 : 27 bp (total : 155685) coverage : 133.46 Stem 340 : 102 bp (total : 155787) coverage : 62.76 Stem 341 : 102 bp (total : 155889) coverage : 1118.84 Stem -103 : 89 bp (total : 155978) coverage : 35.91 Stem -190 : 234 bp (total : 156212) coverage : 706.14 Stem -242 : 25 bp (total : 156237) coverage : 101.00 Stem 342 : 101 bp (total : 156338) coverage : 61.39 Stem -319 : 18 bp (total : 156356) coverage : 95.68 Stem 343 : 17 bp (total : 156373) coverage : 555.50 Stem 344 : 262 bp (total : 156635) coverage : 216.98 Stem 345 : 20 bp (total : 156655) coverage : 0.00 Stem -292 : 90 bp (total : 156745) coverage : 42.18 Stem 346 : 91 bp (total : 156836) coverage : 454.50 Stem -172 : 91 bp (total : 156927) coverage : 40.62 Stem 347 : 1 bp (total : 156928) coverage : 303.00 Stem 348 : 1228 bp (total : 158156) coverage : 604.52 Stem -153 : 12 bp (total : 158168) coverage : 93.23 Stem 349 : 85 bp (total : 158253) coverage : 37.58 Stem 350 : 28 bp (total : 158281) coverage : 69.66 Stem -303 : 1213 bp (total : 159494) coverage : 659.66 Stem -317 : 91 bp (total : 159585) coverage : 43.91 Stem -174 : 25 bp (total : 159610) coverage : 73.81 Stem -104 : 1 bp (total : 159611) coverage : 303.00 Stem -200 : 2040 bp (total : 161651) coverage : 1145.64 Stem -284 : 21 bp (total : 161672) coverage : 123.95 Stem -195 : 8 bp (total : 161680) coverage : 224.44 Stem -305 : 102 bp (total : 161782) coverage : 289.27 Stem 351 : 21 bp (total : 161803) coverage : 87.23 Stem 352 : 15 bp (total 
: 161818) coverage : 50.50 Stem -263 : 18 bp (total : 161836) coverage : 85.05 Stem 353 : 13 bp (total : 161849) coverage : 57.71 Stem -29 : 2289 bp (total : 164138) coverage : 619.01 Stem -205 : 70 bp (total : 164208) coverage : 539.14 Stem 354 : 213 bp (total : 164421) coverage : 34.93 Stem -332 : 374 bp (total : 164795) coverage : 398.07 Stem -171 : 78 bp (total : 164873) coverage : 39.63 Stem -156 : 18 bp (total : 164891) coverage : 574.11 Stem 355 : 4 bp (total : 164895) coverage : 909.00 Stem 356 : 23 bp (total : 164918) coverage : 75.75 Stem -297 : 38 bp (total : 164956) coverage : 77.69 Stem -101 : 72 bp (total : 165028) coverage : 893.78 Stem -339 : 14 bp (total : 165042) coverage : 262.60 Stem 357 : 45 bp (total : 165087) coverage : 814.59 Stem -41 : 88 bp (total : 165175) coverage : 41.99 Stem 358 : 1 bp (total : 165176) coverage : 252.50 Stem 359 : 91 bp (total : 165267) coverage : 40.62 Stem 360 : 90 bp (total : 165357) coverage : 543.85 Stem -302 : 61 bp (total : 165418) coverage : 568.53 Stem -307 : 1 bp (total : 165419) coverage : 101.00 Stem -253 : 94 bp (total : 165513) coverage : 36.15 Stem -260 : 22 bp (total : 165535) coverage : 0.00 Stem -51 : 21 bp (total : 165556) coverage : 18.36 Stem -108 : 199 bp (total : 165755) coverage : 466.62 Stem 361 : 13 bp (total : 165768) coverage : 0.00 Stem 362 : 89 bp (total : 165857) coverage : 350.13 Stem 363 : 89 bp (total : 165946) coverage : 68.46 Stem -313 : 58 bp (total : 166004) coverage : 616.27 Stem -356 : 23 bp (total : 166027) coverage : 75.75 Stem -345 : 20 bp (total : 166047) coverage : 0.00 Stem 364 : 21 bp (total : 166068) coverage : 4.59 Stem 365 : 15 bp (total : 166083) coverage : 69.44 Stem 366 : 11 bp (total : 166094) coverage : 92.58 Stem -236 : 202 bp (total : 166296) coverage : 529.88 Stem -218 : 7045 bp (total : 173341) coverage : 554.35 Stem 367 : 12 bp (total : 173353) coverage : 54.38 Stem -186 : 91 bp (total : 173444) coverage : 577.46 Stem -185 : 92 bp (total : 173536) coverage : 
33.67 Stem -365 : 15 bp (total : 173551) coverage : 69.44 Stem 368 : 12 bp (total : 173563) coverage : 124.31 Stem -355 : 4 bp (total : 173567) coverage : 909.00 Stem -315 : 77 bp (total : 173644) coverage : 58.27 Stem -350 : 28 bp (total : 173672) coverage : 69.66 Stem 369 : 1210 bp (total : 174882) coverage : 482.06 Stem 370 : 20 bp (total : 174902) coverage : 81.76 Stem 371 : 11 bp (total : 174913) coverage : 126.25 Stem 372 : 373 bp (total : 175286) coverage : 1088.86 Stem 373 : 74 bp (total : 175360) coverage : 35.01 Stem -99 : 2 bp (total : 175362) coverage : 202.00 Stem 374 : 29 bp (total : 175391) coverage : 13.47 Stem -348 : 1228 bp (total : 176619) coverage : 604.52 Stem -166 : 16 bp (total : 176635) coverage : 5.94 Stem 375 : 158 bp (total : 176793) coverage : 1076.70 Stem -20 : 17 bp (total : 176810) coverage : 162.72 Stem -12 : 27 bp (total : 176837) coverage : 331.86 Stem -207 : 7 bp (total : 176844) coverage : 151.50 Stem -281 : 15 bp (total : 176859) coverage : 69.44 Stem -375 : 158 bp (total : 177017) coverage : 1076.70 Stem -145 : 31 bp (total : 177048) coverage : 123.09 Stem 376 : 125 bp (total : 177173) coverage : 1115.01 Stem 377 : 20 bp (total : 177193) coverage : 0.00 Stem -364 : 21 bp (total : 177214) coverage : 4.59 Stem -308 : 126 bp (total : 177340) coverage : 1031.47 Stem -84 : 6 bp (total : 177346) coverage : 591.57 Stem 378 : 1 bp (total : 177347) coverage : 50.50 Stem 379 : 21 bp (total : 177368) coverage : 128.55 Stem 380 : 36 bp (total : 177404) coverage : 19.11 Stem -112 : 517 bp (total : 177921) coverage : 844.85 Stem -378 : 1 bp (total : 177922) coverage : 50.50 Stem 381 : 88 bp (total : 178010) coverage : 23.83 Stem 382 : 816 bp (total : 178826) coverage : 608.84 Stem -74 : 51 bp (total : 178877) coverage : 275.81 Stem 383 : 16 bp (total : 178893) coverage : 118.82 Stem -35 : 351 bp (total : 179244) coverage : 645.88 Stem -206 : 88 bp (total : 179332) coverage : 43.12 Stem -362 : 89 bp (total : 179421) coverage : 350.13 Stem 
-229 : 91 bp (total : 179512) coverage : 178.95 Stem -342 : 101 bp (total : 179613) coverage : 61.39 Stem 384 : 1 bp (total : 179614) coverage : 1313.00 Stem 385 : 91 bp (total : 179705) coverage : 489.63 Stem 386 : 92 bp (total : 179797) coverage : 36.92 Stem -269 : 280 bp (total : 180077) coverage : 597.73 Stem -173 : 68 bp (total : 180145) coverage : 404.00 Stem 387 : 556 bp (total : 180701) coverage : 757.59 Stem 388 : 17 bp (total : 180718) coverage : 67.33 Stem -196 : 15 bp (total : 180733) coverage : 69.44 Stem 389 : 188 bp (total : 180921) coverage : 32.60 Stem -110 : 36 bp (total : 180957) coverage : 27.30 Stem 390 : 35 bp (total : 180992) coverage : 33.67 Stem -15 : 1574 bp (total : 182566) coverage : 604.72 Stem -286 : 647 bp (total : 183213) coverage : 538.98 Stem -341 : 102 bp (total : 183315) coverage : 1118.84 Stem -340 : 102 bp (total : 183417) coverage : 62.76 Stem 391 : 11 bp (total : 183428) coverage : 58.92 Stem -27 : 107 bp (total : 183535) coverage : 54.24 Stem -19 : 279 bp (total : 183814) coverage : 778.42 Stem 392 : 5221 bp (total : 189035) coverage : 543.74 Stem -143 : 396 bp (total : 189431) coverage : 934.44 Stem -213 : 93 bp (total : 189524) coverage : 41.90 Stem -212 : 92 bp (total : 189616) coverage : 1095.80 Stem -322 : 12 bp (total : 189628) coverage : 7.77 Stem 393 : 12 bp (total : 189640) coverage : 7.77 Stem -380 : 36 bp (total : 189676) coverage : 19.11 Stem -141 : 1673 bp (total : 191349) coverage : 596.11 Stem -111 : 18 bp (total : 191367) coverage : 122.26 Stem -274 : 19 bp (total : 191386) coverage : 0.00 Stem -349 : 85 bp (total : 191471) coverage : 37.58 Stem 394 : 96 bp (total : 191567) coverage : 59.35 Stem -17 : 2 bp (total : 191569) coverage : 437.67 Stem 395 : 2 bp (total : 191571) coverage : 269.33 Stem -288 : 74 bp (total : 191645) coverage : 203.35 Stem -373 : 74 bp (total : 191719) coverage : 35.01 Stem -374 : 29 bp (total : 191748) coverage : 13.47 Stem -164 : 13 bp (total : 191761) coverage : 14.43 Stem -254 : 
2448 bp (total : 194209) coverage : 602.54 Stem -125 : 2631 bp (total : 196840) coverage : 653.31 Stem -258 : 465 bp (total : 197305) coverage : 27.74 Stem 396 : 39 bp (total : 197344) coverage : 42.92 Stem -333 : 2610 bp (total : 199954) coverage : 467.90 Stem -291 : 327 bp (total : 200281) coverage : 1131.02 Stem -354 : 213 bp (total : 200494) coverage : 34.93 Stem -126 : 2818 bp (total : 203312) coverage : 532.05 Stem -336 : 1997 bp (total : 205309) coverage : 601.35 Stem -246 : 1 bp (total : 205310) coverage : 404.00 Stem -235 : 73 bp (total : 205383) coverage : 768.42 Stem 397 : 12 bp (total : 205395) coverage : 108.77 Stem -272 : 12 bp (total : 205407) coverage : 69.92 Stem -167 : 4 bp (total : 205411) coverage : 303.00 Stem -277 : 101 bp (total : 205512) coverage : 36.64 Stem 398 : 271 bp (total : 205783) coverage : 801.32 Stem 399 : 12 bp (total : 205795) coverage : 62.15 Stem -247 : 19 bp (total : 205814) coverage : 792.85 Stem -87 : 67 bp (total : 205881) coverage : 851.07 Stem -189 : 6 bp (total : 205887) coverage : 129.86 Stem 400 : 408 bp (total : 206295) coverage : 511.67 Stem -59 : 16 bp (total : 206311) coverage : 112.88 Stem -270 : 23 bp (total : 206334) coverage : 130.46 Stem -199 : 27 bp (total : 206361) coverage : 82.96 Stem -1 : 251 bp (total : 206612) coverage : 458.51 Stem -379 : 21 bp (total : 206633) coverage : 128.55 Stem -91 : 29 bp (total : 206662) coverage : 117.83 Stem -136 : 777 bp (total : 207439) coverage : 545.11 Stem -234 : 23 bp (total : 207462) coverage : 88.38 Stem -335 : 70 bp (total : 207532) coverage : 1330.07 Stem 401 : 20 bp (total : 207552) coverage : 4.81 Stem -293 : 13 bp (total : 207565) coverage : 7.21 Stem -238 : 24 bp (total : 207589) coverage : 72.72 Stem -371 : 11 bp (total : 207600) coverage : 126.25 Stem 402 : 96 bp (total : 207696) coverage : 51.02 Stem -394 : 96 bp (total : 207792) coverage : 59.35 Stem -377 : 20 bp (total : 207812) coverage : 0.00 Stem 403 : 19 bp (total : 207831) coverage : 75.75 Stem -278 : 
4 bp (total : 207835) coverage : 787.80 Stem -276 : 77 bp (total : 207912) coverage : 25.90 Stem -22 : 76 bp (total : 207988) coverage : 20.99 Stem -368 : 12 bp (total : 208000) coverage : 124.31 Stem -161 : 3912 bp (total : 211912) coverage : 608.53 Stem 404 : 22 bp (total : 211934) coverage : 43.91 Stem -40 : 100 bp (total : 212034) coverage : 48.00 Stem 405 : 128 bp (total : 212162) coverage : 1197.12 Stem 406 : 26 bp (total : 212188) coverage : 52.37 Stem 407 : 2350 bp (total : 214538) coverage : 584.22 Stem 408 : 28 bp (total : 214566) coverage : 303.00 Stem 409 : 35 bp (total : 214601) coverage : 53.31 Stem -184 : 91 bp (total : 214692) coverage : 473.16 Stem -300 : 90 bp (total : 214782) coverage : 32.19 Stem -406 : 26 bp (total : 214808) coverage : 52.37 Stem 410 : 23 bp (total : 214831) coverage : 887.96 Stem -85 : 12 bp (total : 214843) coverage : 15.54 Stem -282 : 11 bp (total : 214854) coverage : 42.08 Stem 411 : 12 bp (total : 214866) coverage : 77.69 Stem -331 : 1868 bp (total : 216734) coverage : 31.78 Stem -389 : 188 bp (total : 216922) coverage : 32.60 Stem -400 : 408 bp (total : 217330) coverage : 511.67 Stem 412 : 29 bp (total : 217359) coverage : 80.80 Stem 413 : 839 bp (total : 218198) coverage : 1196.97 Stem 414 : 39 bp (total : 218237) coverage : 55.55 Stem -401 : 20 bp (total : 218257) coverage : 4.81 Stem 415 : 53 bp (total : 218310) coverage : 353.50 Stem -178 : 90 bp (total : 218400) coverage : 16.65 Stem -287 : 62 bp (total : 218462) coverage : 22.44 Stem -176 : 401 bp (total : 218863) coverage : 1015.78 Stem -165 : 13 bp (total : 218876) coverage : 50.50 Stem -330 : 26 bp (total : 218902) coverage : 26.19 Stem -361 : 13 bp (total : 218915) coverage : 0.00 Stem -217 : 90 bp (total : 219005) coverage : 27.75 Stem -346 : 91 bp (total : 219096) coverage : 454.50 Stem -130 : 13 bp (total : 219109) coverage : 0.00 Stem 416 : 815 bp (total : 219924) coverage : 33.67 Stem 417 : 580 bp (total : 220504) coverage : 1134.29 Stem -413 : 839 bp 
(total : 221343) coverage : 1196.97 Stem -49 : 1258 bp (total : 222601) coverage : 30.89 Stem -271 : 71 bp (total : 222672) coverage : 1233.04 Stem 418 : 89 bp (total : 222761) coverage : 435.42 Stem 419 : 90 bp (total : 222851) coverage : 47.73 Stem -231 : 32 bp (total : 222883) coverage : 91.82 Stem -387 : 556 bp (total : 223439) coverage : 757.59 Stem -283 : 878 bp (total : 224317) coverage : 610.02 Stem -267 : 102 bp (total : 224419) coverage : 77.47 Stem -268 : 102 bp (total : 224521) coverage : 983.52 Stem -385 : 91 bp (total : 224612) coverage : 489.63 Stem -386 : 92 bp (total : 224704) coverage : 36.92 Stem 420 : 96 bp (total : 224800) coverage : 73.93 Stem -162 : 135 bp (total : 224935) coverage : 565.15 Stem -221 : 24 bp (total : 224959) coverage : 113.12 Stem -312 : 513 bp (total : 225472) coverage : 843.37 Stem -352 : 15 bp (total : 225487) coverage : 50.50 Stem -382 : 816 bp (total : 226303) coverage : 608.84 Stem -388 : 17 bp (total : 226320) coverage : 67.33 Stem -170 : 9 bp (total : 226329) coverage : 707.00 Stem -157 : 19 bp (total : 226348) coverage : 131.30 Stem -409 : 35 bp (total : 226383) coverage : 53.31 Stem -90 : 1 bp (total : 226384) coverage : 353.50 Stem 421 : 1048 bp (total : 227432) coverage : 884.83 Stem -326 : 156 bp (total : 227588) coverage : 517.22 Stem -127 : 22 bp (total : 227610) coverage : 74.65 Stem -64 : 5 bp (total : 227615) coverage : 589.17 Stem -81 : 20 bp (total : 227635) coverage : 76.95 Stem -311 : 13 bp (total : 227648) coverage : 101.00 Stem -392 : 5221 bp (total : 232869) coverage : 543.74 Stem -262 : 1640 bp (total : 234509) coverage : 646.50 Stem 422 : 1202 bp (total : 235711) coverage : 510.88 Stem -321 : 68 bp (total : 235779) coverage : 36.59 Stem -360 : 90 bp (total : 235869) coverage : 543.85 Stem -359 : 91 bp (total : 235960) coverage : 40.62 Stem -381 : 88 bp (total : 236048) coverage : 23.83 Stem -23 : 88 bp (total : 236136) coverage : 204.27 Stem -275 : 31 bp (total : 236167) coverage : 691.22 Stem 423 : 
13 bp (total : 236180) coverage : 28.86 Stem -220 : 76 bp (total : 236256) coverage : 48.53 Stem -147 : 28 bp (total : 236284) coverage : 24.38 Stem -4 : 98 bp (total : 236382) coverage : 47.95 Stem -168 : 102 bp (total : 236484) coverage : 465.78 Stem -63 : 1740 bp (total : 238224) coverage : 689.31 Stem -397 : 12 bp (total : 238236) coverage : 108.77 Stem -21 : 902 bp (total : 239138) coverage : 1140.75 Stem -391 : 11 bp (total : 239149) coverage : 58.92 Stem 424 : 3 bp (total : 239152) coverage : 833.25 Stem -338 : 14 bp (total : 239166) coverage : 40.40 Stem 425 : 102 bp (total : 239268) coverage : 390.27 Stem -137 : 36 bp (total : 239304) coverage : 40.95 Stem -66 : 14 bp (total : 239318) coverage : 107.73 Stem -273 : 33 bp (total : 239351) coverage : 68.32 Stem -384 : 1 bp (total : 239352) coverage : 1313.00 Stem -232 : 245 bp (total : 239597) coverage : 1204.61 Stem -138 : 2471 bp (total : 242068) coverage : 564.73 Stem 426 : 2321 bp (total : 244389) coverage : 511.22 Stem -309 : 901 bp (total : 245290) coverage : 571.29 Stem -92 : 91 bp (total : 245381) coverage : 26.35 Stem -266 : 1296 bp (total : 246677) coverage : 1153.99 Stem -411 : 12 bp (total : 246689) coverage : 77.69 Stem -329 : 38 bp (total : 246727) coverage : 145.03 Stem -47 : 15 bp (total : 246742) coverage : 88.38 Stem -290 : 104 bp (total : 246846) coverage : 44.25 Stem -142 : 84 bp (total : 246930) coverage : 61.79 Stem -233 : 35 bp (total : 246965) coverage : 89.78 Stem -351 : 21 bp (total : 246986) coverage : 87.23 Stem -53 : 1090 bp (total : 248076) coverage : 550.36 Stem -421 : 1048 bp (total : 249124) coverage : 884.83 Stem -211 : 100 bp (total : 249224) coverage : 35.00 Stem -251 : 2 bp (total : 249226) coverage : 370.33 Stem -131 : 13 bp (total : 249239) coverage : 0.00 Stem -334 : 15 bp (total : 249254) coverage : 643.88 Stem -240 : 16 bp (total : 249270) coverage : 65.35 Stem -183 : 14 bp (total : 249284) coverage : 40.40 Stem -280 : 2 bp (total : 249286) coverage : 101.00 Stem -227 
: 27 bp (total : 249313) coverage : 46.89 Stem -347 : 1 bp (total : 249314) coverage : 303.00 Stem -366 : 11 bp (total : 249325) coverage : 92.58 Stem -367 : 12 bp (total : 249337) coverage : 54.38 Stem 427 : 1 bp (total : 249338) coverage : 656.50 Stem 428 : 4 bp (total : 249342) coverage : 808.00 Stem -181 : 70 bp (total : 249412) coverage : 303.00 Stem 429 : 90 bp (total : 249502) coverage : 46.62 Stem -252 : 29 bp (total : 249531) coverage : 13.47 Stem -296 : 1 bp (total : 249532) coverage : 656.50 Stem 430 : 91 bp (total : 249623) coverage : 24.15 Stem 431 : 1000 bp (total : 250623) coverage : 1247.11 Stem -369 : 1210 bp (total : 251833) coverage : 482.06 Stem -128 : 76 bp (total : 251909) coverage : 31.48 Stem -39 : 76 bp (total : 251985) coverage : 32.79 Stem -327 : 27 bp (total : 252012) coverage : 21.64 Stem -244 : 170 bp (total : 252182) coverage : 907.82 Stem -175 : 82 bp (total : 252264) coverage : 48.67 Stem -140 : 26 bp (total : 252290) coverage : 86.04 Stem -418 : 89 bp (total : 252379) coverage : 435.42 Stem -419 : 90 bp (total : 252469) coverage : 47.73 Stem -372 : 373 bp (total : 252842) coverage : 1088.86 Stem -316 : 29 bp (total : 252871) coverage : 33.67 Stem 432 : 2513 bp (total : 255384) coverage : 1097.34 Stem -261 : 2359 bp (total : 257743) coverage : 571.08 Stem -289 : 73 bp (total : 257816) coverage : 17.74 Stem -416 : 815 bp (total : 258631) coverage : 33.67 Stem -357 : 45 bp (total : 258676) coverage : 814.59 Stem -430 : 91 bp (total : 258767) coverage : 24.15 Stem -72 : 12 bp (total : 258779) coverage : 116.54 Stem -295 : 1369 bp (total : 260148) coverage : 594.57 Stem -407 : 2350 bp (total : 262498) coverage : 584.22 Stem -393 : 12 bp (total : 262510) coverage : 7.77 Stem -163 : 25 bp (total : 262535) coverage : 23.31 Stem -222 : 2289 bp (total : 264824) coverage : 605.65 Stem -412 : 29 bp (total : 264853) coverage : 80.80 Stem 433 : 91 bp (total : 264944) coverage : 40.62 Stem -6 : 1 bp (total : 264945) coverage : 404.00 Stem -417 : 
580 bp (total : 265525) coverage : 1134.29 Stem -50 : 1557 bp (total : 267082) coverage : 1162.54 Stem -225 : 88 bp (total : 267170) coverage : 1089.44 Stem -146 : 2647 bp (total : 269817) coverage : 551.08 Stem -304 : 69 bp (total : 269886) coverage : 54.83 Stem -134 : 20 bp (total : 269906) coverage : 0.00 Stem -88 : 91 bp (total : 269997) coverage : 27.45 Stem -422 : 1202 bp (total : 271199) coverage : 510.88 Stem -433 : 91 bp (total : 271290) coverage : 40.62 Stem -159 : 1039 bp (total : 272329) coverage : 28.94 Stem -408 : 28 bp (total : 272357) coverage : 303.00 Stem -323 : 14 bp (total : 272371) coverage : 269.33 Stem -343 : 17 bp (total : 272388) coverage : 555.50 Stem -370 : 20 bp (total : 272408) coverage : 81.76 Stem -320 : 280 bp (total : 272688) coverage : 719.22 Stem -431 : 1000 bp (total : 273688) coverage : 1247.11 Stem -324 : 20 bp (total : 273708) coverage : 76.95 Stem -404 : 22 bp (total : 273730) coverage : 43.91 Stem -318 : 64 bp (total : 273794) coverage : 20.20 Stem -425 : 102 bp (total : 273896) coverage : 390.27 Stem -358 : 1 bp (total : 273897) coverage : 252.50 Stem -89 : 28 bp (total : 273925) coverage : 66.17 Stem -33 : 17 bp (total : 273942) coverage : 72.94 Stem -423 : 13 bp (total : 273955) coverage : 28.86 Stem -428 : 4 bp (total : 273959) coverage : 808.00 Stem -328 : 14 bp (total : 273973) coverage : 67.33 Stem -426 : 2321 bp (total : 276294) coverage : 511.22 Stem -402 : 96 bp (total : 276390) coverage : 51.02 Stem -237 : 35 bp (total : 276425) coverage : 471.33 Stem -390 : 35 bp (total : 276460) coverage : 33.67 Stem -420 : 96 bp (total : 276556) coverage : 73.93 Stem -241 : 17 bp (total : 276573) coverage : 84.17 Stem -216 : 15 bp (total : 276588) coverage : 12.62 Stem -197 : 16 bp (total : 276604) coverage : 932.76 Stem -398 : 271 bp (total : 276875) coverage : 801.32 Stem -405 : 128 bp (total : 277003) coverage : 1197.12 Stem -415 : 53 bp (total : 277056) coverage : 353.50 Stem -414 : 39 bp (total : 277095) coverage : 55.55 
Stem -395 : 2 bp (total : 277097) coverage : 269.33 Stem -376 : 125 bp (total : 277222) coverage : 1115.01 Stem -93 : 1481 bp (total : 278703) coverage : 556.39 Stem -337 : 512 bp (total : 279215) coverage : 838.91 Stem -191 : 1353 bp (total : 280568) coverage : 472.70 Stem -353 : 13 bp (total : 280581) coverage : 57.71 Stem -86 : 19 bp (total : 280600) coverage : 323.20 Stem -429 : 90 bp (total : 280690) coverage : 46.62 Stem -383 : 16 bp (total : 280706) coverage : 118.82 Stem -424 : 3 bp (total : 280709) coverage : 833.25 Stem -344 : 262 bp (total : 280971) coverage : 216.98 Stem -363 : 89 bp (total : 281060) coverage : 68.46 Stem -427 : 1 bp (total : 281061) coverage : 656.50 Stem -204 : 67 bp (total : 281128) coverage : 29.71 Stem -31 : 12 bp (total : 281140) coverage : 15.54 Stem -403 : 19 bp (total : 281159) coverage : 75.75 Stem -102 : 2986 bp (total : 284145) coverage : 612.39 Stem -34 : 3415 bp (total : 287560) coverage : 599.41 Stem -399 : 12 bp (total : 287572) coverage : 62.15 Stem -396 : 39 bp (total : 287611) coverage : 42.92 Stem -37 : 12 bp (total : 287623) coverage : 7.77 Stem -226 : 23 bp (total : 287646) coverage : 71.54 Stem -432 : 2513 bp (total : 290159) coverage : 1097.34 Stem -294 : 1686 bp (total : 291845) coverage : 566.91 Stem -410 : 23 bp (total : 291868) coverage : 887.96 Stem -298 : 14 bp (total : 291882) coverage : 134.67 Stem -14 : 124 bp (total : 292006) coverage : 54.94 Stem -209 : 60 bp (total : 292066) coverage : 23.18 Minimum stem coverage = 0
PypiClean
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/aiohttp/client_ws.py
"""Client-side WebSocket response object (vendored aiohttp code).

NOTE(review): this is vendored third-party code (aiohttp, bundled inside
Electrum-CHI).  The comments below document observed behaviour only; no
logic was changed.
"""

import asyncio
from typing import Any, Optional

import async_timeout

from .client_exceptions import ClientError
from .client_reqrep import ClientResponse
from .helpers import call_later, set_result
from .http import (
    WS_CLOSED_MESSAGE,
    WS_CLOSING_MESSAGE,
    WebSocketError,
    WSMessage,
    WSMsgType,
)
from .http_websocket import WebSocketWriter  # WSMessage
from .streams import EofStream, FlowControlDataQueue  # noqa
from .typedefs import (
    DEFAULT_JSON_DECODER,
    DEFAULT_JSON_ENCODER,
    JSONDecoder,
    JSONEncoder,
)


class ClientWebSocketResponse:
    """Client side of an established WebSocket connection.

    Wraps the reader/writer pair produced by the WebSocket handshake and
    provides high-level send/receive helpers, optional automatic PING/PONG
    answering (``autoping``), optional automatic close on a peer CLOSE
    frame (``autoclose``), and an optional heartbeat that marks the
    connection abnormally closed when no PONG arrives in time.
    """

    def __init__(self,
                 reader: 'FlowControlDataQueue[WSMessage]',
                 writer: WebSocketWriter,
                 protocol: Optional[str],
                 response: ClientResponse,
                 timeout: float,
                 autoclose: bool,
                 autoping: bool,
                 loop: asyncio.AbstractEventLoop,
                 *,
                 receive_timeout: Optional[float]=None,
                 heartbeat: Optional[float]=None,
                 compress: int=0,
                 client_notakeover: bool=False) -> None:
        self._response = response
        self._conn = response.connection
        self._writer = writer
        self._reader = reader
        self._protocol = protocol
        self._closed = False
        self._closing = False
        self._close_code = None  # type: Optional[int]
        self._timeout = timeout
        self._receive_timeout = receive_timeout
        self._autoclose = autoclose
        self._autoping = autoping
        self._heartbeat = heartbeat
        self._heartbeat_cb = None
        if heartbeat is not None:
            # expect the peer's PONG within half the heartbeat interval
            self._pong_heartbeat = heartbeat / 2.0
        self._pong_response_cb = None
        self._loop = loop
        # Future used by close() to wait for a concurrent receive() to finish
        self._waiting = None  # type: Optional[asyncio.Future[bool]]
        self._exception = None  # type: Optional[BaseException]
        self._compress = compress
        self._client_notakeover = client_notakeover

        self._reset_heartbeat()

    def _cancel_heartbeat(self) -> None:
        # Cancel both pending timer callbacks: the PONG watchdog and the
        # next scheduled PING.
        if self._pong_response_cb is not None:
            self._pong_response_cb.cancel()
            self._pong_response_cb = None

        if self._heartbeat_cb is not None:
            self._heartbeat_cb.cancel()
            self._heartbeat_cb = None

    def _reset_heartbeat(self) -> None:
        # (Re)arm the heartbeat timer; called at construction and after
        # every successfully received message.
        self._cancel_heartbeat()

        if self._heartbeat is not None:
            self._heartbeat_cb = call_later(
                self._send_heartbeat, self._heartbeat,
                self._loop)

    def _send_heartbeat(self) -> None:
        # Timer callback: send a PING and arm the PONG watchdog.
        if self._heartbeat is not None and not self._closed:
            # fire-and-forget a task is not perfect but maybe ok for
            # sending ping. Otherwise we need a long-living heartbeat
            # task in the class.
            self._loop.create_task(self._writer.ping())

            if self._pong_response_cb is not None:
                self._pong_response_cb.cancel()
            self._pong_response_cb = call_later(
                self._pong_not_received, self._pong_heartbeat, self._loop)

    def _pong_not_received(self) -> None:
        # Watchdog fired: treat the connection as abnormally closed (1006)
        # and surface an asyncio.TimeoutError via exception().
        if not self._closed:
            self._closed = True
            self._close_code = 1006
            self._exception = asyncio.TimeoutError()
            self._response.close()

    @property
    def closed(self) -> bool:
        return self._closed

    @property
    def close_code(self) -> Optional[int]:
        # WebSocket close code, set once the connection is closed/failed.
        return self._close_code

    @property
    def protocol(self) -> Optional[str]:
        # Negotiated WebSocket subprotocol, if any.
        return self._protocol

    @property
    def compress(self) -> int:
        return self._compress

    @property
    def client_notakeover(self) -> bool:
        return self._client_notakeover

    def get_extra_info(self, name: str, default: Any=None) -> Any:
        """extra info from connection transport"""
        conn = self._response.connection
        if conn is None:
            return default
        transport = conn.transport
        if transport is None:
            return default
        return transport.get_extra_info(name, default)

    def exception(self) -> Optional[BaseException]:
        """Return the last internal error, or None."""
        return self._exception

    async def ping(self, message: bytes=b'') -> None:
        """Send a PING frame with an optional payload."""
        await self._writer.ping(message)

    async def pong(self, message: bytes=b'') -> None:
        """Send a PONG frame with an optional payload."""
        await self._writer.pong(message)

    async def send_str(self, data: str,
                       compress: Optional[int]=None) -> None:
        """Send *data* as a TEXT frame; raise TypeError for non-str."""
        if not isinstance(data, str):
            raise TypeError('data argument must be str (%r)' % type(data))
        await self._writer.send(data, binary=False, compress=compress)

    async def send_bytes(self, data: bytes,
                         compress: Optional[int]=None) -> None:
        """Send *data* as a BINARY frame; raise TypeError otherwise."""
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be byte-ish (%r)' %
                            type(data))
        await self._writer.send(data, binary=True, compress=compress)

    async def send_json(self, data: Any,
                        compress: Optional[int]=None,
                        *, dumps: JSONEncoder=DEFAULT_JSON_ENCODER) -> None:
        """Serialize *data* with *dumps* and send it as a TEXT frame."""
        await self.send_str(dumps(data), compress=compress)

    async def close(self, *, code: int=1000, message: bytes=b'') -> bool:
        """Perform the closing handshake.

        Returns True if this call actually closed the connection,
        False if it was already closed.
        """
        # we need to break `receive()` cycle first,
        # `close()` may be called from different task
        if self._waiting is not None and not self._closed:
            self._reader.feed_data(WS_CLOSING_MESSAGE, 0)
            await self._waiting

        if not self._closed:
            self._cancel_heartbeat()
            self._closed = True
            try:
                await self._writer.close(code, message)
            except asyncio.CancelledError:
                self._close_code = 1006
                self._response.close()
                raise
            except Exception as exc:
                self._close_code = 1006
                self._exception = exc
                self._response.close()
                return True

            if self._closing:
                # peer already sent CLOSE; nothing more to wait for
                self._response.close()
                return True

            # drain incoming frames until the peer's CLOSE frame arrives
            while True:
                try:
                    with async_timeout.timeout(self._timeout,
                                               loop=self._loop):
                        msg = await self._reader.read()
                except asyncio.CancelledError:
                    self._close_code = 1006
                    self._response.close()
                    raise
                except Exception as exc:
                    self._close_code = 1006
                    self._exception = exc
                    self._response.close()
                    return True

                if msg.type == WSMsgType.CLOSE:
                    self._close_code = msg.data
                    self._response.close()
                    return True
        else:
            return False

    async def receive(self, timeout: Optional[float]=None) -> WSMessage:
        """Wait for the next message, handling control frames internally.

        PING/PONG are answered/skipped when autoping is set; a CLOSE frame
        triggers close() when autoclose is set.  Returns WS_CLOSED_MESSAGE
        once the connection is closed.
        """
        while True:
            if self._waiting is not None:
                raise RuntimeError(
                    'Concurrent call to receive() is not allowed')

            if self._closed:
                return WS_CLOSED_MESSAGE
            elif self._closing:
                await self.close()
                return WS_CLOSED_MESSAGE

            try:
                # _waiting lets a concurrent close() wait for this read
                self._waiting = self._loop.create_future()
                try:
                    with async_timeout.timeout(
                            timeout or self._receive_timeout,
                            loop=self._loop):
                        msg = await self._reader.read()
                    self._reset_heartbeat()
                finally:
                    waiter = self._waiting
                    self._waiting = None
                    set_result(waiter, True)
            except (asyncio.CancelledError, asyncio.TimeoutError):
                self._close_code = 1006
                raise
            except EofStream:
                # clean end of stream -> normal closure (1000)
                self._close_code = 1000
                await self.close()
                return WSMessage(WSMsgType.CLOSED, None, None)
            except ClientError:
                self._closed = True
                self._close_code = 1006
                return WS_CLOSED_MESSAGE
            except WebSocketError as exc:
                self._close_code = exc.code
                await self.close(code=exc.code)
                return WSMessage(WSMsgType.ERROR, exc, None)
            except Exception as exc:
                self._exception = exc
                self._closing = True
                self._close_code = 1006
                await self.close()
                return WSMessage(WSMsgType.ERROR, exc, None)

            if msg.type == WSMsgType.CLOSE:
                self._closing = True
                self._close_code = msg.data
                if not self._closed and self._autoclose:
                    await self.close()
            elif msg.type == WSMsgType.CLOSING:
                self._closing = True
            elif msg.type == WSMsgType.PING and self._autoping:
                # answer PINGs transparently and keep waiting for data
                await self.pong(msg.data)
                continue
            elif msg.type == WSMsgType.PONG and self._autoping:
                continue

            return msg

    async def receive_str(self, *, timeout: Optional[float]=None) -> str:
        """Receive the next message and require it to be TEXT."""
        msg = await self.receive(timeout)
        if msg.type != WSMsgType.TEXT:
            raise TypeError(
                "Received message {}:{!r} is not str".format(msg.type,
                                                             msg.data))
        return msg.data

    async def receive_bytes(self, *, timeout: Optional[float]=None) -> bytes:
        """Receive the next message and require it to be BINARY."""
        msg = await self.receive(timeout)
        if msg.type != WSMsgType.BINARY:
            raise TypeError(
                "Received message {}:{!r} is not bytes".format(msg.type,
                                                               msg.data))
        return msg.data

    async def receive_json(self,
                           *, loads: JSONDecoder=DEFAULT_JSON_DECODER,
                           timeout: Optional[float]=None) -> Any:
        """Receive a TEXT message and decode it with *loads*."""
        data = await self.receive_str(timeout=timeout)
        return loads(data)

    def __aiter__(self) -> 'ClientWebSocketResponse':
        return self

    async def __anext__(self) -> WSMessage:
        # async-for iteration stops on any close-related message
        msg = await self.receive()
        if msg.type in (WSMsgType.CLOSE,
                        WSMsgType.CLOSING,
                        WSMsgType.CLOSED):
            raise StopAsyncIteration  # NOQA
        return msg
PypiClean
/GooseSLURM-0.12.4.tar.gz/GooseSLURM-0.12.4/docs/cluster.rst
######## Clusters ######## Basic cluster layout -------------------- The basic layout of a cluster is shown in the image below. The cluster consists of a *head-node* and several *compute-nodes*. The head-node is the "computer" you log in to from your own PC. From the head-node, jobs are distributed to the compute-nodes by a scheduler: the *queuing system*. The scheduler sends the job to a compute-node when a free spot with the requested resources is available. Running a job thus corresponds to submitting a special (Bash-)script to the scheduler. You don't know a priori when, or on which compute-node, your job will be handled. .. figure:: images/cluster.svg :width: 500 px :align: center :alt: cluster architecture: head-node and compute-nodes Connecting, File Transfer and Editing ------------------------------------- The clusters run on Linux and you interface to them using a service called "ssh", which is short for Secure SHell. This service can present a remote Command Line Interface (CLI) over a network. In Layman's terms, *it allows you to type commands from your own computer which are executed on the cluster*. An ssh-client is therefore needed, which can be obtained for any operating system. Additionally, before and after running a job, the relevant files have to be transferred to and from the cluster respectively. Below we discuss several interfaces for either Linux/macOS or Windows users. It is remarked that generally copying actions/commands run on the users' computer, not on the cluster. Connecting and File Transfer from Windows ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To connect to the cluster from Windows, first a ssh-client is required. Several options exist, for example: * `SSH Secure Shell (shareware) <http://software.sites.unc.edu/shareware/#s>`_. * `PuTTy (free) <http://www.putty.org/>`_. * `cygwin (free) <https://www.cygwin.com/>`_: provides a Linux-like environment on Windows. * `git (free) <https://git-scm.com/download/win>`_. 
Git is actually a version management system, but it also includes a BASH-shell on Windows. * `FileZilla (free) <https://filezilla-project.org/>`_ can be used to transfer files to and from the clusters. Connecting and File Transfer from Linux/macOS ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Connecting """""""""" Almost all Linux/macOS distributions have a ssh-client installed. To log in to the *furnace* cluster, open a terminal window on your own computer, and execute: .. code-block:: bash [username@mypc ~]$ ssh myclusteraccount@clustername If this is the first time, ssh will ask you to confirm the identity of the computer you are connecting to. Confirm the question, and enter your password to log in on the cluster. To avoid the need to provide the user's password on each login, a key can be generated on the host computer and provided to the cluster. If still logged in on the *furnace*, hit ``Ctrl-D`` or type ``exit`` to log out and return to your own computer. Then follow these steps: 1. Generate a key on your own computer: .. code-block:: bash [username@mypc ~]$ ssh-keygen -t dsa confirm each question with ``Return``. 2. Copy the key to the cluster with: .. code-block:: bash [username@mypc ~]$ ssh-copy-id myclusteraccount@clustername If done right, from now on logging in will be password-less. File transfer """"""""""""" There are several file transfer options, partially depending on the Linux/macOS distribution used: * Using a file browser (e.g. Nautilus for Gnome, or Dolphin for KDE). Open the browser and type ``sftp://myclusteraccount@clustername`` in the address bar (or location bar). * Using ``scp`` from the command line. This command is completely analogous to the ``cp`` command (see :ref:`sec-bash`). To copy files to the cluster (e.g. *furnace*) type in your local prompt: .. code-block:: bash [username@mypc ~]$ scp somepath myclusteraccount@clustername:someclusterpath where ``somepath`` (and ``myclusteraccount`` and ``clustername``) should be replaced. 
To copy files from the cluster back to the users' computer, the source and the destination should be reversed: .. code-block:: bash [username@mypc ~]$ scp myclusteraccount@clustername:someclusterpath somepath Note that to copy folders ``-r`` should be added after ``scp``. * The more advanced user may want to take a look at the ``rsync`` command, which can perform fast transfer/synchronization. Editing Scripts ^^^^^^^^^^^^^^^ Here, some basic examples are given to edit files/scripts. There are basically two ways of doing this 1. Copy the file to your own computer, and then edit it with your favourite text/script editor (e.g. with code highlighting), and finally copy the file back to the cluster. This option is recommended for large modifications to a script, and usually is easier to manage in terms of versions. 2. Edit the file on the cluster with an editor that runs in the command line from a shell on the cluster. This option is recommended for small script modifications. There are several command line based text editors available on the clusters (and on most other :ref:`sec-linux` machines) named ``vi``, ``emacs``, and ``nano``. The first two are advanced and powerful, but are extremely beginner unfriendly.
PypiClean
/CLAchievements-0.1.0.tar.gz/CLAchievements-0.1.0/doc/plugin.rst
.. _plugin: Write your own achievement ========================== Achievement without persistent data ----------------------------------- Suppose you want to create an achievement ``Foo`` awarded when user successfully run a command on a file :file:`foo`. Let's write this achievement. Meta-information """""""""""""""" First, we need to define a class and define meta-information: any achievement is a subclass of :class:`~clachievements.achievements.__init__.Achievement`. Two arguments are compulsory: * :attr:`~clachievements.achievements.__init__.Achievement.title`: if ``None``, your class is an abstract achievement, meant to be subclassed; if a string, your achievement is an *actual* achievement. See the :class:`class documentation <clachievements.achievements.__init__.Achievement>` for other attributes; * `description`: your achievement must have a description. The first non-empty line of your class docstring is used, unless :attr:`~clachievements.achievements.__init__.Achievements._description` is defined, when it is used instead. See :class:`the class documentation <clachievements.achievements.__init__.Achievement>` to get more information about other attributes. .. code-block:: python from clachievements.achievements import Achievement from clachievements.testutils import test_lock, test_unlock class Foo(Achievement): """Successfully run a command on file `foo`.""" title = "Foo" Unlocking the achievement """"""""""""""""""""""""" Great: you have an achievement. But it is never unlocked: it will be frustrating for the user. An achievement is a :ref:`context manager <typecontextmanager>`: its :meth:`~contextmanager.__enter__` and :meth:`~contextmanager.__exit__` methods are called before and after the actual system call. They can be used to test the command line, the environment before and after the command, etc. Here, we test that: * ``foo`` is a positional argument; * the command did not fail. 
If so, we call :meth:`~clachievements.achievements.__init__.Achievement.unlock()` to unlock the achievement. It ensures that the achievement is marked as unlocked, and it displays a pop-up to notify the user. No need to worry that parallel calls to your achievement might unlock it at the same time: it is handled within the :meth:`~clachievements.achievements.__init__.Achievement.unlock()` method itself. .. code-block:: python from clachievements.achievements import Achievement from clachievements.testutils import test_lock, test_unlock class Foo(Achievement): """Successfully run a command on file `foo`.""" title = "Foo" def __exit__(self, exc_type, exc_value, traceback): if "foo" in self.command.positional: if isinstance(exc_value, SystemExit): if exc_value.code == 0: self.unlock() .. _testing: Testing """"""" If we are done, the achievement will work, but the unit tests will fail. An achievement *must* define a test that unlocks the achievement. Each achievement must define a static or class method, :pep:`decorated <318>` with :func:`~clachievements.testutils.test_unlock`. This method must iterate strings which are shell commands, unlocking the achievement. To be wrapped by CLAchievements, system calls must use string substitution: ``"foo bar"`` will call the ``foo`` binary, *not wrapped* by CLAchievements, where ``"{bin.foo} bar"`` will call the ``foo`` binary, wrapped by CLAchievements. You can add as many test methods as you want. You can also define test methods that must not unlock achievements, by decorating them with :func:`~clachievements.testutils.test_lock`. When performing tests, each test method is run inside an empty temporary directory, which will be deleted afterward. ..
code-block:: python from clachievements.achievements import Achievement from clachievements.testutils import test_lock, test_unlock class Foo(Achievement): """Successfully run a command on file `foo`.""" title = "Foo" def __exit__(self, exc_type, exc_value, traceback): if "foo" in self.command.positional: if isinstance(exc_value, SystemExit): if exc_value.code == 0: self.unlock() @staticmethod @test_unlock def test_touch(): yield "{bin.touch} foo" @staticmethod @test_lock def test_ls(): yield "{bin.ls} foo" Achievement with persistent data -------------------------------- Now, we want a new achievement ``FooBar`` to be triggered when 50 successful commands have been run on a file :file:`foo`. Let's do this. To do this, we have to store the number of successful commands. A class is defined to ease this process: :class:`~clachievements.achievements.__init__.SimplePersistentDataAchievement`. It is wrong (see below), but is works for simple cases. When using this class, a row is created in the CLAchievements database with this achievement name. * The first time this achievement is created, this row is filled with the content of attribute :attr:`~clachievements.achievements.__init__.SimplePersistentDataAchievement.default_data`. * When accessing to :attr:`~clachievements.achievements.__init__.SimplePersistentDataAchievement.data`, data is read from the database. * When assigning a value to :attr:`~clachievements.achievements.__init__.SimplePersistentDataAchievement.data`, data is written to the database. Any :mod:`picklable <pickle>` data can be stored using this method. This is simple, but this is not robust to concurrent access: if an integrity error occurs when assigning a value to :attr:`~clachievements.achievements.__init__.SimplePersistentDataAchievement.data`, it is silently ignored. With this example achievement, if I run this argument 50 times in parallel, about 30 of the assignments are ignored. 
If I were to design a life-critical application, this would be a big issue. But this is only a game: it does not work perfectly, but it is so much simpler to implement! .. code-block:: python from clachievements.achievements import SimplePersistentDataAchievement from clachievements.testutils import test_lock, test_unlock class FooBar(SimplePersistentDataAchievement): """Successfully run 50 commands on file `foo`.""" title = "FooBar" default_data = 0 def __exit__(self, exc_type, exc_value, traceback): if "foo" in self.command.positional: if isinstance(exc_value, SystemExit): if exc_value.code == 0: self.data += 1 if self.data >= 50: self.unlock() @staticmethod @test_lock def test_touch(): for _ in range(49): yield "{bin.touch} foo" @staticmethod @test_unlock def test_ls_touch(): for _ in range(25): yield "{bin.touch} foo" yield "{bin.ls} foo" More ---- Suppose this error-prone persistent data management does not suit you. Just write your own: within the achievement, the :class:`sqlite3 database connection <sqlite3.Connection>` is available as :attr:`self.database.conn`. Do whatever you want with it (without breaking other plugin databases)! In this case, to be sure not to mess with tables of CLA core or other plugins, use the tables named (case insensitive) ``achievement_YourPluginName`` or ``achievement_YourPluginName_*``. Methods :meth:`~clachievements.achievements.__init__.Achievement.first` and :meth:`~clachievements.achievements.__init__.Achievement.last` can be used to initialize or clean the achievement: the first one is called the first time the achievement is ever loaded (so it can be used to create some tables into the database), while the last one is called when the achievement has just been unlocked (so it can be used to clean stuff). Both these methods are meant to be subclassed, and are expected to call ``super().first(...)`` at the beginning of their code.
PypiClean
/GeminiMotorDrive-0.2.zip/GeminiMotorDrive-0.2/doc/source/GeminiMotorDrive.compilers.move_sequence.rst
GeminiMotorDrive.compilers.move_sequence ======================================== .. currentmodule:: GeminiMotorDrive.compilers.move_sequence .. automodule:: GeminiMotorDrive.compilers.move_sequence .. autosummary:: compile_sequence convert_sequence_to_motor_units get_sequence_time move_time compile_sequence ---------------- .. autofunction:: compile_sequence convert_sequence_to_motor_units ------------------------------- .. autofunction:: convert_sequence_to_motor_units get_sequence_time ----------------- .. autofunction:: get_sequence_time move_time --------- .. autofunction:: move_time
PypiClean
/FlowSom-0.1.1.tar.gz/FlowSom-0.1.1/flowsom/cluster.py
import numpy as np
from itertools import combinations
import bisect


class ConsensusCluster:
    """
    Implementation of Consensus clustering, following the paper
    https://link.springer.com/content/pdf/10.1023%2FA%3A1023949509487.pdf

    Args:
      * cluster -> clustering class; must expose ``fit_predict`` and be
        constructible with keyword parameter ``n_clusters``
      * L -> smallest number of clusters to try
      * K -> biggest number of clusters to try (exclusive upper bound)
      * H -> number of resamplings for each cluster number
      * resample_proportion -> fraction of rows sampled at each resampling

    Attributes filled by :meth:`fit`:
      * Mk -> consensus matrices for each k, shape ``(K-L, N, N)``
        (NOTE: every consensus matrix is retained, as specified in the paper)
      * Ak -> area under the consensus CDF for each number of clusters
        (see paper: section 3.3.1. Consensus distribution.)
      * deltaK -> changes in areas under the CDFs
        (see paper: section 3.3.1. Consensus distribution.)
      * bestK -> number of clusters that was found to be best
    """

    def __init__(self, cluster, L, K, H, resample_proportion=0.5):
        assert 0 <= resample_proportion <= 1, "proportion has to be between 0 and 1"
        self.cluster_ = cluster
        self.resample_proportion_ = resample_proportion
        self.L_ = L
        self.K_ = K
        self.H_ = H
        self.Mk = None
        self.Ak = None
        self.deltaK = None
        self.bestK = None

    def _internal_resample(self, data, proportion):
        """Draw a random row subsample (without replacement).

        Args:
          * data -> (examples, attributes) format
          * proportion -> fraction of rows to sample

        Returns:
          (selected row indices, corresponding rows of ``data``)
        """
        resampled_indices = np.random.choice(
            range(data.shape[0]), size=int(data.shape[0] * proportion), replace=False)
        return resampled_indices, data[resampled_indices, :]

    def fit(self, data, verbose=False):
        """
        Fits a consensus matrix for each number of clusters.

        Args:
          * data -> (examples, attributes) format
          * verbose -> print progress information or not
        """
        # Mk[i] accumulates how often each pair of points ended up in the same
        # cluster for k = L+i; Is counts how often each pair was co-sampled.
        Mk = np.zeros((self.K_ - self.L_, data.shape[0], data.shape[0]))
        Is = np.zeros((data.shape[0],) * 2)
        for k in range(self.L_, self.K_):  # for each number of clusters
            i_ = k - self.L_
            if verbose:
                print("At k = %d, aka. iteration = %d" % (k, i_))
            for h in range(self.H_):  # resample H times
                if verbose:
                    print("\tAt resampling h = %d, (k = %d)" % (h, k))
                resampled_indices, resample_data = self._internal_resample(
                    data, self.resample_proportion_)
                Mh = self.cluster_(n_clusters=k).fit_predict(resample_data)
                # find indexes of elements from same clusters with bisection
                # on a sorted array => more efficient than brute-force search
                id_clusts = np.argsort(Mh)
                sorted_ = Mh[id_clusts]
                for i in range(k):  # for each cluster
                    ia = bisect.bisect_left(sorted_, i)
                    ib = bisect.bisect_right(sorted_, i)
                    is_ = id_clusts[ia:ib]
                    ids_ = np.array(list(combinations(is_, 2))).T
                    # sometimes only one element is in a cluster (no combinations)
                    if ids_.size != 0:
                        Mk[i_, ids_[0], ids_[1]] += 1  # increment counts
                ids_2 = np.array(list(combinations(resampled_indices, 2))).T
                Is[ids_2[0], ids_2[1]] += 1
            Mk[i_] /= Is + 1e-8  # consensus matrix (epsilon avoids 0/0)
            # Mk[i_] is upper triangular (zeros on the diagonal); symmetrize it
            Mk[i_] += Mk[i_].T
            Mk[i_, range(data.shape[0]), range(
                data.shape[0])] = 1  # a point is always with itself
            Is.fill(0)  # reset co-sampling counter for the next k
        self.Mk = Mk
        # fit areas under the consensus CDFs
        self.Ak = np.zeros(self.K_ - self.L_)
        for i, m in enumerate(Mk):
            hist, bins = np.histogram(m.ravel(), density=True)
            # BUG FIX: np.sum over a generator is deprecated (and rejected by
            # recent NumPy) -- use the builtin sum for the trapezoid-style area.
            self.Ak[i] = sum(h * (b - a)
                             for b, a, h in zip(bins[1:], bins[:-1], np.cumsum(hist)))
        # fit relative differences between areas under successive CDFs
        self.deltaK = np.array([(Ab - Aa) / Aa if i > 2 else Aa
                                for Ab, Aa, i in zip(self.Ak[1:], self.Ak[:-1],
                                                     range(self.L_, self.K_ - 1))])
        self.bestK = (np.argmax(self.deltaK) +
                      self.L_) if self.deltaK.size > 0 else self.L_

    def predict(self):
        """
        Predicts on the consensus matrix, for the best found cluster number.
        """
        assert self.Mk is not None, "First run fit"
        # 1 - consensus turns agreement into a dissimilarity for the clusterer
        return self.cluster_(n_clusters=self.bestK).fit_predict(
            1 - self.Mk[self.bestK - self.L_])

    def predict_data(self, data):
        """
        Predicts on the data, for the best found cluster number.

        Args:
          * data -> (examples, attributes) format
        """
        assert self.Mk is not None, "First run fit"
        return self.cluster_(n_clusters=self.bestK).fit_predict(
            data)
PypiClean
/Feni-1.23-py3-none-any.whl/feni/feni.py
from . import decorator
from . import filehandlers
from . import configuration
from . import articles
from . import preprocessor
from bottle import default_app, route, run, template, static_file, request, auth_basic
import jinja2
import os
import markdown
import logging
from string import Template
import difflib
# NOTE(review): `difflib` and `auth_basic` appear unused in this module -- confirm before removing.


def create_sample():
    """Scaffold a minimal sample site in the current working directory.

    Creates ``articles/``, ``decorator/img``, ``decorator/css`` and
    ``templates/`` plus two sample articles, two templates, ``feni.yaml``
    and ``templates/templates.yaml``.
    Raises OSError if the directories already exist (``os.makedirs``).
    """
    # Sample string contents are written verbatim to disk.
    sample_template = '''<!DOCTYPE html> <html lang="en"> <head> <title>{{title}} {% if subtitle %} - {% endif %} {{subtitle}}</title> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="description" content=""> <meta name="author" content=""> <link href="/css/styles.css" rel="stylesheet" type="text/css"></link> </head> <body> {% block content_block %} <div class="content">{{content}}</div> {% endblock %} </body> </html>'''
    sample_template_2 = '''{% extends "article.html" %} {% block content_block %} <div class="content2"><i>{{content}}</i></div> {% endblock %}'''
    sample_article = '''type: article-type-one permalink: post1.html publish: true --- ### Welcome to Feni static site generator : Article 1'''
    sample_article_2 = '''type: article-type-two permalink: post2.html publish: true --- ### Welcome to Feni static site generator : Article 2'''
    sample_config = '''source: articles decorator: decorator output: site template: templates '''
    sample_templates_config = '''article.html: types: - article-type-one article2.html: types: - article-type-two '''
    # NOTE(review): sample_css is defined but never written to decorator/css -- looks like an oversight.
    sample_css = '''content { color: black } content2 { color: gray; } '''
    os.makedirs("articles")
    os.makedirs("decorator/img")
    os.makedirs("decorator/css")
    os.makedirs("templates")
    with open("templates/article.html", "w") as a:
        a.write(sample_template)
    with open("templates/article2.html", "w") as a:
        a.write(sample_template_2)
    with open("articles/article.md", "w") as a:
        a.write(sample_article)
    with open("articles/article2.md", "w") as a:
        a.write(sample_article_2)
    with open("feni.yaml", "w") as a:
        a.write(sample_config)
    with open("templates/templates.yaml", "w") as a:
        a.write(sample_templates_config)


def make_sure_dir_exists(folder_path):
    """Create the parent directory of ``folder_path`` if it does not exist.

    Failures are logged as warnings, not raised.
    """
    dir = os.path.dirname(folder_path)
    if not os.path.exists(dir):
        logging.info("Creating directory: %s", dir)
        try:
            os.makedirs(dir)
        except:  # NOTE(review): bare except silently hides real errors (e.g. permissions)
            logging.warning("Cannot create directory: %s", dir)


def make_embedded_articles(template):
    """Render every article listed in ``template.embedd`` to HTML.

    Returns a dict mapping template variable name -> rendered markdown HTML.
    NOTE(review): reads ``configuration['source']`` -- presumably the
    configuration module is subscriptable; verify against the project.
    """
    r = {}
    if template.embedd != None:
        for variable_name, article in template.embedd.items():
            logging.info("Embedding article: %s", article)
            r[variable_name] = markdown.markdown(articles.get_article_from(configuration['source'], article).content)
    return r


def insert_block_count(block, count):
    """Inject a ``data-block-count`` attribute into the first HTML tag of
    ``block`` so the in-browser editor can map DOM nodes back to markdown
    blocks. Returns ``block`` unchanged when no ``>`` is found.
    """
    end_of_tag = block.find(">")
    if end_of_tag != -1:
        return block[0:end_of_tag] + ' data-block-count="{}" '.format(count) + block[end_of_tag:]
    else:
        return block


def replace_block(article, block, after):
    """Replace markdown block number ``block`` of ``article`` with ``after``
    and rewrite the article file (frontmatter preserved). Always returns True.
    """
    md_content = preprocessor.process(article.content, article.name)
    # Blocks are separated by blank lines, mirroring markdown paragraphs.
    blocks = md_content.split("\n\n")
    new_file_contents = '---\n'.join([
        article.frontmatter,
        "\n\n".join(blocks[0:block] + [after] + blocks[block+1:])
    ])
    with open(article.path, 'w') as f:
        f.write(new_file_contents)
    return True


def is_html(block):
    """Heuristic: a block whose first non-space char is ``<`` is raw HTML."""
    if len(block) > 0:
        try:
            return block.strip()[0] == '<'
        except IndexError:
            # block was only whitespace; strip() left an empty string
            return False
    else:
        return False


def generate_article(decorator, environment, article, edit=False):
    """Render one article through its jinja2 template; returns UTF-8 bytes.

    With ``edit=True`` each markdown block is rendered separately and tagged
    with ``data-block-count`` so the live editor can address blocks.
    """
    template = decorator.get_template_for_type(article.type)
    if template == None:
        # NOTE(review): the %s is never interpolated -- Exception() does not
        # format its arguments; message should use % or .format().
        raise Exception("No template found for article type: %s", article.type)
    jinja_template = environment.get_template(template.file)
    content = ""
    if edit:
        md_content = preprocessor.process(article.content, article.name)
        blocks = md_content.split("\n\n")
        for (c, block) in enumerate(blocks):
            if is_html(block):
                t = block  # raw HTML passes through unrendered
            else:
                t = markdown.markdown(block, extensions=['markdown.extensions.toc', 'markdown.extensions.fenced_code'])
            with_block_id = insert_block_count(t, c)
            content += with_block_id
    else:
        md_content = preprocessor.process(article.content, article.name)
        content = markdown.markdown(md_content, extensions=['markdown.extensions.toc', 'markdown.extensions.fenced_code'])
    data = article.data
    data.update(make_embedded_articles(template))
    return jinja_template.render(content=content, **data).encode("utf-8")


def generate(source, output, decorator_path, templates_folder, template_map):
    """Generate the whole site: copy decoration assets, then render every
    publishable article from ``source`` into ``output``.

    Raises on duplicate permalinks; per-article render errors are only logged.
    """
    d = decorator.Decorator(decorator_path, templates_folder, template_map)
    d.add_handler('.less', filehandlers.LessProcessor)
    d.build_decoration(output)
    environment = jinja2.Environment(loader=jinja2.FileSystemLoader(d.template_folder))
    permalinks = {}  # permalink -> article name, for duplicate detection
    for article in articles.get_articles_from(source):
        if article.permalink != None:
            if article.permalink in permalinks:
                raise Exception("Duplicate permalink found %s's permalink and %s's permalinks are same" % (article.name, permalinks[article.permalink]))
            permalinks[article.permalink] = article.name
            output_path = os.path.join(output, *os.path.split(article.permalink))
            if article.publish:
                make_sure_dir_exists(output_path)
                with open(output_path, 'wb') as f:
                    try:
                        f.write(generate_article(d, environment, article))
                    except:  # NOTE(review): bare except hides the real render error
                        logging.warning("Error generating article: %s", article.name)
                        pass
                logging.info("File written: %s", output_path)
            else:
                logging.info("Not publishing %s", article.name)
                # Remove a previously published copy, if any; ignore absence.
                try:
                    os.remove(output_path)
                except:
                    pass


def inject_scripts(article):
    """Return the jQuery-based live-editing <script> payload for ``article``.

    ``$$`` is string.Template's escape for a literal ``$`` (jQuery).
    NOTE(review): the JS references an undefined variable ``before`` in the
    failure branch of the save callback -- likely dead/buggy code.
    """
    return Template(''' <script src="//code.jquery.com/jquery-3.3.1.min.js"></script> <script> var permalink = '${permalink}'; var block = null; function openDialog(je, file_contents) { if ($$('#save-button').length > 0) { return; } text_area = $$('<textarea>') text_area.val(file_contents); var saveB = $$('<button>').insertAfter(je).html("Save"); var cancelB = $$('<button>').insertAfter(saveB).html("Cancel"); saveB.attr('id', "save-button"); cancelB.attr('id', "cancel-button"); cancelB.click(function() { location.reload(); }); saveB.click(function() { $$.post('/feni_rt_save_/'+ permalink, {block: block, after: text_area.val()}, function(response) { block = null; if (response == 'failed') { je.html(before); } location.reload(); }); }); text_area.css({'width':(je.width()+'px')}); text_area.css({'height':(je.height()+'px')}); je.replaceWith(text_area); text_area.focus(); } $$(function() { $$('p, h1, h2, h3, h4, h5, h6, blockquote, b, i, strong, em, small, ul, ol').click(function(e) { if (block !== null) return; var je = $$(e.target); while (true) { block = je.data('block-count'); if (block === undefined) { je = je.parent(); if (je.is('body')) { break; } } else { break; } } if (block !== undefined) { $$.post('/feni_rt_get_/'+permalink, {block: block}, function(file_content) { openDialog(je, file_content); }); } }); }); </script>''').substitute({'permalink': article.permalink})


# Bottle SimpleTemplate used by the dev-server index page: one edit link per article.
template_html = ''' <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="description" content=""> <meta name="author" content=""> <link href="/styles.css" rel="stylesheet" type="text/css"></link> </head> <body class="language-markup"> <% for item in items: %> <div><a href="feni_rt_edit_/{{item.permalink}}">{{ item.name }}</a></div> <% end %> </body> </html>'''


@route('/feni_rt_get_/<permalink:path>', method='POST')
def get_matching(permalink):
    """Return the raw markdown of one block of the article at ``permalink``.

    NOTE(review): implicitly returns None when no article matches.
    """
    block = int(request.forms.get('block'))
    (config, _) = configuration.read()
    for article in articles.get_articles_from(config['source']):
        if article.permalink == permalink:
            md_content = preprocessor.process(article.content, article.name)
            blocks = md_content.split("\n\n")
            return blocks[block]


@route('/feni_rt_save_/<permalink:path>', method='POST')
def save(permalink):
    """Persist an edited block back into the matching article file."""
    block = int(request.forms.get('block'))
    after = request.forms.get('after')
    (config, _) = configuration.read()
    for article in articles.get_articles_from(config['source']):
        if article.permalink == permalink:
            if replace_block(article, block, after):
                return template("{{content}}", content="success")
    return template("{{content}}", content="failed")


@route('/feni_rt_edit_/<permalink:path>')
def edit(permalink):
    """Serve the article in edit mode with the live-editing script injected
    just before ``</head>``.
    """
    (config, _) = configuration.read()
    d = decorator.Decorator(config['decorator'], config['template'], config['templatemap'])
    d.add_handler('.less', filehandlers.LessProcessor)
    environment = jinja2.Environment(loader=jinja2.FileSystemLoader(d.template_folder))
    for article in articles.get_articles_from(config['source']):
        if article.permalink == permalink:
            article_html = generate_article(d, environment, article, edit=True)
            # generate_article returns bytes, so the injected script must be encoded too.
            article_html = article_html.replace(str.encode("</head>"), str.encode(inject_scripts(article) + "</head>"))
            return article_html
    return template("Article not found!")


@route('/')
def index():
    """Dev-server landing page: list all articles with edit links."""
    (config, _) = configuration.read()
    items = []
    for article in articles.get_articles_from(config['source']):
        items.append(article)
    return template(template_html, items=items)


@route('/<filename:path>', name='static')
def serve_static(filename):
    """Serve static assets straight from the decorator folder."""
    (config, _) = configuration.read()
    return static_file(filename, root=config['decorator'])


def main():
    """CLI entry point: run the dev server, create a sample site, or build.

    Server mode probes ports upward from 8080 until one is free.
    """
    try:
        (config, args) = configuration.read()
        if args.server:
            port = 8080
            while True:
                try:
                    run(host='0.0.0.0', port=port)
                    break
                except OSError:
                    # Port already in use -- try the next one.
                    port += 1
        elif args.sample:
            print("Generating sample")
            create_sample()
        else:
            generate(config['source'], config['output'], config['decorator'], config['template'], config['templatemap'])
            logging.info("Successfully created site in %s", config['output'])
    except Exception as ex:
        logging.error(str(ex))
        raise


# WSGI application object for bottle-based deployment.
app = default_app()
PypiClean
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/pip/_vendor/html5lib/treewalkers/base.py
from __future__ import absolute_import, division, unicode_literals

from xml.dom import Node

from ..constants import namespaces, voidElements, spaceCharacters

__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
           "TreeWalker", "NonRecursiveTreeWalker"]

# Node-type constants, aliased from xml.dom for tree-implementation-agnostic use.
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"

# spaceCharacters arrives as an iterable of characters; collapse to one string
# usable with str.lstrip/rstrip.
spaceCharacters = "".join(spaceCharacters)


class TreeWalker(object):
    """Walks a tree yielding tokens

    Tokens are dicts that all have a ``type`` field specifying the type of the
    token.
    """

    def __init__(self, tree):
        """Creates a TreeWalker

        :arg tree: the tree to walk
        """
        self.tree = tree

    def __iter__(self):
        raise NotImplementedError

    def error(self, msg):
        """Generates an error token with the given message

        :arg msg: the error message

        :returns: SerializeError token
        """
        return {"type": "SerializeError", "data": msg}

    def emptyTag(self, namespace, name, attrs, hasChildren=False):
        """Generates an EmptyTag token

        :arg namespace: the namespace of the token--can be ``None``

        :arg name: the name of the element

        :arg attrs: the attributes of the element as a dict

        :arg hasChildren: whether or not to yield a SerializationError because
            this tag shouldn't have children

        :returns: EmptyTag token
        """
        yield {"type": "EmptyTag", "name": name,
               "namespace": namespace,
               "data": attrs}
        if hasChildren:
            # Void elements must be childless; signal the inconsistency.
            yield self.error("Void element has children")

    def startTag(self, namespace, name, attrs):
        """Generates a StartTag token

        :arg namespace: the namespace of the token--can be ``None``

        :arg name: the name of the element

        :arg attrs: the attributes of the element as a dict

        :returns: StartTag token
        """
        return {"type": "StartTag",
                "name": name,
                "namespace": namespace,
                "data": attrs}

    def endTag(self, namespace, name):
        """Generates an EndTag token

        :arg namespace: the namespace of the token--can be ``None``

        :arg name: the name of the element

        :returns: EndTag token
        """
        return {"type": "EndTag",
                "name": name,
                "namespace": namespace}

    def text(self, data):
        """Generates SpaceCharacters and Characters tokens

        Depending on what's in the data, this generates one or more
        ``SpaceCharacters`` and ``Characters`` tokens.

        For example:

            >>> from html5lib.treewalkers.base import TreeWalker
            >>> # Give it an empty tree just so it instantiates
            >>> walker = TreeWalker([])
            >>> list(walker.text(''))
            []
            >>> list(walker.text('  '))
            [{u'data': '  ', u'type': u'SpaceCharacters'}]
            >>> list(walker.text(' abc '))  # doctest: +NORMALIZE_WHITESPACE
            [{u'data': ' ', u'type': u'SpaceCharacters'},
            {u'data': u'abc', u'type': u'Characters'},
            {u'data': u' ', u'type': u'SpaceCharacters'}]

        :arg data: the text data

        :returns: one or more ``SpaceCharacters`` and ``Characters`` tokens
        """
        data = data
        # Leading whitespace becomes its own SpaceCharacters token.
        middle = data.lstrip(spaceCharacters)
        left = data[:len(data) - len(middle)]
        if left:
            yield {"type": "SpaceCharacters", "data": left}
        # Trailing whitespace likewise; the remainder is Characters.
        data = middle
        middle = data.rstrip(spaceCharacters)
        right = data[len(middle):]
        if middle:
            yield {"type": "Characters", "data": middle}
        if right:
            yield {"type": "SpaceCharacters", "data": right}

    def comment(self, data):
        """Generates a Comment token

        :arg data: the comment

        :returns: Comment token
        """
        return {"type": "Comment", "data": data}

    def doctype(self, name, publicId=None, systemId=None):
        """Generates a Doctype token

        :arg name:

        :arg publicId:

        :arg systemId:

        :returns: the Doctype token
        """
        return {"type": "Doctype",
                "name": name,
                "publicId": publicId,
                "systemId": systemId}

    def entity(self, name):
        """Generates an Entity token

        :arg name: the entity name

        :returns: an Entity token
        """
        return {"type": "Entity", "name": name}

    def unknown(self, nodeType):
        """Handles unknown node types"""
        return self.error("Unknown node type: " + nodeType)


class NonRecursiveTreeWalker(TreeWalker):
    # Iterative (stack-free) tree walker; subclasses supply the four node
    # navigation primitives below.
    def getNodeDetails(self, node):
        raise NotImplementedError

    def getFirstChild(self, node):
        raise NotImplementedError

    def getNextSibling(self, node):
        raise NotImplementedError

    def getParentNode(self, node):
        raise NotImplementedError

    def __iter__(self):
        # Depth-first traversal without recursion: descend via first child,
        # then walk next-siblings, climbing back up (emitting EndTags) when a
        # subtree is exhausted.
        currentNode = self.tree
        while currentNode is not None:
            details = self.getNodeDetails(currentNode)
            type, details = details[0], details[1:]
            hasChildren = False

            if type == DOCTYPE:
                yield self.doctype(*details)

            elif type == TEXT:
                for token in self.text(*details):
                    yield token

            elif type == ELEMENT:
                namespace, name, attributes, hasChildren = details
                if (not namespace or namespace == namespaces["html"]) and name in voidElements:
                    # HTML void elements are emitted as EmptyTag and never descended into.
                    for token in self.emptyTag(namespace, name, attributes,
                                               hasChildren):
                        yield token
                    hasChildren = False
                else:
                    yield self.startTag(namespace, name, attributes)

            elif type == COMMENT:
                yield self.comment(details[0])

            elif type == ENTITY:
                yield self.entity(details[0])

            elif type == DOCUMENT:
                hasChildren = True

            else:
                yield self.unknown(details[0])

            if hasChildren:
                firstChild = self.getFirstChild(currentNode)
            else:
                firstChild = None

            if firstChild is not None:
                currentNode = firstChild
            else:
                # No children: emit EndTags while climbing until a sibling is found
                # or we are back at the root.
                while currentNode is not None:
                    details = self.getNodeDetails(currentNode)
                    type, details = details[0], details[1:]
                    if type == ELEMENT:
                        namespace, name, attributes, hasChildren = details
                        if (namespace and namespace != namespaces["html"]) or name not in voidElements:
                            yield self.endTag(namespace, name)
                    if self.tree is currentNode:
                        currentNode = None
                        break
                    nextSibling = self.getNextSibling(currentNode)
                    if nextSibling is not None:
                        currentNode = nextSibling
                        break
                    else:
                        currentNode = self.getParentNode(currentNode)
PypiClean
/LoginRadius-2.8.1.zip/LoginRadius-2.8.1/LoginRadius.py
from datetime import datetime  #Requires Python 2.6>
from collections import namedtuple  #Requires Python 2.7>
from importlib import import_module
import sys

__author__ = "LoginRadius"
__copyright__ = "Copyright 2016-2017, LoginRadius"
__email__ = "developers@loginradius.com"
__status__ = "Production"
__version__ = "2.8.1"

# Base endpoint for all API calls; individual methods append their path.
SECURE_API_URL = "https://api.loginradius.com/"
HEADERS = {'Accept': "application/json"}


class LoginRadius():
    """
    LoginRadius Class. Use this to obtain social data and other information
    from the LoginRadius API. Requires Python 2.7 or greater.
    """

    # Class-level configuration: must be set by the integrator before
    # instantiation. NOTE(review): `Exceptions` is referenced throughout but
    # defined elsewhere in this file (not visible here).
    API_SECRET = None
    LIBRARY = None

    def __init__(self, token):
        """
        Constructed when you want to retrieve social data with respect to the
        acquired token.

        :param token: token from LoginRadius Callback.
        :raise Exceptions.NoAPISecret: Raised if you did not set an API_SECRET.
        """
        self.user = None
        self.error = {}

        #Token from the POST request.
        self.token = token

        #Namedtuple for settings for each request and the api functions.
        self.settings = namedtuple("Settings", ['library', 'urllib', 'urllib2', 'json', 'requests'])
        self.api = LoginRadiusAPI(self)

        #We prefer to use requests with the updated urllib3 module.
        try:
            # NOTE(review): distutils is deprecated (removed in Python 3.12).
            from distutils.version import StrictVersion
            import requests

            if StrictVersion(requests.__version__) < StrictVersion("2.0"):
                raise Exceptions.RequestsLibraryDated(requests.__version__)
            else:
                self._settings("requests")

        #However, we can use urllib if there is no requests or it is outdated.
        except (ImportError, Exceptions.RequestsLibraryDated):
            self._settings("urllib2")

        #Well, something went wrong here.
        except:
            raise

        if not self.API_SECRET:
            raise Exceptions.NoAPISecret
        else:
            #Namedtuples for access, api, and user.
            self.access = self._get_access_tuple()
            self.user = self._get_user_tuple()

    #
    # Internal private functions
    #

    def _settings(self, library):
        """This sets the name tuple settings to whatever library you want.
        You may change this as you wish."""
        # A class-level LIBRARY override wins over the per-call argument.
        if LoginRadius.LIBRARY is not None:
            if LoginRadius.LIBRARY == "requests":
                self._set_requests()
            elif LoginRadius.LIBRARY == "urllib2":
                self._set_urllib2()
            else:
                raise Exceptions.InvalidLibrary(LoginRadius.LIBRARY)
        else:
            if library == "requests":
                self._set_requests()
            elif library == "urllib2":
                self._set_urllib2()
            else:
                raise Exceptions.InvalidLibrary(library)

    def _set_requests(self):
        """Change to the requests library to use."""
        self.settings.library = "requests"
        self.settings.requests = import_module("requests")
        self.settings.urllib2 = False

    def _set_urllib2(self):
        """Change to the requests urllib2 library to use."""
        # Python 2 and 3 expose the urllib machinery under different names.
        if sys.version_info[0] == 2:
            self.settings.urllib2 = import_module("urllib2")
            self.settings.urllib = import_module("urllib")
        else:
            self.settings.urllib2 = import_module("urllib.request")
            self.settings.urllib = import_module("urllib.parse")
        self.settings.library = "urllib2"
        self.settings.requests = False
        self.settings.json = import_module("json")

    def _get_user_tuple(self):
        """All the functions relative to the user with the token."""
        user = namedtuple("User", ['profile', 'photo', 'check_in', 'audio', 'album', 'video', 'contacts', 'status',
                                   'group', 'post', 'event', 'mention', 'company', 'following', 'page', 'like',
                                   'status_update', 'direct_message'])

        #Lazy get methods
        user.album = AlbumLazyLoad(self)
        user.audio = AudioLazyLoad(self)
        user.check_in = CheckInLazyLoad(self)
        user.company = CompanyLazyLoad(self)
        user.contacts = ContactsLazyLoad(self)
        user.event = EventLazyLoad(self)
        user.following = FollowingLazyLoad(self)
        user.group = GroupLazyLoad(self)
        user.like = LikeLazyLoad(self)
        user.mention = MentionLazyLoad(self)
        user.page = PageLazyLoad(self)
        user.photo = PhotoLazyLoad(self)
        user.post = PostLazyLoad(self)
        user.profile = UserProfileLazyLoad(self)
        user.status = StatusLazyLoad(self)
        user.video = VideoLazyLoad(self)

        #Post methods
        user.direct_message = self.api.direct_message
        user.status_update = self.api.status_update

        return user

    #Get access token
    def _get_access_tuple(self):
        """Access information like token and expire."""
        payload = {'token': self.token,
                   'secret': self.API_SECRET}
        url = SECURE_API_URL + "api/v2/access_token"
        results = self._get_json(url, payload)
        access = namedtuple("Access", ['token', 'expire', 'raw', 'valid'])
        access.raw = results
        if 'access_token' in results:
            access.token = results['access_token']
        else:
            raise Exceptions.MissingJsonResponseParameter('access_token', raw=access.raw)
        if 'expires_in' in results:
            access.expire = results['expires_in']
        else:
            raise Exceptions.MissingJsonResponseParameter('expires_in', raw=access.raw)
        # NOTE(review): this stores the *bound method*, not its result --
        # callers must invoke access.valid() to get a boolean.
        access.valid = self._validate_token
        return access

    def _get_json(self, url, payload):
        """Get JSON from LoginRadius"""
        if self.settings.requests:
            r = self.settings.requests.get(url, params=payload, headers=HEADERS)
            return self._process_result(r.json())
        else:
            # urllib fallback: encode the query string by hand.
            payload = self.settings.urllib.urlencode(payload)
            r = self.settings.urllib2.Request(url + "?" + payload)
            r.add_header('Accept', HEADERS['Accept'])
            try:
                data = self.settings.urllib2.urlopen(r)
            except self.settings.urllib2.HTTPError:
                raise
            return self._process_result(self.settings.json.load(data))

    def _post_json(self, url, payload):
        """Post JSON to LoginRadius"""
        if self.settings.requests:
            import json
            data = json.dumps(payload)
            # NOTE(review): appending the JSON dump to the URL *and* passing
            # params looks suspicious -- confirm against the API contract.
            r = self.settings.requests.post(url + "?" + data, params=payload, headers=HEADERS)
            return self._process_result(r.json())
        else:
            payload = self.settings.urllib.urlencode(payload)
            pdata = ''
            # Python 3 urllib requires the POST body to be bytes.
            if sys.version_info[0] == 3:
                pdata = pdata.encode('ascii')
            r = self.settings.urllib2.Request(url + "?" + payload, pdata, {'Content-Type': 'application/json'})
            for key, value in HEADERS.items():
                r.add_header(key, value)
            try:
                data = self.settings.urllib2.urlopen(r)
            except self.settings.urllib2.HTTPError:
                raise
            return self._process_result(self.settings.json.load(data))

    def _process_result(self, result):
        """Anything we need to parse or look for. In this case, just the errorCode"""
        # _process_error always raises, so the implicit None on that path is unreachable.
        if "errorCode" in result:
            self._process_error(result)
        else:
            return result

    def _process_error(self, result):
        """If there is an errorCode, let's figure out which one and raise the
        corresponding exception."""
        self.error = result
        if result['errorCode'] == 901:
            raise Exceptions.APIKeyInvalid
        elif result['errorCode'] == 902:
            raise Exceptions.APISecretInvalid
        elif result['errorCode'] == 903:
            raise Exceptions.InvalidRequestToken
        elif result['errorCode'] == 904:
            raise Exceptions.RequestTokenExpired
        elif result['errorCode'] == 905:
            raise Exceptions.InvalidAccessToken
        elif result['errorCode'] == 906:
            raise Exceptions.TokenExpired(self.access.expire)
        elif result['errorCode'] == 907:
            raise Exceptions.ParameterMissing
        elif result['errorCode'] == 908:
            raise Exceptions.ParameterNotFormatted
        elif result['errorCode'] == 909:
            raise Exceptions.FeatureNotSupported
        elif result['errorCode'] == 910:
            raise Exceptions.EndPointNotSupported
        else:
            raise Exceptions.UnknownJsonError(result)

    def _validate_token(self):
        """UTC time relative for checking if our token is still valid."""
        expire = datetime.strptime(self.access.expire, "%Y-%m-%dT%H:%M:%S.%fZ")
        if expire > datetime.utcnow():
            return True
        else:
            return False

    #
    # Public functions
    #

    def change_library(self, library):
        # Public wrapper to switch HTTP transport ("requests" or "urllib2").
        self._settings(library)


class LoginRadiusAPI(object):
    """Where all the API commands can be invoked locally."""

    def __init__(self, lr_object):
        """
        :param lr_object: this is the reference to the parent LoginRadius object.
        """
        self._lr_object = lr_object

    #
    # Read permissions
    #
    # Each getter below issues one GET with the stored access token; the
    # "*_raw" variants return provider-specific (unnormalized) payloads.

    def get_user_profile(self):
        """Retrieve basic profile information."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/userprofile/"
        return self._lr_object._get_json(url, payload)

    def get_user_profile_raw(self):
        """Retrieve basic profile information but unformatted based on the provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/userprofile/raw/"
        return self._lr_object._get_json(url, payload)

    def get_photo(self, album_id=''):
        """Get photos based on the album_id retrieved."""
        payload = {'access_token': self._lr_object.access.token, 'albumid': album_id}
        url = SECURE_API_URL + "api/v2/photo/"
        return self._lr_object._get_json(url, payload)

    def get_photo_raw(self, album_id=''):
        """Get photos based on the album_id retrieved but unformatted based on the provider."""
        payload = {'access_token': self._lr_object.access.token, 'albumid': album_id}
        url = SECURE_API_URL + "api/v2/photo/raw/"
        return self._lr_object._get_json(url, payload)

    def get_checkin(self):
        """Get check ins from profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/checkin/"
        return self._lr_object._get_json(url, payload)

    def get_checkin_raw(self):
        """Get check ins but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/checkin/raw/"
        return self._lr_object._get_json(url, payload)

    def get_album(self):
        """Get albums from profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/album/"
        return self._lr_object._get_json(url, payload)

    def get_album_raw(self):
        """Get albums from profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/album/raw/"
        return self._lr_object._get_json(url, payload)

    def get_audio(self):
        """Get audio from the profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/audio/"
        return self._lr_object._get_json(url, payload)

    def get_audio_raw(self):
        """Get audio from the profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/audio/raw/"
        return self._lr_object._get_json(url, payload)

    def get_video(self):
        """Get videos from the profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/video/"
        return self._lr_object._get_json(url, payload)

    def get_video_raw(self):
        """Get videos from the profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/video/raw/"
        return self._lr_object._get_json(url, payload)

    def get_contacts(self, next_cursor=''):
        """Get a list of contacts from the profile."""
        payload = {'access_token': self._lr_object.access.token, 'nextcursor': next_cursor}
        url = SECURE_API_URL + "api/v2/contact/"
        return self._lr_object._get_json(url, payload)

    def get_contacts_raw(self, next_cursor=''):
        """Get a list of contacts from the profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token, 'nextcursor': next_cursor}
        url = SECURE_API_URL + "api/v2/contact/raw/"
        return self._lr_object._get_json(url, payload)

    def get_status(self):
        """Get status updates from profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/status/"
        return self._lr_object._get_json(url, payload)

    def get_status_raw(self):
        """Get status updates from profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/status/raw/"
        return self._lr_object._get_json(url, payload)

    def get_group(self):
        """Get group data from profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/group/"
        return self._lr_object._get_json(url, payload)

    def get_group_raw(self):
        """Get group data from profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/group/raw/"
        return self._lr_object._get_json(url, payload)

    def get_post(self):
        """Get posts from profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/post/"
        return self._lr_object._get_json(url, payload)

    def get_post_raw(self):
        """Get posts from profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/post/raw/"
        return self._lr_object._get_json(url, payload)

    def get_event(self):
        """Get events from profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/event/"
        return self._lr_object._get_json(url, payload)

    def get_event_raw(self):
        """Get events from profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/event/raw/"
        return self._lr_object._get_json(url, payload)

    def get_mention(self):
        """Get mentions from profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/mention/"
        return self._lr_object._get_json(url, payload)

    def get_mention_raw(self):
        """Get mentions from profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/mention/raw/"
        return self._lr_object._get_json(url, payload)

    def get_company(self):
        """Get company from profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/company/"
        return self._lr_object._get_json(url, payload)

    def get_company_raw(self):
        """Get company from profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/company/raw/"
        return self._lr_object._get_json(url, payload)

    def get_following(self):
        """Get following/followers from profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/following/"
        return self._lr_object._get_json(url, payload)

    def get_following_raw(self):
        """Get following/followers from profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/following/raw/"
        return self._lr_object._get_json(url, payload)

    def get_activity(self):
        """Get activity from profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/activity/"
        return self._lr_object._get_json(url, payload)

    def get_activity_raw(self):
        """Get activity from profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/activity/raw/"
        return self._lr_object._get_json(url, payload)

    def get_page(self, page_name):
        """Get page information from profile."""
        payload = {'access_token': self._lr_object.access.token, 'pagename': page_name}
        url = SECURE_API_URL + "api/v2/page/"
        return self._lr_object._get_json(url, payload)

    def get_page_raw(self, page_name):
        """Get page information from profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token, 'pagename': page_name}
        url = SECURE_API_URL + "api/v2/page/raw/"
        return self._lr_object._get_json(url, payload)

    def get_like(self):
        """Get likes from profile."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/like/"
        return self._lr_object._get_json(url, payload)

    def get_like_raw(self):
        """Get likes from profile but in raw format based on provider."""
        payload = {'access_token': self._lr_object.access.token}
        url = SECURE_API_URL + "api/v2/like/raw/"
        return self._lr_object._get_json(url, payload)

    #
    #Write Permissions
    #

    def status_update(self, status, title='', url='', imageurl='', caption='', description=''):
        """
        Perform a status update on the profile on behalf of the user.
        Some of these arguments may be ignored depending on the provider.
        For what is and is not supported, please refer to:
        http://www.loginradius.com/datapoints
        """
        payload = {'access_token': self._lr_object.access.token, 'status': status, 'title': title,
                   'url': url, 'imageurl': imageurl, 'caption': caption, 'description': description}
        url = SECURE_API_URL + "api/v2/status/"
        return self._lr_object._post_json(url, payload)

    def direct_message(self, to, subject, message):
        """Direct message another user on behalf of this user."""
        payload = {'access_token': self._lr_object.access.token, 'to': to,
                   'subject': subject, 'message': message}
        url = SECURE_API_URL + "api/v2/message/"
        return self._lr_object._post_json(url, payload)


class LazyLoad(object):
    """"lazy load" the details when needed. This methodology is inspired by SQLAlchemy."""

    def __init__(self, lr_object, raw=False):
        """
        :param lr_object: this is the reference to the parent LoginRadius object.
        :param raw: This determines whether or not we should ask the API for provider independent data.
        """
        self._lr_object = lr_object
        self.data = None
        self.raw = raw

    def __str__(self):
        """ Return the string equivalent of the dictionary. """
        self._check()
        return str(self.data)

    def load(self):
        """ Promptly load the data instead of loading it on first access. """
        self.data = self.get()

    def get(self):
        """ This will get the JSON API and return it as a dictionary. """
        if not self.data:
            return self._get()
        else:
            return self.data

    def _get(self):
        """ Override this method when inheriting. This will get the JSON API and return it as a dictionary. """
        pass

    def flush(self):
        """ Clears the local data stored so that the next request doesn't default to the local cache and instead grabs it from the LoginRadius servers.
""" self.data = None def set_raw(self, state): """ Change the state of the raw parameter to determine what data set is grabbed from the provider. """ if isinstance(state, bool): self.raw = state else: raise TypeError def __repr__(self): """ General explanation of what type of object for debugging. """ return "LoginRadius LazyLoad Object" def __getitem__(self, item): """ Get item from the dictionary. """ self._check() return self.data[item] def __setitem__(self, key, value): """ Allow the user to set the items in the dictionary as well. """ self._check() self.data[key] = value def _check(self): """ Simple check to see if we already got the data for this Lazy Load. If not, get it. """ if not self.data: self.data = self.get() class UserProfileLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(UserProfileLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_user_profile() else: return self._lr_object.api.get_user_profile_raw() class PhotoLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False, album_id=''): self.album_id = album_id super(PhotoLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_photo(album_id=self.album_id) else: return self._lr_object.api.get_photo_raw(album_id=self.album_id) class CheckInLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(CheckInLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_checkin() else: return self._lr_object.api.get_checkin_raw() class AlbumLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(AlbumLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_album() else: return self._lr_object.api.get_album_raw() class AudioLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(AudioLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if 
not self.raw: return self._lr_object.api.get_audio() else: return self._lr_object.api.get_audio_raw() class VideoLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(VideoLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_video() else: return self._lr_object.api.get_video_raw() class ContactsLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False, next_cursor=''): self.next_cursor = next_cursor super(ContactsLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_contacts(next_cursor=self.next_cursor) else: return self._lr_object.api.get_contacts_raw(next_cursor=self.next_cursor) class StatusLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(StatusLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_status() else: return self._lr_object.api.get_status_raw() class GroupLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(GroupLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_group() else: return self._lr_object.api.get_group_raw() class PostLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(PostLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_post() else: return self._lr_object.api.get_post_raw() class EventLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(EventLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_event() else: return self._lr_object.api.get_event_raw() class MentionLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(MentionLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_mention() else: return self._lr_object.api.get_mention_raw() class 
CompanyLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(CompanyLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_company() else: return self._lr_object.api.get_company_raw() class FollowingLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(FollowingLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_following() else: return self._lr_object.api.get_following_raw() class ActivityLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(ActivityLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_activity() else: return self._lr_object.api.get_activity_raw() class PageLazyLoad(LazyLoad): def __init__(self, lr_object, page_name='', raw=False): self.page_name = page_name super(PageLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_page(self.page_name) else: return self._lr_object.api.get_page_raw(self.page_name) class LikeLazyLoad(LazyLoad): def __init__(self, lr_object, raw=False): super(LikeLazyLoad, self).__init__(lr_object, raw=raw) def _get(self): if not self.raw: return self._lr_object.api.get_like() else: return self._lr_object.api.get_like_raw() class LoginRadiusExceptions(Exception): """ This is the base for all LoginRadius Exceptions. Makes dealing with exceptions easy! """ def __init__(self): pass def __str__(self): return "" class Exceptions: """ Common exceptions used by LoginRadius. """ def __init__(self): pass class RequestsLibraryDated(LoginRadiusExceptions): """ Raise exception if module requests is outdated. By default 0.12 is included in Python. We need at least version 2.0 """ def __init__(self, version): self.version = str(version) def __str__(self): return "LoginRadius needs at least requests 2.0, found: " \ + self.version + "\nPlease upgrade to the latest version." 
class InvalidLibrary(LoginRadiusExceptions): """ Raised on trying to change library to through _settings on invalid library argument. """ def __init__(self, library): self.library = library def __str__(self): return "Invalid library string given. Got: " + str(self.library) + ". Valid cases: 'requests' or " + \ "'urllib2'" class NoAPISecret(LoginRadiusExceptions): """ Raised on construction of the LoginRadius object, if no API_SECRET has been set for the class. """ def __init__(self, version): self.version = str(version) def __str__(self): return "No API_SECRET set. Please initialize a API_SECRET first.\n" \ + "ie. LoginRadius.API_SECRET = \"Really_Secret_Key\"" class MissingJsonResponseParameter(LoginRadiusExceptions): """ Raised if construction of namedtuple would fail because missing expected response from LoginRadius API. """ def __init__(self, missing_parameter, raw=None): self.missing_parameter = missing_parameter self.raw = raw def __str__(self): exception_string = "Expected parameter from JSON response does not exist." + \ " Expected: " + self.missing_parameter + " but was not in" + \ " the dictionary." if self.raw: exception_string += " Instead, we got: " + str(self.raw) return exception_string class TokenExpired(LoginRadiusExceptions): """ Raised if the request cannot be completed because the access token has expired. """ def __init__(self, time): self.time = time def __str__(self): return "The request cannot be completed because the token has expired. " + \ "The token expired on: " + self.time class FeatureNotSupported(LoginRadiusExceptions): """ Raised if the request cannot be completed because your account/API access does not include this. """ def __init__(self): pass def __str__(self): return "Your LoginRadius site doesn't have permission to access this endpoint, please contact " +\ "LoginRadius support if you need more information." 
class UnknownJsonError(LoginRadiusExceptions): """ Raised if cannot determine error number from Json """ def __init__(self, result): self.result = result def __str__(self): return "The request cannot be completed because LoginRadius raised an unknown error in the API request." + \ " More information can be found in the error attribute or in this raw data: " + str(self.result) class APIKeyInvalid(LoginRadiusExceptions): """ Raised if you entered your API wrong, or not at all. """ def __init__(self): pass def __str__(self): return "The LoginRadius API Key is not valid, please double check your account." class APISecretInvalid(LoginRadiusExceptions): """ Raised if you your API Secret is invalid. """ def __init__(self): pass def __str__(self): return "The LoginRadius API Secret is not valid, please double check your account." class InvalidRequestToken(LoginRadiusExceptions): """ Raised if you your request token is invalid from the POST request. """ def __init__(self): pass def __str__(self): return "The LoginRadius Request Token is invalid, please verify the authentication response." class RequestTokenExpired(LoginRadiusExceptions): """ Raised if you your request token has expired from the POST request. """ def __init__(self): pass def __str__(self): return "The LoginRadius Request Token has expired, please verify the authentication response." class InvalidAccessToken(LoginRadiusExceptions): """ Raised if you access token is invalid. """ def __init__(self): pass def __str__(self): return "The LoginRadius Access Token has expired, please get a new token from the LoginRadius API." class ParameterMissing(LoginRadiusExceptions): """ Raised if a parameter in the GET or POST request is missing. """ def __init__(self): pass def __str__(self): return "A parameter is missing in the request, please check all parameters in the API call." 
class ParameterNotFormatted(LoginRadiusExceptions): """ Raised if a parameter in the GET or POST request is not formatted properly for the provider. """ def __init__(self): pass def __str__(self): return "A parameter is not formatted well in the request, please check all the parameters in the API call." class EndPointNotSupported(LoginRadiusExceptions): """ Raised if a the endpoint is not supported by the provider which correlates to the token. """ def __init__(self): pass def __str__(self): return "The requested endpoint is not supported by the current ID provider, " + \ "please check the API support page at http://www.loginradius.com/datapoints"
PypiClean
/NeodroidAgent-0.4.8-py36-none-any.whl/neodroidagent/agents/exclude/model_based/world_model_agent.py
import numpy from neodroidagent.sessions.single_agent.parallel import ParallelSession from neodroidagent.training.procedures import Agent, episodic_training from tqdm import tqdm from neodroidagent.agents.torch_agents.model_free import DDPGAgent from neodroidagent.entry_points import session_entry_point __author__ = "Christian Heider Nielsen" tqdm.monitor_interval = 0 class WorldModelAgent(Agent): """ As of https://worldmodels.github.io/, https://arxiv.org/abs/1803.10122 Parameters ---------- actor_optimizer_spec: OptimiserSpec Specifying the constructor and kwargs, as well as learning rate and other parameters for the optimiser critic_optimizer_spec: OptimiserSpec num_feature: int The number of features of the environmental state num_action: int The number of available actions that agent can choose from replay_memory_size: int How many memories to store in the replay memory. batch_size: int How many transitions to sample each time experience is replayed. tau: float The update rate that target networks slowly track the learned networks. """ def _train( self, states: numpy.ndarray, actions: numpy.ndarray, rewards: numpy.ndarray, terminals: numpy.ndarray, ): pass # region Test def wm_test(rollouts=None, skip=True): import neodroidagent.configs.agent_test_configs.ddpg_test_config as C if rollouts: C.ROLLOUTS = rollouts session_entry_point( DDPGAgent, C, session=ParallelSession( procedure=episodic_training, auto_reset_on_terminal_state=True ), parse_args=False, skip_confirmation=skip_confirmation, ) def wm_run(rollouts=None, skip=True): import neodroidagent.configs.agent_test_configs.ddpg_test_config as C if rollouts: C.ROLLOUTS = rollouts session_entry_point( DDPGAgent, C, session=ParallelSession( procedure=episodic_training, auto_reset_on_terminal_state=True ), parse_args=False, skip_confirmation=skip_confirmation, ) if __name__ == "__main__": wm_run() # endregion
PypiClean
/KratosMultilevelMonteCarloApplication-9.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/KratosMultiphysics/MultilevelMonteCarloApplication/XMC/xmc/classDefs_solverWrapper/KratosSolverWrapper.py
import math import pickle import warnings import numpy as np # Import Kratos import KratosMultiphysics from KratosMultiphysics.MultilevelMonteCarloApplication.adaptive_refinement_utilities import AdaptiveRefinement from KratosMultiphysics.MultilevelMonteCarloApplication.tools import ParametersWrapper # Import XMC import xmc.solverWrapper as sw import xmc.classDefs_solverWrapper.methodDefs_KratosSolverWrapper.solve as mds import xmc.classDefs_solverWrapper.methodDefs_KratosSolverWrapper.mpi_solve as mpi_mds import xmc.classDefs_solverWrapper.methodDefs_KratosSolverWrapper.utilities as mdu from xmc.tools import dynamicImport # Import distributed environment from exaqute import * class KratosSolverWrapper(sw.SolverWrapper): """ Solver wrapper class managing Kratos Multiphysics (Kratos) solver. Attributes: analysis: Kratos analysis stage. The analysis stage class default name is SimulationScenario. The default file name is simulation_scenario. It is imported with the "analysisStage" key. By default an example analysis stage is called. adaptive_refinement_jump_to_finest_level: boolean. Used in multilevel algorithms when "stochastic_adaptive_refinement" strategy is selected. If true, intermediate refinement indices are skipped. Set by adaptiveRefinementJumpToFinestLevel key. asynchronous: boolean. If true, the asynchronous algorithm should be run. If false, the standard synchronous algorithms should be run. Set by asynchronous key. different_tasks: boolean. Used in multilevel algorithms when "stochastic_adaptive_refinement" strategy is selected. If true, different indices are run all together in the same task. If false, each index is run in a different task. Set by not TaskAllAtOnce key. fake_sample_to_serialize: list. A variable which is used just to serialize the Kratos Model and the Kratos Parameters. The list should be of the same type of the random variable generated by the generator. Set by fakeRandomVariable key. is_mpi: boolean. 
This booleans states if the Kratos problem is solved in MPI or in serial. mapping_output_quantities: boolean. If true, the analysis stage is prepared to map the variables of interest to a reference Kratos Model. By default, such Kratos Model is the coarsest index. Set by mappingOutputQuantities key. number_contributions_per_instance: integer. Defines the number of realization per each solve call. Useful if one wants to exploit ensemble average, together with hierarchical Monte Carlo methods. Set by numberContributionsPerInstance key. outputBatchSize: integer. Defines the size of each sub-list of the Quantities of Interest list which is returned by the solve method. It is alternative to outputDimension, defined below. Set by OutputBatchSize. outputDimension: integer or list of integers. If integer, equals to len(sample), where sample is the first output argument of self.solve(). If list of integers, then it means that samples are split in future lists, and outputDimension is [len(subSample) for subSample in sample]. Set by OutputDimension key. print_to_file: boolean. If true, prepares the distributed environment programing model PyCOMPSs to write a file inside the solve task. Set by printToFile key. project_parameters_path: string or list of strings. Defines the path to Kratos Project Parameters. Set by projectParametersPath key. qoi_estimator: list of strings. Each string is the moment estimator name to which each quantity of interest is associated. refinement_parameters_path: string. Define the path to the Kratos Adaptive Refinement Project Parameters. Set by refinementParametersPath key. refinement_strategy: string. Options are: - "reading_from_file", - "deterministic_adaptive_refinement", - "stochastic_adaptive_refinement". It defines the refinement strategy for multilevel algorithms. Set by refinementStrategy key. The standard for single-level Monte Carlo algorithms is "reading_from_file". 
However, also other strategies work, if refinementParametersPath key is set. size_multi_x_moment_estimator: integer. Defines the size of each vector quantity if interest. If integer, vector quantities of interest have the same size. It is required to set a priori this value only because of returnZeroQoiAndTime_Task, which needs to know how many 0s to return. Set by sizeMultiXMomentEstimator. Obs: in future, also a list will be supported. If list, the list has the same length of numberMultiMomentEstimator+numberMultiCombinedMomentEstimator. Main methods: serialize: Method serializing Kratos Model and Kratos Parameters. solve: Method running the problem. Other methods are called from the two methods defined above. """ # TODO: are both outputBatchSize and outputBatchSize needed? Probably not. def __init__(self,**keywordArgs): super().__init__(**keywordArgs) self.analysis = dynamicImport(keywordArgs.get( "analysisStage", ("xmc.classDefs_solverWrapper.methodDefs_KratosSolverWrapper" ".simulation_definition.SimulationScenario") )) self.adaptive_refinement_jump_to_finest_level = keywordArgs.get("adaptiveRefinementJumpToFinestLevel",False) self.asynchronous = keywordArgs.get("asynchronous",False) self.different_tasks = not keywordArgs.get("taskAllAtOnce",True) self.fake_sample_to_serialize = keywordArgs.get("fakeRandomVariable") self.mapping_output_quantities = keywordArgs.get("mappingOutputQuantities",False) self.is_mpi = keywordArgs.get("isMpi", False) self.number_contributions_per_instance = keywordArgs.get("numberContributionsPerInstance",1) self.outputBatchSize = keywordArgs.get("outputBatchSize",1) self.print_to_file = keywordArgs.get("printToFile",False) self.project_parameters_path = keywordArgs.get("projectParametersPath") self.qoi_estimator = keywordArgs.get("qoiEstimator") self.refinement_parameters_path = keywordArgs.get("refinementParametersPath") self.refinement_strategy = keywordArgs.get("refinementStrategy") self.size_multi_x_moment_estimator = 
keywordArgs.get("sizeMultiXMomentEstimator",-1) # remove after returnZeroQoiAndTime_Task is removed # Set outputDimension self.outputDimension = keywordArgs.get("outputDimension",None) # If not given, compute from self.outputBatchSize for backward compatibility if self.outputDimension is None: outputNb = self._numberOfOutputs() # Total number of output splits, including (possibly) a last one of smaller size batchNb = int(math.ceil(outputNb/self.outputBatchSize)) # Assemble the list of sizes of each split # They are all equal to outputBatchSize, except perhaps the last one # E.g. outputBatchSize=2 and outputNb=5 gives [2,2,1] self.outputDimension = [min(self.outputBatchSize, outputNb-i*self.outputBatchSize) for i in range(batchNb)] # workaround for Monte Carlo if (self.solverWrapperIndex == []): if self.refinement_strategy != "reading_from_file": msg = self.__class__.__name__ msg += ": Running a single-level Monte Carlo algorithm. " msg += "Default \"refinementStrategy\" is \"reading_from_file\". " msg += "Running with {} instead. ".format(self.refinement_strategy) msg += "This implies that \"refinementParametersPath\" is required for running, and it will not be used." warnings.warn(msg, RuntimeWarning) self.solverWrapperIndex.append(0) if (self.solverWrapperIndex[0] >= 0): # for index < 0 not needed if (self.asynchronous is not True): # synchronous framework self.serialize() else: # asynchronous framework pass def serialize(self): """ Method serializing Kratos Model and Kratos Parameters. 
Inputs: """ if self.refinement_strategy not in ["stochastic_adaptive_refinement","deterministic_adaptive_refinement", "reading_from_file"]: raise Exception ("Select KratosMultiphysics refinement stategy.\nOptions:\ \n i) stochastic_adaptive_refinement\ \n ii) deterministic_adaptive_refinement\ \n iii) reading_from_file") self.is_project_parameters_pickled = False self.is_model_pickled = False self.is_custom_settings_metric_refinement_pickled = False self.is_custom_settings_remesh_refinement_pickled = False if self.refinement_strategy != "reading_from_file": self.SetRefinementParameters() self.SerializeRefinementParameters() self.SerializeModelParameters() print(self.__class__.__name__, ": Model and parameters serialized correctly.") def solve(self,random_variable): """ Method running the problem. Inputs: random_variable: list. Random event in the form of list. Outputs: qoi_list: List of structure respecting self.outputDimension. It contains the quantities of interest. time_for_qoi: float. Measure of time to generate the sample. 
""" if all([component>=0 for component in self.solverWrapperIndex]): aux_qoi_array = [] # loop over contributions (by default only one) for contribution_counter in range (0,self.number_contributions_per_instance): # store current contribution self.current_local_contribution = contribution_counter # if multiple ensembles, append a seed to the random variable list # for example, this seed is used to generate different initial conditions if self.number_contributions_per_instance > 1: random_variable.append(int(np.random.uniform(0,429496729))) # solve if (self.refinement_strategy == "stochastic_adaptive_refinement"): qoi,time_for_qoi = self.executeInstanceStochasticAdaptiveRefinement(random_variable) elif (self.refinement_strategy == "deterministic_adaptive_refinement"): qoi,time_for_qoi = self.executeInstanceDeterministicAdaptiveRefinement(random_variable) elif (self.refinement_strategy == "reading_from_file"): qoi,time_for_qoi = self.executeInstanceReadingFromFile(random_variable) # append components to aux array aux_qoi_array.append(qoi) # delete COMPSs future objects no longer needed delete_object(random_variable) # postprocess components if self.number_contributions_per_instance > 1: unm = mdu.UnfolderManager(self._numberOfOutputs(), self.outputBatchSize) if (self._numberOfOutputs() == self.outputBatchSize): qoi_list = [unm.PostprocessContributionsPerInstance_Task(aux_qoi_array,self.qoi_estimator,returns=math.ceil(self._numberOfOutputs()/self.outputBatchSize))] elif (self._numberOfOutputs() > self.outputBatchSize): qoi_list = unm.PostprocessContributionsPerInstance_Task(aux_qoi_array,self.qoi_estimator,returns=math.ceil(self._numberOfOutputs()/self.outputBatchSize)) else: raise Exception("_numberOfOutputs() returns a value smaller than self.outputBatchSize. 
Set outputBatchSize smaller or equal to the number of scalar outputs.") delete_object(unm) else: # unfold qoi into its components of fixed size unm = mdu.UnfolderManager(self._numberOfOutputs(), self.outputBatchSize) if (self._numberOfOutputs() == self.outputBatchSize): qoi_list = [unm.UnfoldNValues_Task(aux_qoi_array[0],returns=math.ceil(self._numberOfOutputs()/self.outputBatchSize))] elif (self._numberOfOutputs() > self.outputBatchSize): qoi_list = unm.UnfoldNValues_Task(aux_qoi_array[0],returns=math.ceil(self._numberOfOutputs()/self.outputBatchSize)) else: raise Exception("_numberOfOutputs() returns a value smaller than self.outputBatchSize. Set outputBatchSize smaller or equal to the number of scalar outputs.") # delete COMPSs future objects no longer needed delete_object(unm) # delete COMPSs future objects no longer needed for contribution_counter in range (0,self.number_contributions_per_instance): delete_object(aux_qoi_array[contribution_counter]) #delete_object(qoi) del(aux_qoi_array) else: qoi,time_for_qoi = mds.returnZeroQoiAndTime_Task(self.qoi_estimator, self.size_multi_x_moment_estimator) # unfold qoi into its components of fixed size unm = mdu.UnfolderManager(self._numberOfOutputs(), self.outputBatchSize) if (self._numberOfOutputs() == self.outputBatchSize): qoi_list = [unm.UnfoldNValues_Task(qoi,returns=math.ceil(self._numberOfOutputs()/self.outputBatchSize))] elif (self._numberOfOutputs() > self.outputBatchSize): qoi_list = unm.UnfoldNValues_Task(qoi,returns=math.ceil(self._numberOfOutputs()/self.outputBatchSize)) else: raise Exception("_numberOfOutputs() returns a value smaller than self.outputBatchSize. 
Set outputBatchSize smaller or equal to the number of scalar outputs.")
        # delete COMPSs future objects no longer needed
        delete_object(unm)
        delete_object(qoi)
        return qoi_list,time_for_qoi


    ####################################################################################################
    ######################################### EXECUTION TOOLS ##########################################
    ####################################################################################################

    def executeInstanceStochasticAdaptiveRefinement(self,random_variable):
        """
        Method executing an instance of the UQ algorithm, i.e. a single MC realization and
        eventually the refinement (that occurs before the simulation run). To be called if the
        selected refinement strategy is stochastic_adaptive_refinement.

        Inputs:

        random_variable: list. Random event in the form of list.

        Outputs:

        qoi: list. It contains the quantities of interest.
        time_for_qoi: float. Measure of time to generate the sample.
        """
        # local variables (copied to locals so they can be shipped to scheduled tasks)
        current_index = self.solverWrapperIndex[0]
        pickled_coarse_model = self.pickled_model[0]
        pickled_reference_model_mapping = pickled_coarse_model
        pickled_coarse_project_parameters = self.pickled_project_parameters[0]
        pickled_custom_metric_refinement_parameters = self.pickled_custom_metric_refinement_parameters
        pickled_custom_remesh_refinement_parameters = self.pickled_custom_remesh_refinement_parameters
        current_analysis = self.analysis
        different_tasks = self.different_tasks
        mapping_flag = self.mapping_output_quantities
        adaptive_refinement_jump_to_finest_level = self.adaptive_refinement_jump_to_finest_level
        print_to_file = self.print_to_file
        current_local_contribution = self.current_local_contribution
        time_for_qoi = 0.0
        if (different_tasks is False): # single task
            # all refinement levels and the final solve run inside one scheduled task
            if self.is_mpi:
                qoi,time_for_qoi = \
                    mpi_mds.executeInstanceStochasticAdaptiveRefinementAllAtOnce_Wrapper(current_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_analysis,time_for_qoi,mapping_flag,adaptive_refinement_jump_to_finest_level,print_to_file,current_local_contribution)
            else:
                qoi,time_for_qoi = \
                    mds.executeInstanceStochasticAdaptiveRefinementAllAtOnce_Wrapper(current_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_analysis,time_for_qoi,mapping_flag,adaptive_refinement_jump_to_finest_level,print_to_file,current_local_contribution)
        elif (different_tasks is True): # multiple tasks
            # one scheduled task per refinement level
            if (current_index == 0): # index = 0
                current_local_index = 0
                if self.is_mpi:
                    qoi,pickled_current_model,time_for_qoi = \
                        mpi_mds.executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(current_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_local_index,current_analysis,time_for_qoi,mapping_flag,print_to_file,current_local_contribution)
                else:
                    qoi,pickled_current_model,time_for_qoi = \
                        mds.executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(current_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_local_index,current_analysis,time_for_qoi,mapping_flag,print_to_file,current_local_contribution)
                delete_object(pickled_current_model)
            else: # index > 0
                # NOTE(review): this branch always calls the serial (mds) wrappers, unlike the
                # branches above which dispatch on self.is_mpi — confirm this is intentional.
                for current_local_index in range(current_index+1):
                    if ((adaptive_refinement_jump_to_finest_level is False) or (adaptive_refinement_jump_to_finest_level is True and (current_local_index == 0 or current_local_index == current_index))):
                        if (mapping_flag is False):
                            qoi,pickled_current_model,time_for_qoi = \
                                mds.executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(current_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_local_index,current_analysis,time_for_qoi,mapping_flag,print_to_file,current_local_contribution)
                        elif (mapping_flag is True):
                            qoi,pickled_current_model,time_for_qoi = \
                                mds.executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(current_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_local_index,current_analysis,time_for_qoi,mapping_flag,print_to_file,current_local_contribution,pickled_mapping_reference_model=pickled_reference_model_mapping)
                        # chain: the refined model of this level becomes the coarse model of the next level
                        delete_object(pickled_coarse_model)
                        del(pickled_coarse_model)
                        pickled_coarse_model = pickled_current_model
                        del(pickled_current_model)
                    else: # not running since we jump from coarsest to finest level
                        pass
                delete_object(pickled_coarse_model)
        else:
            raise Exception ("Boolean variable different task is not a boolean, instead is equal to",different_tasks)
        return qoi,time_for_qoi

    def executeInstanceDeterministicAdaptiveRefinement(self,random_variable):
        """
        Method executing an instance of the UQ algorithm, i.e. a single MC realization and eventually the refinement (that occurs before the simulation run). To be called if the selected refinement strategy is deterministic_adaptive_refinement.

        Inputs:

        random_variable: list. Random event in the form of list.

        Outputs:

        qoi: list. It contains the quantities of interest.
        time_for_qoi: float. Measure of time to generate the sample.
""" # local variables current_index = self.solverWrapperIndex[0] pickled_model = self.pickled_model[current_index] pickled_mapping_reference_model = self.pickled_model[0] pickled_project_parameters = self.pickled_project_parameters[current_index] mapping_flag = self.mapping_output_quantities print_to_file = self.print_to_file current_local_contribution = self.current_local_contribution current_analysis = self.analysis time_for_qoi = 0.0 if self.is_mpi: qoi,time_for_qoi = mpi_mds.executeInstanceDeterministicAdaptiveRefinement_Wrapper(current_index,pickled_model,pickled_project_parameters,current_analysis,random_variable,time_for_qoi,mapping_flag,pickled_mapping_reference_model,print_to_file,current_local_contribution) else: qoi,time_for_qoi = mds.executeInstanceDeterministicAdaptiveRefinement_Wrapper(current_index,pickled_model,pickled_project_parameters,current_analysis,random_variable,time_for_qoi,mapping_flag,pickled_mapping_reference_model,print_to_file,current_local_contribution) return qoi,time_for_qoi def executeInstanceReadingFromFile(self,random_variable): """ Method executing an instance of the UQ algorithm, i.e. a single MC realization and eventually the refinement (that occurs before the simulation run). To be called if the selected refinement strategy is reading_from_file. Inputs: random_variable: list. Random event in the form of list. Outputs: qoi: list. It contains the quantities of interest. time_for_qoi: float. Measure of time to generate the sample. 
""" # local variables current_index = self.solverWrapperIndex[0] pickled_model = self.pickled_model[current_index] pickled_mapping_reference_model = self.pickled_mapping_reference_model[current_index] pickled_project_parameters = self.pickled_project_parameters[current_index] mapping_flag = self.mapping_output_quantities print_to_file = self.print_to_file current_local_contribution = self.current_local_contribution current_analysis = self.analysis time_for_qoi = 0.0 if self.is_mpi: qoi,time_for_qoi = mpi_mds.executeInstanceReadingFromFile_Wrapper(current_index,pickled_model,pickled_project_parameters,current_analysis,random_variable,time_for_qoi,mapping_flag,pickled_mapping_reference_model,print_to_file,current_local_contribution) else: qoi,time_for_qoi = mds.executeInstanceReadingFromFile_Wrapper(current_index,pickled_model,pickled_project_parameters,current_analysis,random_variable,time_for_qoi,mapping_flag,pickled_mapping_reference_model,print_to_file,current_local_contribution) return qoi,time_for_qoi #################################################################################################### ####################################### SERIALIZATION TOOLS ######################################## #################################################################################################### def SerializeModelParameters(self): """ Method managing the serialization and pickling of the Kratos Model and the Kratos Parameters of the problem. It builds self.pickled_model and self.pickled_project_parameters. The serialization-pickling process is the following: - from Model/Parameters Kratos object to MpiSerializer Kratos object, - from MpiSerializer Kratos object to pickle string, - from pickle string to MpiSerializer Kratos object, - from MpiSerializer Kratos object to Model/Parameters Kratos object. Depending on the refinement strategy, three different methods may be called and are defined next. 
        We remark that creating the class member pickled_mapping_reference_model is required by MPI runs, since we need such coarse model to be serialized with the same number of processors of the MPI task.

        Inputs:
        """
        self.serialized_model = []
        self.serialized_project_parameters = []
        self.pickled_model = []
        self.pickled_project_parameters = []
        self.pickled_mapping_reference_model = []
        # dispatch on the configured refinement strategy
        if (self.refinement_strategy == "stochastic_adaptive_refinement"):
            self.SerializeModelParametersStochasticAdaptiveRefinement()
        elif (self.refinement_strategy == "deterministic_adaptive_refinement"):
            self.SerializeModelParametersDeterministicAdaptiveRefinement()
        elif (self.refinement_strategy == "reading_from_file"):
            self.SerializeModelParametersReadingFromFile()
        else:
            raise Exception ("Specify refinement_strategy: stochastic_adaptive_refinement or deterministic_adaptive_refinement or reading_from_file")
        self.is_project_parameters_pickled = True
        self.is_model_pickled = True

    def SerializeModelParametersStochasticAdaptiveRefinement(self):
        """
        Method serializing and pickling the Kratos Model and the Kratos Parameters of the problem. It builds self.pickled_model and self.pickled_project_parameters. To be called if the selected refinement strategy is stochastic_adaptive_refinement.

        Inputs:
        """
        with open(self.project_parameters_path,"r") as parameter_file:
            parameters = KratosMultiphysics.Parameters(parameter_file.read())
        # create wrapper instance to modify current project parameters
        self.wrapper = ParametersWrapper(parameters)
        # serialize and pickle parmeters to serialize the model in MPI
        self.wrapper.SetModelImportSettingsInputType("use_input_model_part")
        serialized_project_parameters_tmp = KratosMultiphysics.MpiSerializer()
        serialized_project_parameters_tmp.Save("ParametersSerialization",parameters)
        pickled_project_parameters_tmp = pickle.dumps(serialized_project_parameters_tmp, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)
        # remove the materials filename to pickle the parameters
        # this is required to read the materials only once
        # finally, we restore the materials filename to read the materials
        # in the model serialization
        # it is important to serialize first the parameters and then the model
        # to avoid additional data which may be added to the parameters
        # remove materials filename from Kratos settings and revert model part type
        materials_filename = self.wrapper.GetMaterialsFilename()
        self.wrapper.SetMaterialsFilename("")
        # serialize and pickle Kratos project parameters
        serialized_project_parameters = KratosMultiphysics.MpiSerializer()
        serialized_project_parameters.Save("ParametersSerialization",parameters)
        pickled_project_parameters = pickle.dumps(serialized_project_parameters, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)
        # append to attributes
        self.serialized_project_parameters.append(serialized_project_parameters)
        self.pickled_project_parameters.append(pickled_project_parameters)
        # reset to read the model part and materials filename
        self.wrapper.SetModelImportSettingsInputType("mdpa")
        self.wrapper.SetMaterialsFilename(materials_filename)
        # pickle and eventually serialize model
        if self.is_mpi:
            # consistency check: the Kratos run mode must match how XMC was launched
            if not parameters["problem_data"]["parallel_type"].GetString()=="MPI":
                raise(Exception("XMC is set in MPI but Kratos is not!"))
            # self.serialized_model cannot be retrieved in MPI so only pickled model is returned
            # returns: [[model1_1,model_1_2, model_1_3, model_1_4], [model2_1,model_2_2, model_2_3, model_2_4]]
            # we need to pass the index to serialize with the correct number of MPI processors
            # however, since pickled_project_parameters is the same across levels, the same model part is considered
            current_index = self.solverWrapperIndex[0]
            pickled_model = mpi_mds.SerializeMPIModel_Wrapper( \
                pickled_project_parameters_tmp, self.wrapper.GetModelPartName(), self.fake_sample_to_serialize, self.analysis, current_index=current_index)
        else:
            if parameters["problem_data"]["parallel_type"].GetString()=="MPI":
                raise(Exception("Kratos is set in MPI but XMC is not!"))
            serialized_model, pickled_model = mds.SerializeSerialModel_Task(pickled_project_parameters_tmp, self.wrapper.GetModelPartName(), self.fake_sample_to_serialize, self.analysis)
            # append to attribute
            self.serialized_model.append(serialized_model) # this only for non-MPI parallelism, since a serialized_model cannot be retrieved in MPI
        self.pickled_model.append(pickled_model)
        # remove temporary objects created for MPI serialization
        del(serialized_project_parameters_tmp) ; del(pickled_project_parameters_tmp)

    def SerializeModelParametersDeterministicAdaptiveRefinement(self):
        """
        Method serializing and pickling the Kratos Model and the Kratos Parameters of the problem. It builds self.pickled_model and self.pickled_project_parameters. To be called if the selected refinement strategy is deterministic_adaptive_refinement.

        Inputs:
        """
        # Serialize model and parameters of coarsest level (level = 0).
        # If we are running with MPI parallel type,
        # the model is being serialized in a MPI task
        # with the same number of processes required by level = self.solverWrapperIndex[0].
        # This strategy works in both cases the solverWrapper instance is solving level 0
        # or if it is solving levels > 0.
        self.SerializeModelParametersStochasticAdaptiveRefinement()
        # now serialize levels > 0
        number_levels_to_serialize = self.solverWrapperIndex[0]
        # same routine of executeInstanceStochasticAdaptiveRefinement() to build models and parameters, but here we save models and parameters
        pickled_coarse_model = self.pickled_model[0]
        pickled_coarse_project_parameters = self.pickled_project_parameters[0]
        pickled_custom_metric_refinement_parameters = self.pickled_custom_metric_refinement_parameters
        pickled_custom_remesh_refinement_parameters = self.pickled_custom_remesh_refinement_parameters
        current_analysis = self.analysis
        # generate the sample and prepare auxiliary variables we need
        fake_sample = self.fake_sample_to_serialize
        fake_computational_time = 0.0
        if (number_levels_to_serialize > 0):
            # refine level by level, chaining each refined model into the next iteration
            for current_level in range(number_levels_to_serialize+1):
                if not self.is_mpi: # serial
                    fake_qoi,pickled_current_model,fake_computational_time = \
                        mds.executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(number_levels_to_serialize,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,fake_sample,current_level,current_analysis,fake_computational_time,mapping_flag=False,print_to_file=False,current_contribution=0)
                elif self.is_mpi and current_level == number_levels_to_serialize: # MPI and we serialize level of interest
                    adaptive_refinement_jump_to_finest_level = self.adaptive_refinement_jump_to_finest_level
                    pickled_current_model = mpi_mds.SerializeDeterministicAdaptiveRefinementMPIModel_Wrapper(current_level,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,fake_sample,current_analysis,fake_computational_time,adaptive_refinement_jump_to_finest_level)
                else: # MPI parallel type and we do not serialize since it is not the level of interest
                    # we set pickled model equal to coarsest model as workaround
                    pickled_current_model = pickled_coarse_model
                del(pickled_coarse_model)
                pickled_coarse_model = pickled_current_model
                # save if current level > 0 (level = 0 has already been saved)
                if (current_level>0):
                    # save pickled and serialized model and parameters
                    self.pickled_model.append(pickled_current_model)
                    # self.serialized_model.append(pickle.loads(get_value_from_remote(pickled_current_model))) # commented since gives problem when solving with PyCOMPSs
                    self.pickled_project_parameters.append(pickled_coarse_project_parameters)
                    # self.serialized_project_parameters.append(pickle.loads(get_value_from_remote(pickled_coarse_project_parameters))) # commented since gives problem when solving with PyCOMPSs
                del(pickled_current_model)

    def SerializeModelParametersReadingFromFile(self):
        """
        Method serializing and pickling the Kratos Model and the Kratos Parameters of the problem. It builds self.pickled_model and self.pickled_project_parameters. To be called if the selected refinement strategy is reading_from_file.
        Inputs:
        """
        current_index = 0
        # one parameter file per level; build model/parameters for each of them
        for parameters_path in self.project_parameters_path:
            with open(parameters_path,"r") as parameter_file:
                parameters = KratosMultiphysics.Parameters(parameter_file.read())
            # create wrapper instance to modify current project parameters
            self.wrapper = ParametersWrapper(parameters)
            # serialize and pickle parmeters to serialize the model in MPI
            # it is not required to remove the materials, since the Kratos variable
            # IS_RESTARTED is set to True
            self.wrapper.SetModelImportSettingsInputType("use_input_model_part")
            serialized_project_parameters = KratosMultiphysics.MpiSerializer()
            serialized_project_parameters.Save("ParametersSerialization",parameters)
            # append to attributes
            self.serialized_project_parameters.append(serialized_project_parameters)
            pickled_project_parameters = pickle.dumps(serialized_project_parameters, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)
            self.pickled_project_parameters.append(pickled_project_parameters)
            # reset to read the model part
            self.wrapper.SetModelImportSettingsInputType("mdpa")
            # pickle and eventually serialize model
            if self.is_mpi:
                if not parameters["problem_data"]["parallel_type"].GetString()=="MPI":
                    raise(Exception("XMC is set in MPI but Kratos is not!"))
                # self.serialized_model cannot be retrieved in MPI so only pickled model is returned
                # returns: [[model1_1,model_1_2, model_1_3, model_1_4], [model2_1,model_2_2, model_2_3, model_2_4]]
                pickled_model = mpi_mds.SerializeMPIModel_Wrapper( \
                    pickled_project_parameters, self.wrapper.GetModelPartName(), self.fake_sample_to_serialize, self.analysis, current_index)
                # the mapping reference model is the coarsest (level 0) model, re-serialized with
                # the processor count of the current level's MPI task
                pickled_mapping_reference_model = mpi_mds.SerializeMPIModel_Wrapper( \
                    self.pickled_project_parameters[0], self.wrapper.GetModelPartName(), self.fake_sample_to_serialize, self.analysis, current_index)
                current_index += 1
            else:
                if parameters["problem_data"]["parallel_type"].GetString()=="MPI":
                    raise(Exception("Kratos is set in MPI but XMC is not!"))
                serialized_model, pickled_model = mds.SerializeSerialModel_Task(pickled_project_parameters, self.wrapper.GetModelPartName(), self.fake_sample_to_serialize, self.analysis)
                self.serialized_model.append(serialized_model) # this only for non-MPI parallelism, since a serialized_model cannot be retrieved in MPI
            # append to attribute
            self.pickled_model.append(pickled_model)
            if self.is_mpi:
                self.pickled_mapping_reference_model.append(pickled_mapping_reference_model)
            else:
                self.pickled_mapping_reference_model.append(self.pickled_model[0])

    def SerializeRefinementParameters(self):
        """
        Method serializing and pickling the custom setting metric and remeshing for the adaptive refinement. It requires self.custom_metric_refinement_parameters and self.custom_remesh_refinement_parameters. It builds self.pickled_custom_metric_refinement_parameters and self.pickled_custom_remesh_refinement_parameters.

        Inputs:
        """
        metric_refinement_parameters = self.custom_metric_refinement_parameters
        remeshing_refinement_parameters = self.custom_remesh_refinement_parameters
        # save parameters as MpiSerializer Kratos objects
        serialized_metric_refinement_parameters = KratosMultiphysics.MpiSerializer()
        serialized_metric_refinement_parameters.Save("MetricRefinementParametersSerialization",metric_refinement_parameters)
        serialized_remesh_refinement_parameters = KratosMultiphysics.MpiSerializer()
        serialized_remesh_refinement_parameters.Save("RemeshRefinementParametersSerialization",remeshing_refinement_parameters)
        # pickle parameters
        pickled_metric_refinement_parameters = pickle.dumps(serialized_metric_refinement_parameters, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)
        pickled_remesh_refinement_parameters = pickle.dumps(serialized_remesh_refinement_parameters, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)
        self.pickled_custom_metric_refinement_parameters = pickled_metric_refinement_parameters
        self.pickled_custom_remesh_refinement_parameters = pickled_remesh_refinement_parameters
        self.is_custom_settings_metric_refinement_pickled = True
        self.is_custom_settings_remesh_refinement_pickled = True


    ####################################################################################################
    ######################################### AUXILIARY TOOLS ##########################################
    ####################################################################################################

    def SetRefinementParameters(self):
        """
        Method reading the refinement parameters passed from json file.

        Inputs:
        """
        with open(self.refinement_parameters_path,"r") as parameter_file:
            parameters = KratosMultiphysics.Parameters(parameter_file.read())
        # "metric" is the current key; "hessian_metric" is accepted for backward compatibility
        if parameters.Has("metric"):
            self.custom_metric_refinement_parameters = parameters["metric"]
        elif parameters.Has("hessian_metric"):
            self.custom_metric_refinement_parameters = parameters["hessian_metric"]
            warnings.warn(
                (
                    "The metric settings are passed through the \"hessian_metric\" key."
                    " This is deprecated and will be removed soon."
                    " Instead, you should pass the metric settings using the \"metric\" key."
                ),
                FutureWarning,
            )
        else:
            raise Exception("Refinement parameters, set by refinement_parameters_path, does not contain the required key \"metric\".")
        # "remeshing" is the current key; "refinement_mmg" is accepted for backward compatibility
        if parameters.Has("remeshing"):
            self.custom_remesh_refinement_parameters = parameters["remeshing"]
        elif parameters.Has("refinement_mmg"):
            self.custom_remesh_refinement_parameters = parameters["refinement_mmg"]
            # NOTE(review): the message below says "metric settings" but refers to the remeshing
            # settings — confirm and correct the wording upstream (string kept as-is here).
            warnings.warn(
                (
                    "The remeshing settings are passed through the \"refinement_mmg\" key."
                    " This is deprecated and will be removed soon."
                    " Instead, you should pass the metric settings using the \"remeshing\" key."
                ),
                FutureWarning,
            )
        else:
            raise Exception("Refinement parameters, set by refinement_parameters_path, does not contain the required key \"remeshing\".")

    def ComputeMeshParameters(self):
        """
        Method computing the mesh discretization parameter self.mesh_parameters and the mesh sizes self.mesh_sizes.
The mesh parameter is the reciprocal of the minimum mesh size of the grid. Inputs: """ # unpickle and unserialize model and build Kratos Model object serialized_model = pickle.loads(self.pickled_model[0]) current_model = KratosMultiphysics.Model() serialized_model.Load("ModelSerialization",current_model) # unpickle and unserialize parameters and build Kratos Parameters object serialized_project_parameters = pickle.loads(self.pickled_project_parameters[0]) current_project_parameters = KratosMultiphysics.Parameters() serialized_project_parameters.Load("ParametersSerialization",current_project_parameters) # unpickle and unserialize metric refinement parameters and build Kratos Parameters objects serialized_custom_metric_refinement_parameters = pickle.loads(self.pickled_custom_metric_refinement_parameters) current_custom_metric_refinement_parameters = KratosMultiphysics.Parameters() serialized_custom_metric_refinement_parameters.Load("MetricRefinementParametersSerialization",current_custom_metric_refinement_parameters) self.mesh_sizes = [] self.mesh_parameters = [] level = self.solverWrapperIndex[0] adaptive_refinement_manager = AdaptiveRefinement(level,current_model,current_project_parameters,current_custom_metric_refinement_parameters,None) adaptive_refinement_manager.EstimateMeshSizeCurrentLevel() h_current_level = adaptive_refinement_manager.mesh_size mesh_parameter_current_level = h_current_level**(-1) self.mesh_sizes.append(h_current_level) self.mesh_parameters.append(mesh_parameter_current_level) def _numberOfOutputs(self): """ Internal method returning the total number of outputs, regardless of how how many members vector quantities of interest have. Inputs: """ return len(self.qoi_estimator)
PypiClean
/DoubleRatchet-1.0.3.tar.gz/DoubleRatchet-1.0.3/doubleratchet/recommended/crypto_provider_cryptography.py
from typing_extensions import assert_never

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.hmac import HMAC
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.primitives.padding import PKCS7

from .crypto_provider import CryptoProvider, HashFunction
from .. import aead


__all__ = [ # pylint: disable=unused-variable
    "CryptoProviderImpl"
]


def get_hash_algorithm(hash_function: HashFunction) -> hashes.HashAlgorithm:
    """
    Map a hash function identifier to its implementation.

    Args:
        hash_function: Identifier of a hash function.

    Returns:
        The implementation of the hash function as a cryptography
        :class:`~cryptography.hazmat.primitives.hashes.HashAlgorithm` object.
    """

    implementations = {
        HashFunction.SHA_256: hashes.SHA256,
        HashFunction.SHA_512: hashes.SHA512,
        HashFunction.SHA_512_256: hashes.SHA512_256
    }

    implementation = implementations.get(hash_function)
    if implementation is not None:
        return implementation()

    # Exhaustiveness guard: unreachable as long as every HashFunction member is mapped above.
    return assert_never(hash_function)


class CryptoProviderImpl(CryptoProvider):
    """
    Cryptography provider based on the Python package
    `cryptography <https://github.com/pyca/cryptography>`_.
    """

    @staticmethod
    async def hkdf_derive(
        hash_function: HashFunction,
        length: int,
        salt: bytes,
        info: bytes,
        key_material: bytes
    ) -> bytes:
        hkdf = HKDF(
            algorithm=get_hash_algorithm(hash_function),
            length=length,
            salt=salt,
            info=info,
            backend=default_backend()
        )

        return hkdf.derive(key_material)

    @staticmethod
    async def hmac_calculate(key: bytes, hash_function: HashFunction, data: bytes) -> bytes:
        mac = HMAC(key, get_hash_algorithm(hash_function), backend=default_backend())
        mac.update(data)

        return mac.finalize()

    @staticmethod
    async def aes_cbc_encrypt(key: bytes, initialization_vector: bytes, plaintext: bytes) -> bytes:
        # Pad the plaintext to a multiple of the AES block size (128 bit) using PKCS#7.
        padder = PKCS7(128).padder()
        padded_plaintext = padder.update(plaintext) + padder.finalize()

        # Encrypt with AES in CBC mode.
        encryptor = Cipher(
            algorithms.AES(key),
            modes.CBC(initialization_vector),
            backend=default_backend()
        ).encryptor()

        return encryptor.update(padded_plaintext) + encryptor.finalize() # pylint: disable=no-member

    @staticmethod
    async def aes_cbc_decrypt(key: bytes, initialization_vector: bytes, ciphertext: bytes) -> bytes:
        # Decrypt with AES in CBC mode.
        try:
            decryptor = Cipher(
                algorithms.AES(key),
                modes.CBC(initialization_vector),
                backend=default_backend()
            ).decryptor()

            padded_plaintext = decryptor.update(ciphertext) + decryptor.finalize() # pylint: disable=no-member
        except ValueError as e:
            raise aead.DecryptionFailedException("Decryption failed.") from e

        # Strip the PKCS#7 padding; a malformed pad indicates a corrupted or forged message.
        try:
            unpadder = PKCS7(128).unpadder()

            return unpadder.update(padded_plaintext) + unpadder.finalize()
        except ValueError as e:
            raise aead.DecryptionFailedException("Plaintext padded incorrectly.") from e
PypiClean
/Django_patch-2.2.19-py3-none-any.whl/django/contrib/admin/static/admin/js/collapse.js
(function() { 'use strict'; var closestElem = function(elem, tagName) { if (elem.nodeName === tagName.toUpperCase()) { return elem; } if (elem.parentNode.nodeName === 'BODY') { return null; } return elem.parentNode && closestElem(elem.parentNode, tagName); }; window.addEventListener('load', function() { // Add anchor tag for Show/Hide link var fieldsets = document.querySelectorAll('fieldset.collapse'); for (var i = 0; i < fieldsets.length; i++) { var elem = fieldsets[i]; // Don't hide if fields in this fieldset have errors if (elem.querySelectorAll('div.errors').length === 0) { elem.classList.add('collapsed'); var h2 = elem.querySelector('h2'); var link = document.createElement('a'); link.setAttribute('id', 'fieldsetcollapser' + i); link.setAttribute('class', 'collapse-toggle'); link.setAttribute('href', '#'); link.textContent = gettext('Show'); h2.appendChild(document.createTextNode(' (')); h2.appendChild(link); h2.appendChild(document.createTextNode(')')); } } // Add toggle to hide/show anchor tag var toggleFunc = function(ev) { if (ev.target.matches('.collapse-toggle')) { ev.preventDefault(); ev.stopPropagation(); var fieldset = closestElem(ev.target, 'fieldset'); if (fieldset.classList.contains('collapsed')) { // Show ev.target.textContent = gettext('Hide'); fieldset.classList.remove('collapsed'); } else { // Hide ev.target.textContent = gettext('Show'); fieldset.classList.add('collapsed'); } } }; var inlineDivs = document.querySelectorAll('fieldset.module'); for (i = 0; i < inlineDivs.length; i++) { inlineDivs[i].addEventListener('click', toggleFunc); } }); })();
PypiClean
/GeoNode-3.2.0-py3-none-any.whl/geonode/monitoring/frontend/monitoring/src/components/cels/geoserver-status/index.js
import React from 'react'; import PropTypes from 'prop-types'; import { connect } from 'react-redux'; import SelectField from 'material-ui/SelectField'; import MenuItem from 'material-ui/MenuItem'; import AverageCPU from '../../molecules/average-cpu'; import AverageMemory from '../../molecules/average-memory'; import styles from './styles'; import actions from './actions'; const mapStateToProps = (state) => ({ cpu: state.geoserverCpuStatus.response, mem: state.geoserverMemStatus.response, interval: state.interval.interval, timestamp: state.interval.timestamp, services: state.services.hostgeoserver, }); @connect(mapStateToProps, actions) class GeoserverStatus extends React.Component { static propTypes = { cpu: PropTypes.object, getCpu: PropTypes.func.isRequired, getMem: PropTypes.func.isRequired, interval: PropTypes.number, mem: PropTypes.object, resetCpu: PropTypes.func.isRequired, resetMem: PropTypes.func.isRequired, services: PropTypes.array, timestamp: PropTypes.instanceOf(Date), } constructor(props) { super(props); this.state = { host: '', }; this.get = ( host = this.state.host, interval = this.props.interval, ) => { this.props.getCpu(host, interval); this.props.getMem(host, interval); }; this.handleChange = (event, target, host) => { this.setState({ host }); this.get(); }; } componentWillReceiveProps(nextProps) { if (nextProps && nextProps.services && nextProps.timestamp) { let host = nextProps.services[0].name; let firstTime = false; if (this.state.host === '') { firstTime = true; this.setState({ host }); } else { host = this.state.host; } if (firstTime || nextProps.timestamp !== this.props.timestamp) { this.get(host, nextProps.interval); } } } componentWillUnmount() { this.props.resetCpu(); this.props.resetMem(); } render() { let cpu = 0; if (this.props.cpu) { cpu = undefined; const data = this.props.cpu.data.data; if (data.length > 0) { if (data[0].data.length > 0) { const metric = data[0].data[0]; cpu = Math.floor(metric.val); } } } let mem = 0; if 
(this.props.mem) { mem = undefined; const data = this.props.mem.data.data; if (data.length > 0) { if (data[0].data.length > 0) { const metric = data[0].data[0]; mem = Math.floor(metric.val); } } } const hosts = this.props.services ? this.props.services.map((host) => <MenuItem key={host.name} value={host.name} primaryText={ `${host.name} [${host.host}]` } /> ) : undefined; return this.props.services ? ( <div style={styles.content}> <h5>GeoServer HW Status</h5> <SelectField floatingLabelText="Host" value={this.state.host} onChange={this.handleChange} > {hosts} </SelectField> <div style={styles.geonode}> <AverageCPU cpu={cpu} /> <AverageMemory mem={mem} /> </div> </div> ) : null; } } export default GeoserverStatus;
PypiClean
/CulturelandPython-0.1.2.tar.gz/CulturelandPython-0.1.2/README.md
# CulturelandPython

- A Python library for unofficial Cultureland. This Python library can log in and redeem gift cards into your account. **Be advised: this project is UNOFFICIAL; you are responsible for everything that might occur due to using this Python library.**
- 컬쳐랜드 문화상품권 충전하는 파이썬 라이브러리입니다. 이 라이브러리는 로그인 및 문화상품권 충전을 할 수 있습니다. **이 프로젝트는 비공식이기에 이 파이썬 라이브러리를 사용하며 생기는 모든 문제는 본인에게 책임이 있습니다.**

## Installation

### 1. PIP Installation

Simply use `pip install CulturelandPython`.

### 2. Chromium Installation

This project automatically detects your Chrome version and downloads the matching webdriver using [webdriver_manager](https://github.com/SergeyPirogov/webdriver_manager). Special thanks to the original repository owner.

## Usage

This Python library has two main features: logging in and redeeming gift cards. You can achieve that goal by using the Python expressions below.

### Logging in - Success

```
from CulturelandPython import client
>>> c = client.CulturelandClient('gooday2die', 'PASSWORD')
>>> c
Cultureland Client Object, Logged in as gooday2die
```

Using the `CulturelandClient` class from `client` will generate a client object that is connected to the Cultureland system. If the login was successful, there will be **no return value**. If you are willing to use this class in a big project or a fast, runtime-demanding project, I would suggest using `asyncio` or `threading` when using this class.
### Logging in - Failure

```
>>> c = client.CulturelandClient("gooday2die", "WRONG_PASSWD")
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/home/gooday2die/.local/lib/python3.8/site-packages/CulturelandPython/client.py", line 21, in __init__
    self.login()
  File "/home/gooday2die/.local/lib/python3.8/site-packages/CulturelandPython/client.py", line 38, in login
    login.login(self.web_driver, self.username, self.passwd)
  File "/home/gooday2die/.local/lib/python3.8/site-packages/CulturelandPython/login.py", line 23, in login
    raise LoginFailureException
CulturelandPython.login.LoginFailureException
```

When logging into the system with incorrect credentials, an `Exception` is raised as `CulturelandPython.login.LoginFailureException`.

### Redeeming

```
>>> c
Cultureland Client Object, Logged in as gooday2die
>>> c.redeem('4180-0252-0565-2549')
[True, 1000]
>>> c.redeem('4180-0252-0565-2549')
[False, '잔액이 0원인 상품권']
```

You can redeem by using the `redeem` method of a `CulturelandClient` object. The method returns a `list` object.

- If the redeeming process was successful, it returns [True, Amount Redeemed].
- If the redeeming process was unsuccessful, it returns [False, Error Reason].

## Contacts & Etc.

If you have questions about this project, or a good idea to add to it, let me know via the point of contact edina00@naver.com. Also, since this project is NOT an official project and is kind of sketchy, it might be removed from GitHub if Cultureland asks me to take it down.

Any pull requests, as well as issue reports, are welcome.
PypiClean
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/contrib/messages/storage/fallback.py
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage


class FallbackStorage(BaseStorage):
    """
    Tries to store all messages in the first backend, storing any unstored
    messages in each subsequent backend.
    """
    storage_classes = (CookieStorage, SessionStorage)

    def __init__(self, *args, **kwargs):
        super(FallbackStorage, self).__init__(*args, **kwargs)
        # One instance per configured backend class, tried in declaration
        # order (cookie first, then session).
        self.storages = [storage_class(*args, **kwargs)
                         for storage_class in self.storage_classes]
        # Backends that actually held messages on retrieval; these must be
        # flushed on store even when there is nothing left to save.
        self._used_storages = set()

    def _get(self, *args, **kwargs):
        """
        Gets a single list of messages from all storage backends.

        Returns a ``(messages, all_retrieved)`` tuple, mirroring the
        single-backend ``_get`` contract.
        """
        all_messages = []
        # Initialize so the return statement is safe even if ``storages`` is
        # empty; previously ``all_retrieved`` would be unbound in that case.
        all_retrieved = False
        for storage in self.storages:
            messages, all_retrieved = storage._get()
            # If the backend hasn't been used, no more retrieval is necessary.
            if messages is None:
                break
            if messages:
                self._used_storages.add(storage)
            all_messages.extend(messages)
            # If this storage class contained all the messages, no further
            # retrieval is necessary.
            if all_retrieved:
                break
        return all_messages, all_retrieved

    def _store(self, messages, response, *args, **kwargs):
        """
        Stores the messages, returning any unstored messages after trying all
        backends.

        For each storage backend, any messages not stored are passed on to the
        next backend.
        """
        for storage in self.storages:
            if messages:
                messages = storage._store(messages, response,
                                          remove_oldest=False)
            # Even if there are no more messages, continue iterating to ensure
            # storages which contained messages are flushed.
            elif storage in self._used_storages:
                storage._store([], response)
                self._used_storages.remove(storage)
        return messages
PypiClean
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/DjangoAppCenter/simpleui/static/admin/simpleui-x/elementui/color-picker.js
module.exports = /******/ (function (modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ /******/ // Check if module is in cache /******/ if (installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ i: moduleId, /******/ l: false, /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ /******/ // Flag the module as loaded /******/ module.l = true; /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ /******/ // define getter function for harmony exports /******/ __webpack_require__.d = function (exports, name, getter) { /******/ if (!__webpack_require__.o(exports, name)) { /******/ Object.defineProperty(exports, name, {enumerable: true, get: getter}); /******/ } /******/ }; /******/ /******/ // define __esModule on exports /******/ __webpack_require__.r = function (exports) { /******/ if (typeof Symbol !== 'undefined' && Symbol.toStringTag) { /******/ Object.defineProperty(exports, Symbol.toStringTag, {value: 'Module'}); /******/ } /******/ Object.defineProperty(exports, '__esModule', {value: true}); /******/ }; /******/ /******/ // create a fake namespace object /******/ // mode & 1: value is a module id, require it /******/ // mode & 2: merge all properties of value into the ns /******/ // mode & 4: return value when already ns object /******/ // mode & 8|1: behave like 
require /******/ __webpack_require__.t = function (value, mode) { /******/ if (mode & 1) value = __webpack_require__(value); /******/ if (mode & 8) return value; /******/ if ((mode & 4) && typeof value === 'object' && value && value.__esModule) return value; /******/ var ns = Object.create(null); /******/ __webpack_require__.r(ns); /******/ Object.defineProperty(ns, 'default', {enumerable: true, value: value}); /******/ if (mode & 2 && typeof value != 'string') for (var key in value) __webpack_require__.d(ns, key, function (key) { return value[key]; }.bind(null, key)); /******/ return ns; /******/ }; /******/ /******/ // getDefaultExport function for compatibility with non-harmony modules /******/ __webpack_require__.n = function (module) { /******/ var getter = module && module.__esModule ? /******/ function getDefault() { return module['default']; } : /******/ function getModuleExports() { return module; }; /******/ __webpack_require__.d(getter, 'a', getter); /******/ return getter; /******/ }; /******/ /******/ // Object.prototype.hasOwnProperty.call /******/ __webpack_require__.o = function (object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; /******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = "/dist/"; /******/ /******/ /******/ // Load entry module and return exports /******/ return __webpack_require__(__webpack_require__.s = 53); /******/ }) /************************************************************************/ /******/({ /***/ 0: /***/ (function (module, __webpack_exports__, __webpack_require__) { "use strict"; /* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function () { return normalizeComponent; }); /* globals __VUE_SSR_CONTEXT__ */ // IMPORTANT: Do NOT use ES2015 features in this file (except for modules). // This module is a runtime utility for cleaner component module output and will // be included in the final webpack user bundle. 
function normalizeComponent( scriptExports, render, staticRenderFns, functionalTemplate, injectStyles, scopeId, moduleIdentifier, /* server only */ shadowMode /* vue-cli only */ ) { // Vue.extend constructor export interop var options = typeof scriptExports === 'function' ? scriptExports.options : scriptExports // render functions if (render) { options.render = render options.staticRenderFns = staticRenderFns options._compiled = true } // functional template if (functionalTemplate) { options.functional = true } // scopedId if (scopeId) { options._scopeId = 'data-v-' + scopeId } var hook if (moduleIdentifier) { // server build hook = function (context) { // 2.3 injection context = context || // cached call (this.$vnode && this.$vnode.ssrContext) || // stateful (this.parent && this.parent.$vnode && this.parent.$vnode.ssrContext) // functional // 2.2 with runInNewContext: true if (!context && typeof __VUE_SSR_CONTEXT__ !== 'undefined') { context = __VUE_SSR_CONTEXT__ } // inject component styles if (injectStyles) { injectStyles.call(this, context) } // register component module identifier for async chunk inferrence if (context && context._registeredComponents) { context._registeredComponents.add(moduleIdentifier) } } // used by ssr in case component is cached and beforeCreate // never gets called options._ssrRegister = hook } else if (injectStyles) { hook = shadowMode ? function () { injectStyles.call(this, this.$root.$options.shadowRoot) } : injectStyles } if (hook) { if (options.functional) { // for template-only hot-reload because in that case the render fn doesn't // go through the normalizer options._injectStyles = hook // register for functioal component in vue file var originalRender = options.render options.render = function renderWithStyleInjection(h, context) { hook.call(context) return originalRender(h, context) } } else { // inject component registration as beforeCreate hook var existing = options.beforeCreate options.beforeCreate = existing ? 
[].concat(existing, hook) : [hook] } } return { exports: scriptExports, options: options } } /***/ }), /***/ 11: /***/ (function (module, exports) { module.exports = require("element-ui/lib/input"); /***/ }), /***/ 12: /***/ (function (module, exports) { module.exports = require("element-ui/lib/utils/clickoutside"); /***/ }), /***/ 18: /***/ (function (module, exports) { module.exports = require("element-ui/lib/button"); /***/ }), /***/ 4: /***/ (function (module, exports) { module.exports = require("element-ui/lib/mixins/emitter"); /***/ }), /***/ 5: /***/ (function (module, exports) { module.exports = require("element-ui/lib/utils/vue-popper"); /***/ }), /***/ 53: /***/ (function (module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_require__.r(__webpack_exports__); // CONCATENATED MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/main.vue?vue&type=template&id=55c8ade7& var render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c( "div", { directives: [ { name: "clickoutside", rawName: "v-clickoutside", value: _vm.hide, expression: "hide" } ], class: [ "el-color-picker", _vm.colorDisabled ? "is-disabled" : "", _vm.colorSize ? "el-color-picker--" + _vm.colorSize : "" ] }, [ _vm.colorDisabled ? _c("div", {staticClass: "el-color-picker__mask"}) : _vm._e(), _c( "div", { staticClass: "el-color-picker__trigger", on: {click: _vm.handleTrigger} }, [ _c( "span", { staticClass: "el-color-picker__color", class: {"is-alpha": _vm.showAlpha} }, [ _c("span", { staticClass: "el-color-picker__color-inner", style: { backgroundColor: _vm.displayedColor } }), !_vm.value && !_vm.showPanelColor ? 
_c("span", { staticClass: "el-color-picker__empty el-icon-close" }) : _vm._e() ] ), _c("span", { directives: [ { name: "show", rawName: "v-show", value: _vm.value || _vm.showPanelColor, expression: "value || showPanelColor" } ], staticClass: "el-color-picker__icon el-icon-arrow-down" }) ] ), _c("picker-dropdown", { ref: "dropdown", class: ["el-color-picker__panel", _vm.popperClass || ""], attrs: { color: _vm.color, "show-alpha": _vm.showAlpha, predefine: _vm.predefine }, on: {pick: _vm.confirmValue, clear: _vm.clearValue}, model: { value: _vm.showPicker, callback: function ($$v) { _vm.showPicker = $$v }, expression: "showPicker" } }) ], 1 ) } var staticRenderFns = [] render._withStripped = true // CONCATENATED MODULE: ./packages/color-picker/src/main.vue?vue&type=template&id=55c8ade7& // CONCATENATED MODULE: ./packages/color-picker/src/color.js var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var hsv2hsl = function hsv2hsl(hue, sat, val) { return [hue, sat * val / ((hue = (2 - sat) * val) < 1 ? 
hue : 2 - hue) || 0, hue / 2]; }; // Need to handle 1.0 as 100%, since once it is a number, there is no difference between it and 1 // <http://stackoverflow.com/questions/7422072/javascript-how-to-detect-number-as-a-decimal-including-1-0> var isOnePointZero = function isOnePointZero(n) { return typeof n === 'string' && n.indexOf('.') !== -1 && parseFloat(n) === 1; }; var isPercentage = function isPercentage(n) { return typeof n === 'string' && n.indexOf('%') !== -1; }; // Take input from [0, n] and return it as [0, 1] var bound01 = function bound01(value, max) { if (isOnePointZero(value)) value = '100%'; var processPercent = isPercentage(value); value = Math.min(max, Math.max(0, parseFloat(value))); // Automatically convert percentage into number if (processPercent) { value = parseInt(value * max, 10) / 100; } // Handle floating point rounding errors if (Math.abs(value - max) < 0.000001) { return 1; } // Convert into [0, 1] range if it isn't already return value % max / parseFloat(max); }; var INT_HEX_MAP = {10: 'A', 11: 'B', 12: 'C', 13: 'D', 14: 'E', 15: 'F'}; var toHex = function toHex(_ref) { var r = _ref.r, g = _ref.g, b = _ref.b; var hexOne = function hexOne(value) { value = Math.min(Math.round(value), 255); var high = Math.floor(value / 16); var low = value % 16; return '' + (INT_HEX_MAP[high] || high) + (INT_HEX_MAP[low] || low); }; if (isNaN(r) || isNaN(g) || isNaN(b)) return ''; return '#' + hexOne(r) + hexOne(g) + hexOne(b); }; var HEX_INT_MAP = {A: 10, B: 11, C: 12, D: 13, E: 14, F: 15}; var parseHexChannel = function parseHexChannel(hex) { if (hex.length === 2) { return (HEX_INT_MAP[hex[0].toUpperCase()] || +hex[0]) * 16 + (HEX_INT_MAP[hex[1].toUpperCase()] || +hex[1]); } return HEX_INT_MAP[hex[1].toUpperCase()] || +hex[1]; }; var hsl2hsv = function hsl2hsv(hue, sat, light) { sat = sat / 100; light = light / 100; var smin = sat; var lmin = Math.max(light, 0.01); var sv = void 0; var v = void 0; light *= 2; sat *= light <= 1 ? 
light : 2 - light; smin *= lmin <= 1 ? lmin : 2 - lmin; v = (light + sat) / 2; sv = light === 0 ? 2 * smin / (lmin + smin) : 2 * sat / (light + sat); return { h: hue, s: sv * 100, v: v * 100 }; }; // `rgbToHsv` // Converts an RGB color value to HSV // *Assumes:* r, g, and b are contained in the set [0, 255] or [0, 1] // *Returns:* { h, s, v } in [0,1] var rgb2hsv = function rgb2hsv(r, g, b) { r = bound01(r, 255); g = bound01(g, 255); b = bound01(b, 255); var max = Math.max(r, g, b); var min = Math.min(r, g, b); var h = void 0, s = void 0; var v = max; var d = max - min; s = max === 0 ? 0 : d / max; if (max === min) { h = 0; // achromatic } else { switch (max) { case r: h = (g - b) / d + (g < b ? 6 : 0); break; case g: h = (b - r) / d + 2; break; case b: h = (r - g) / d + 4; break; } h /= 6; } return {h: h * 360, s: s * 100, v: v * 100}; }; // `hsvToRgb` // Converts an HSV color value to RGB. // *Assumes:* h is contained in [0, 1] or [0, 360] and s and v are contained in [0, 1] or [0, 100] // *Returns:* { r, g, b } in the set [0, 255] var hsv2rgb = function hsv2rgb(h, s, v) { h = bound01(h, 360) * 6; s = bound01(s, 100); v = bound01(v, 100); var i = Math.floor(h); var f = h - i; var p = v * (1 - s); var q = v * (1 - f * s); var t = v * (1 - (1 - f) * s); var mod = i % 6; var r = [v, q, p, p, t, v][mod]; var g = [t, v, v, q, p, p][mod]; var b = [p, p, t, v, v, q][mod]; return { r: Math.round(r * 255), g: Math.round(g * 255), b: Math.round(b * 255) }; }; var Color = function () { function Color(options) { _classCallCheck(this, Color); this._hue = 0; this._saturation = 100; this._value = 100; this._alpha = 100; this.enableAlpha = false; this.format = 'hex'; this.value = ''; options = options || {}; for (var option in options) { if (options.hasOwnProperty(option)) { this[option] = options[option]; } } this.doOnChange(); } Color.prototype.set = function set(prop, value) { if (arguments.length === 1 && (typeof prop === 'undefined' ? 
'undefined' : _typeof(prop)) === 'object') { for (var p in prop) { if (prop.hasOwnProperty(p)) { this.set(p, prop[p]); } } return; } this['_' + prop] = value; this.doOnChange(); }; Color.prototype.get = function get(prop) { return this['_' + prop]; }; Color.prototype.toRgb = function toRgb() { return hsv2rgb(this._hue, this._saturation, this._value); }; Color.prototype.fromString = function fromString(value) { var _this = this; if (!value) { this._hue = 0; this._saturation = 100; this._value = 100; this.doOnChange(); return; } var fromHSV = function fromHSV(h, s, v) { _this._hue = Math.max(0, Math.min(360, h)); _this._saturation = Math.max(0, Math.min(100, s)); _this._value = Math.max(0, Math.min(100, v)); _this.doOnChange(); }; if (value.indexOf('hsl') !== -1) { var parts = value.replace(/hsla|hsl|\(|\)/gm, '').split(/\s|,/g).filter(function (val) { return val !== ''; }).map(function (val, index) { return index > 2 ? parseFloat(val) : parseInt(val, 10); }); if (parts.length === 4) { this._alpha = Math.floor(parseFloat(parts[3]) * 100); } else if (parts.length === 3) { this._alpha = 100; } if (parts.length >= 3) { var _hsl2hsv = hsl2hsv(parts[0], parts[1], parts[2]), h = _hsl2hsv.h, s = _hsl2hsv.s, v = _hsl2hsv.v; fromHSV(h, s, v); } } else if (value.indexOf('hsv') !== -1) { var _parts = value.replace(/hsva|hsv|\(|\)/gm, '').split(/\s|,/g).filter(function (val) { return val !== ''; }).map(function (val, index) { return index > 2 ? parseFloat(val) : parseInt(val, 10); }); if (_parts.length === 4) { this._alpha = Math.floor(parseFloat(_parts[3]) * 100); } else if (_parts.length === 3) { this._alpha = 100; } if (_parts.length >= 3) { fromHSV(_parts[0], _parts[1], _parts[2]); } } else if (value.indexOf('rgb') !== -1) { var _parts2 = value.replace(/rgba|rgb|\(|\)/gm, '').split(/\s|,/g).filter(function (val) { return val !== ''; }).map(function (val, index) { return index > 2 ? 
parseFloat(val) : parseInt(val, 10); }); if (_parts2.length === 4) { this._alpha = Math.floor(parseFloat(_parts2[3]) * 100); } else if (_parts2.length === 3) { this._alpha = 100; } if (_parts2.length >= 3) { var _rgb2hsv = rgb2hsv(_parts2[0], _parts2[1], _parts2[2]), _h = _rgb2hsv.h, _s = _rgb2hsv.s, _v = _rgb2hsv.v; fromHSV(_h, _s, _v); } } else if (value.indexOf('#') !== -1) { var hex = value.replace('#', '').trim(); if (!/^(?:[0-9a-fA-F]{3}){1,2}$/.test(hex)) return; var r = void 0, g = void 0, b = void 0; if (hex.length === 3) { r = parseHexChannel(hex[0] + hex[0]); g = parseHexChannel(hex[1] + hex[1]); b = parseHexChannel(hex[2] + hex[2]); } else if (hex.length === 6 || hex.length === 8) { r = parseHexChannel(hex.substring(0, 2)); g = parseHexChannel(hex.substring(2, 4)); b = parseHexChannel(hex.substring(4, 6)); } if (hex.length === 8) { this._alpha = Math.floor(parseHexChannel(hex.substring(6)) / 255 * 100); } else if (hex.length === 3 || hex.length === 6) { this._alpha = 100; } var _rgb2hsv2 = rgb2hsv(r, g, b), _h2 = _rgb2hsv2.h, _s2 = _rgb2hsv2.s, _v2 = _rgb2hsv2.v; fromHSV(_h2, _s2, _v2); } }; Color.prototype.compare = function compare(color) { return Math.abs(color._hue - this._hue) < 2 && Math.abs(color._saturation - this._saturation) < 1 && Math.abs(color._value - this._value) < 1 && Math.abs(color._alpha - this._alpha) < 1; }; Color.prototype.doOnChange = function doOnChange() { var _hue = this._hue, _saturation = this._saturation, _value = this._value, _alpha = this._alpha, format = this.format; if (this.enableAlpha) { switch (format) { case 'hsl': var hsl = hsv2hsl(_hue, _saturation / 100, _value / 100); this.value = 'hsla(' + _hue + ', ' + Math.round(hsl[1] * 100) + '%, ' + Math.round(hsl[2] * 100) + '%, ' + _alpha / 100 + ')'; break; case 'hsv': this.value = 'hsva(' + _hue + ', ' + Math.round(_saturation) + '%, ' + Math.round(_value) + '%, ' + _alpha / 100 + ')'; break; default: var _hsv2rgb = hsv2rgb(_hue, _saturation, _value), r = _hsv2rgb.r, g 
= _hsv2rgb.g, b = _hsv2rgb.b; this.value = 'rgba(' + r + ', ' + g + ', ' + b + ', ' + _alpha / 100 + ')'; } } else { switch (format) { case 'hsl': var _hsl = hsv2hsl(_hue, _saturation / 100, _value / 100); this.value = 'hsl(' + _hue + ', ' + Math.round(_hsl[1] * 100) + '%, ' + Math.round(_hsl[2] * 100) + '%)'; break; case 'hsv': this.value = 'hsv(' + _hue + ', ' + Math.round(_saturation) + '%, ' + Math.round(_value) + '%)'; break; case 'rgb': var _hsv2rgb2 = hsv2rgb(_hue, _saturation, _value), _r = _hsv2rgb2.r, _g = _hsv2rgb2.g, _b = _hsv2rgb2.b; this.value = 'rgb(' + _r + ', ' + _g + ', ' + _b + ')'; break; default: this.value = toHex(hsv2rgb(_hue, _saturation, _value)); } } }; return Color; }(); /* harmony default export */ var src_color = (Color); // CONCATENATED MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/picker-dropdown.vue?vue&type=template&id=06601625& var picker_dropdownvue_type_template_id_06601625_render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c( "transition", {attrs: {name: "el-zoom-in-top"}, on: {"after-leave": _vm.doDestroy}}, [ _c( "div", { directives: [ { name: "show", rawName: "v-show", value: _vm.showPopper, expression: "showPopper" } ], staticClass: "el-color-dropdown" }, [ _c( "div", {staticClass: "el-color-dropdown__main-wrapper"}, [ _c("hue-slider", { ref: "hue", staticStyle: {float: "right"}, attrs: {color: _vm.color, vertical: ""} }), _c("sv-panel", {ref: "sl", attrs: {color: _vm.color}}) ], 1 ), _vm.showAlpha ? _c("alpha-slider", {ref: "alpha", attrs: {color: _vm.color}}) : _vm._e(), _vm.predefine ? 
_c("predefine", { attrs: {color: _vm.color, colors: _vm.predefine} }) : _vm._e(), _c( "div", {staticClass: "el-color-dropdown__btns"}, [ _c( "span", {staticClass: "el-color-dropdown__value"}, [ _c("el-input", { attrs: {"validate-event": false, size: "mini"}, on: {blur: _vm.handleConfirm}, nativeOn: { keyup: function ($event) { if ( !("button" in $event) && _vm._k( $event.keyCode, "enter", 13, $event.key, "Enter" ) ) { return null } return _vm.handleConfirm($event) } }, model: { value: _vm.customInput, callback: function ($$v) { _vm.customInput = $$v }, expression: "customInput" } }) ], 1 ), _c( "el-button", { staticClass: "el-color-dropdown__link-btn", attrs: {size: "mini", type: "text"}, on: { click: function ($event) { _vm.$emit("clear") } } }, [ _vm._v( "\n " + _vm._s(_vm.t("el.colorpicker.clear")) + "\n " ) ] ), _c( "el-button", { staticClass: "el-color-dropdown__btn", attrs: {plain: "", size: "mini"}, on: {click: _vm.confirmValue} }, [ _vm._v( "\n " + _vm._s(_vm.t("el.colorpicker.confirm")) + "\n " ) ] ) ], 1 ) ], 1 ) ] ) } var picker_dropdownvue_type_template_id_06601625_staticRenderFns = [] picker_dropdownvue_type_template_id_06601625_render._withStripped = true // CONCATENATED MODULE: ./packages/color-picker/src/components/picker-dropdown.vue?vue&type=template&id=06601625& // CONCATENATED MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/sv-panel.vue?vue&type=template&id=d8583596& var sv_panelvue_type_template_id_d8583596_render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c( "div", { staticClass: "el-color-svpanel", style: { backgroundColor: _vm.background } }, [ _c("div", {staticClass: "el-color-svpanel__white"}), _c("div", {staticClass: "el-color-svpanel__black"}), _c( "div", { staticClass: "el-color-svpanel__cursor", style: { top: _vm.cursorTop + 
"px", left: _vm.cursorLeft + "px" } }, [_c("div")] ) ] ) } var sv_panelvue_type_template_id_d8583596_staticRenderFns = [] sv_panelvue_type_template_id_d8583596_render._withStripped = true // CONCATENATED MODULE: ./packages/color-picker/src/components/sv-panel.vue?vue&type=template&id=d8583596& // EXTERNAL MODULE: external "vue" var external_vue_ = __webpack_require__(7); var external_vue_default = /*#__PURE__*/__webpack_require__.n(external_vue_); // CONCATENATED MODULE: ./packages/color-picker/src/draggable.js var isDragging = false; /* harmony default export */ var draggable = (function (element, options) { if (external_vue_default.a.prototype.$isServer) return; var moveFn = function moveFn(event) { if (options.drag) { options.drag(event); } }; var upFn = function upFn(event) { document.removeEventListener('mousemove', moveFn); document.removeEventListener('mouseup', upFn); document.onselectstart = null; document.ondragstart = null; isDragging = false; if (options.end) { options.end(event); } }; element.addEventListener('mousedown', function (event) { if (isDragging) return; document.onselectstart = function () { return false; }; document.ondragstart = function () { return false; }; document.addEventListener('mousemove', moveFn); document.addEventListener('mouseup', upFn); isDragging = true; if (options.start) { options.start(event); } }); }); // CONCATENATED MODULE: ./node_modules/_babel-loader@7.1.5@babel-loader/lib!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/sv-panel.vue?vue&type=script&lang=js& // // // // // // // // // // // // // // // // // /* harmony default export */ var sv_panelvue_type_script_lang_js_ = ({ name: 'el-sl-panel', props: { color: { required: true } }, computed: { colorValue: function colorValue() { var hue = this.color.get('hue'); var value = this.color.get('value'); return {hue: hue, value: value}; } }, watch: { colorValue: function colorValue() { this.update(); } }, 
methods: { update: function update() { var saturation = this.color.get('saturation'); var value = this.color.get('value'); var el = this.$el; var width = el.clientWidth, height = el.clientHeight; this.cursorLeft = saturation * width / 100; this.cursorTop = (100 - value) * height / 100; this.background = 'hsl(' + this.color.get('hue') + ', 100%, 50%)'; }, handleDrag: function handleDrag(event) { var el = this.$el; var rect = el.getBoundingClientRect(); var left = event.clientX - rect.left; var top = event.clientY - rect.top; left = Math.max(0, left); left = Math.min(left, rect.width); top = Math.max(0, top); top = Math.min(top, rect.height); this.cursorLeft = left; this.cursorTop = top; this.color.set({ saturation: left / rect.width * 100, value: 100 - top / rect.height * 100 }); } }, mounted: function mounted() { var _this = this; draggable(this.$el, { drag: function drag(event) { _this.handleDrag(event); }, end: function end(event) { _this.handleDrag(event); } }); this.update(); }, data: function data() { return { cursorTop: 0, cursorLeft: 0, background: 'hsl(0, 100%, 50%)' }; } }); // CONCATENATED MODULE: ./packages/color-picker/src/components/sv-panel.vue?vue&type=script&lang=js& /* harmony default export */ var components_sv_panelvue_type_script_lang_js_ = (sv_panelvue_type_script_lang_js_); // EXTERNAL MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/runtime/componentNormalizer.js var componentNormalizer = __webpack_require__(0); // CONCATENATED MODULE: ./packages/color-picker/src/components/sv-panel.vue /* normalize component */ var component = Object(componentNormalizer["a" /* default */])( components_sv_panelvue_type_script_lang_js_, sv_panelvue_type_template_id_d8583596_render, sv_panelvue_type_template_id_d8583596_staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { var api; } component.options.__file = "packages/color-picker/src/components/sv-panel.vue" /* harmony default export */ var sv_panel = (component.exports); // 
CONCATENATED MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/hue-slider.vue?vue&type=template&id=5cdc43b1& var hue_slidervue_type_template_id_5cdc43b1_render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c( "div", { staticClass: "el-color-hue-slider", class: {"is-vertical": _vm.vertical} }, [ _c("div", { ref: "bar", staticClass: "el-color-hue-slider__bar", on: {click: _vm.handleClick} }), _c("div", { ref: "thumb", staticClass: "el-color-hue-slider__thumb", style: { left: _vm.thumbLeft + "px", top: _vm.thumbTop + "px" } }) ] ) } var hue_slidervue_type_template_id_5cdc43b1_staticRenderFns = [] hue_slidervue_type_template_id_5cdc43b1_render._withStripped = true // CONCATENATED MODULE: ./packages/color-picker/src/components/hue-slider.vue?vue&type=template&id=5cdc43b1& // CONCATENATED MODULE: ./node_modules/_babel-loader@7.1.5@babel-loader/lib!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/hue-slider.vue?vue&type=script&lang=js& // // // // // // // // // // // // // /* harmony default export */ var hue_slidervue_type_script_lang_js_ = ({ name: 'el-color-hue-slider', props: { color: { required: true }, vertical: Boolean }, data: function data() { return { thumbLeft: 0, thumbTop: 0 }; }, computed: { hueValue: function hueValue() { var hue = this.color.get('hue'); return hue; } }, watch: { hueValue: function hueValue() { this.update(); } }, methods: { handleClick: function handleClick(event) { var thumb = this.$refs.thumb; var target = event.target; if (target !== thumb) { this.handleDrag(event); } }, handleDrag: function handleDrag(event) { var rect = this.$el.getBoundingClientRect(); var thumb = this.$refs.thumb; var hue = void 0; if (!this.vertical) { var left = event.clientX - rect.left; left = 
Math.min(left, rect.width - thumb.offsetWidth / 2); left = Math.max(thumb.offsetWidth / 2, left); hue = Math.round((left - thumb.offsetWidth / 2) / (rect.width - thumb.offsetWidth) * 360); } else { var top = event.clientY - rect.top; top = Math.min(top, rect.height - thumb.offsetHeight / 2); top = Math.max(thumb.offsetHeight / 2, top); hue = Math.round((top - thumb.offsetHeight / 2) / (rect.height - thumb.offsetHeight) * 360); } this.color.set('hue', hue); }, getThumbLeft: function getThumbLeft() { if (this.vertical) return 0; var el = this.$el; var hue = this.color.get('hue'); if (!el) return 0; var thumb = this.$refs.thumb; return Math.round(hue * (el.offsetWidth - thumb.offsetWidth / 2) / 360); }, getThumbTop: function getThumbTop() { if (!this.vertical) return 0; var el = this.$el; var hue = this.color.get('hue'); if (!el) return 0; var thumb = this.$refs.thumb; return Math.round(hue * (el.offsetHeight - thumb.offsetHeight / 2) / 360); }, update: function update() { this.thumbLeft = this.getThumbLeft(); this.thumbTop = this.getThumbTop(); } }, mounted: function mounted() { var _this = this; var _$refs = this.$refs, bar = _$refs.bar, thumb = _$refs.thumb; var dragConfig = { drag: function drag(event) { _this.handleDrag(event); }, end: function end(event) { _this.handleDrag(event); } }; draggable(bar, dragConfig); draggable(thumb, dragConfig); this.update(); } }); // CONCATENATED MODULE: ./packages/color-picker/src/components/hue-slider.vue?vue&type=script&lang=js& /* harmony default export */ var components_hue_slidervue_type_script_lang_js_ = (hue_slidervue_type_script_lang_js_); // CONCATENATED MODULE: ./packages/color-picker/src/components/hue-slider.vue /* normalize component */ var hue_slider_component = Object(componentNormalizer["a" /* default */])( components_hue_slidervue_type_script_lang_js_, hue_slidervue_type_template_id_5cdc43b1_render, hue_slidervue_type_template_id_5cdc43b1_staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { 
var hue_slider_api; } hue_slider_component.options.__file = "packages/color-picker/src/components/hue-slider.vue" /* harmony default export */ var hue_slider = (hue_slider_component.exports); // CONCATENATED MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/alpha-slider.vue?vue&type=template&id=068c66cb& var alpha_slidervue_type_template_id_068c66cb_render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c( "div", { staticClass: "el-color-alpha-slider", class: {"is-vertical": _vm.vertical} }, [ _c("div", { ref: "bar", staticClass: "el-color-alpha-slider__bar", style: { background: _vm.background }, on: {click: _vm.handleClick} }), _c("div", { ref: "thumb", staticClass: "el-color-alpha-slider__thumb", style: { left: _vm.thumbLeft + "px", top: _vm.thumbTop + "px" } }) ] ) } var alpha_slidervue_type_template_id_068c66cb_staticRenderFns = [] alpha_slidervue_type_template_id_068c66cb_render._withStripped = true // CONCATENATED MODULE: ./packages/color-picker/src/components/alpha-slider.vue?vue&type=template&id=068c66cb& // CONCATENATED MODULE: ./node_modules/_babel-loader@7.1.5@babel-loader/lib!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/alpha-slider.vue?vue&type=script&lang=js& // // // // // // // // // // // // // // // // // // // /* harmony default export */ var alpha_slidervue_type_script_lang_js_ = ({ name: 'el-color-alpha-slider', props: { color: { required: true }, vertical: Boolean }, watch: { 'color._alpha': function color_alpha() { this.update(); }, 'color.value': function colorValue() { this.update(); } }, methods: { handleClick: function handleClick(event) { var thumb = this.$refs.thumb; var target = event.target; if (target !== thumb) { this.handleDrag(event); } }, handleDrag: function 
handleDrag(event) { var rect = this.$el.getBoundingClientRect(); var thumb = this.$refs.thumb; if (!this.vertical) { var left = event.clientX - rect.left; left = Math.max(thumb.offsetWidth / 2, left); left = Math.min(left, rect.width - thumb.offsetWidth / 2); this.color.set('alpha', Math.round((left - thumb.offsetWidth / 2) / (rect.width - thumb.offsetWidth) * 100)); } else { var top = event.clientY - rect.top; top = Math.max(thumb.offsetHeight / 2, top); top = Math.min(top, rect.height - thumb.offsetHeight / 2); this.color.set('alpha', Math.round((top - thumb.offsetHeight / 2) / (rect.height - thumb.offsetHeight) * 100)); } }, getThumbLeft: function getThumbLeft() { if (this.vertical) return 0; var el = this.$el; var alpha = this.color._alpha; if (!el) return 0; var thumb = this.$refs.thumb; return Math.round(alpha * (el.offsetWidth - thumb.offsetWidth / 2) / 100); }, getThumbTop: function getThumbTop() { if (!this.vertical) return 0; var el = this.$el; var alpha = this.color._alpha; if (!el) return 0; var thumb = this.$refs.thumb; return Math.round(alpha * (el.offsetHeight - thumb.offsetHeight / 2) / 100); }, getBackground: function getBackground() { if (this.color && this.color.value) { var _color$toRgb = this.color.toRgb(), r = _color$toRgb.r, g = _color$toRgb.g, b = _color$toRgb.b; return 'linear-gradient(to right, rgba(' + r + ', ' + g + ', ' + b + ', 0) 0%, rgba(' + r + ', ' + g + ', ' + b + ', 1) 100%)'; } return null; }, update: function update() { this.thumbLeft = this.getThumbLeft(); this.thumbTop = this.getThumbTop(); this.background = this.getBackground(); } }, data: function data() { return { thumbLeft: 0, thumbTop: 0, background: null }; }, mounted: function mounted() { var _this = this; var _$refs = this.$refs, bar = _$refs.bar, thumb = _$refs.thumb; var dragConfig = { drag: function drag(event) { _this.handleDrag(event); }, end: function end(event) { _this.handleDrag(event); } }; draggable(bar, dragConfig); draggable(thumb, dragConfig); 
this.update(); } }); // CONCATENATED MODULE: ./packages/color-picker/src/components/alpha-slider.vue?vue&type=script&lang=js& /* harmony default export */ var components_alpha_slidervue_type_script_lang_js_ = (alpha_slidervue_type_script_lang_js_); // CONCATENATED MODULE: ./packages/color-picker/src/components/alpha-slider.vue /* normalize component */ var alpha_slider_component = Object(componentNormalizer["a" /* default */])( components_alpha_slidervue_type_script_lang_js_, alpha_slidervue_type_template_id_068c66cb_render, alpha_slidervue_type_template_id_068c66cb_staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { var alpha_slider_api; } alpha_slider_component.options.__file = "packages/color-picker/src/components/alpha-slider.vue" /* harmony default export */ var alpha_slider = (alpha_slider_component.exports); // CONCATENATED MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/predefine.vue?vue&type=template&id=06e03093& var predefinevue_type_template_id_06e03093_render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c("div", {staticClass: "el-color-predefine"}, [ _c( "div", {staticClass: "el-color-predefine__colors"}, _vm._l(_vm.rgbaColors, function (item, index) { return _c( "div", { key: _vm.colors[index], staticClass: "el-color-predefine__color-selector", class: {selected: item.selected, "is-alpha": item._alpha < 100}, on: { click: function ($event) { _vm.handleSelect(index) } } }, [_c("div", {style: {"background-color": item.value}})] ) }), 0 ) ]) } var predefinevue_type_template_id_06e03093_staticRenderFns = [] predefinevue_type_template_id_06e03093_render._withStripped = true // CONCATENATED MODULE: ./packages/color-picker/src/components/predefine.vue?vue&type=template&id=06e03093& // CONCATENATED MODULE: 
./node_modules/_babel-loader@7.1.5@babel-loader/lib!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/predefine.vue?vue&type=script&lang=js& // // // // // // // // // // // // // // // /* harmony default export */ var predefinevue_type_script_lang_js_ = ({ props: { colors: {type: Array, required: true}, color: {required: true} }, data: function data() { return { rgbaColors: this.parseColors(this.colors, this.color) }; }, methods: { handleSelect: function handleSelect(index) { this.color.fromString(this.colors[index]); }, parseColors: function parseColors(colors, color) { return colors.map(function (value) { var c = new src_color(); c.enableAlpha = true; c.format = 'rgba'; c.fromString(value); c.selected = c.value === color.value; return c; }); } }, watch: { '$parent.currentColor': function $parentCurrentColor(val) { var color = new src_color(); color.fromString(val); this.rgbaColors.forEach(function (item) { item.selected = color.compare(item); }); }, colors: function colors(newVal) { this.rgbaColors = this.parseColors(newVal, this.color); }, color: function color(newVal) { this.rgbaColors = this.parseColors(this.colors, newVal); } } }); // CONCATENATED MODULE: ./packages/color-picker/src/components/predefine.vue?vue&type=script&lang=js& /* harmony default export */ var components_predefinevue_type_script_lang_js_ = (predefinevue_type_script_lang_js_); // CONCATENATED MODULE: ./packages/color-picker/src/components/predefine.vue /* normalize component */ var predefine_component = Object(componentNormalizer["a" /* default */])( components_predefinevue_type_script_lang_js_, predefinevue_type_template_id_06e03093_render, predefinevue_type_template_id_06e03093_staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { var predefine_api; } predefine_component.options.__file = "packages/color-picker/src/components/predefine.vue" /* harmony default export */ var predefine = 
(predefine_component.exports); // EXTERNAL MODULE: external "element-ui/lib/utils/vue-popper" var vue_popper_ = __webpack_require__(5); var vue_popper_default = /*#__PURE__*/__webpack_require__.n(vue_popper_); // EXTERNAL MODULE: external "element-ui/lib/mixins/locale" var locale_ = __webpack_require__(6); var locale_default = /*#__PURE__*/__webpack_require__.n(locale_); // EXTERNAL MODULE: external "element-ui/lib/input" var input_ = __webpack_require__(11); var input_default = /*#__PURE__*/__webpack_require__.n(input_); // EXTERNAL MODULE: external "element-ui/lib/button" var button_ = __webpack_require__(18); var button_default = /*#__PURE__*/__webpack_require__.n(button_); // CONCATENATED MODULE: ./node_modules/_babel-loader@7.1.5@babel-loader/lib!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/picker-dropdown.vue?vue&type=script&lang=js& // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // /* harmony default export */ var picker_dropdownvue_type_script_lang_js_ = ({ name: 'el-color-picker-dropdown', mixins: [vue_popper_default.a, locale_default.a], components: { SvPanel: sv_panel, HueSlider: hue_slider, AlphaSlider: alpha_slider, ElInput: input_default.a, ElButton: button_default.a, Predefine: predefine }, props: { color: { required: true }, showAlpha: Boolean, predefine: Array }, data: function data() { return { customInput: '' }; }, computed: { currentColor: function currentColor() { var parent = this.$parent; return !parent.value && !parent.showPanelColor ? 
'' : parent.color.value; } }, methods: { confirmValue: function confirmValue() { this.$emit('pick'); }, handleConfirm: function handleConfirm() { this.color.fromString(this.customInput); } }, mounted: function mounted() { this.$parent.popperElm = this.popperElm = this.$el; this.referenceElm = this.$parent.$el; }, watch: { showPopper: function showPopper(val) { var _this = this; if (val === true) { this.$nextTick(function () { var _$refs = _this.$refs, sl = _$refs.sl, hue = _$refs.hue, alpha = _$refs.alpha; sl && sl.update(); hue && hue.update(); alpha && alpha.update(); }); } }, currentColor: { immediate: true, handler: function handler(val) { this.customInput = val; } } } }); // CONCATENATED MODULE: ./packages/color-picker/src/components/picker-dropdown.vue?vue&type=script&lang=js& /* harmony default export */ var components_picker_dropdownvue_type_script_lang_js_ = (picker_dropdownvue_type_script_lang_js_); // CONCATENATED MODULE: ./packages/color-picker/src/components/picker-dropdown.vue /* normalize component */ var picker_dropdown_component = Object(componentNormalizer["a" /* default */])( components_picker_dropdownvue_type_script_lang_js_, picker_dropdownvue_type_template_id_06601625_render, picker_dropdownvue_type_template_id_06601625_staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { var picker_dropdown_api; } picker_dropdown_component.options.__file = "packages/color-picker/src/components/picker-dropdown.vue" /* harmony default export */ var picker_dropdown = (picker_dropdown_component.exports); // EXTERNAL MODULE: external "element-ui/lib/utils/clickoutside" var clickoutside_ = __webpack_require__(12); var clickoutside_default = /*#__PURE__*/__webpack_require__.n(clickoutside_); // EXTERNAL MODULE: external "element-ui/lib/mixins/emitter" var emitter_ = __webpack_require__(4); var emitter_default = /*#__PURE__*/__webpack_require__.n(emitter_); // CONCATENATED MODULE: 
./node_modules/_babel-loader@7.1.5@babel-loader/lib!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/color-picker/src/main.vue?vue&type=script&lang=js& // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // /* harmony default export */ var mainvue_type_script_lang_js_ = ({ name: 'ElColorPicker', mixins: [emitter_default.a], props: { value: String, showAlpha: Boolean, colorFormat: String, disabled: Boolean, size: String, popperClass: String, predefine: Array }, inject: { elForm: { default: '' }, elFormItem: { default: '' } }, directives: {Clickoutside: clickoutside_default.a}, computed: { displayedColor: function displayedColor() { if (!this.value && !this.showPanelColor) { return 'transparent'; } return this.displayedRgb(this.color, this.showAlpha); }, _elFormItemSize: function _elFormItemSize() { return (this.elFormItem || {}).elFormItemSize; }, colorSize: function colorSize() { return this.size || this._elFormItemSize || (this.$ELEMENT || {}).size; }, colorDisabled: function colorDisabled() { return this.disabled || (this.elForm || {}).disabled; } }, watch: { value: function value(val) { if (!val) { this.showPanelColor = false; } else if (val && val !== this.color.value) { this.color.fromString(val); } }, color: { deep: true, handler: function handler() { this.showPanelColor = true; } }, displayedColor: function displayedColor(val) { if (!this.showPicker) return; var currentValueColor = new src_color({ enableAlpha: this.showAlpha, format: this.colorFormat }); currentValueColor.fromString(this.value); var currentValueColorRgb = this.displayedRgb(currentValueColor, this.showAlpha); if (val !== currentValueColorRgb) { this.$emit('active-change', val); } } }, methods: { handleTrigger: function handleTrigger() { if (this.colorDisabled) return; this.showPicker = !this.showPicker; }, confirmValue: function confirmValue() { var value = this.color.value; this.$emit('input', value); 
this.$emit('change', value); this.dispatch('ElFormItem', 'el.form.change', value); this.showPicker = false; }, clearValue: function clearValue() { this.$emit('input', null); this.$emit('change', null); if (this.value !== null) { this.dispatch('ElFormItem', 'el.form.change', null); } this.showPanelColor = false; this.showPicker = false; this.resetColor(); }, hide: function hide() { this.showPicker = false; this.resetColor(); }, resetColor: function resetColor() { var _this = this; this.$nextTick(function (_) { if (_this.value) { _this.color.fromString(_this.value); } else { _this.showPanelColor = false; } }); }, displayedRgb: function displayedRgb(color, showAlpha) { if (!(color instanceof src_color)) { throw Error('color should be instance of Color Class'); } var _color$toRgb = color.toRgb(), r = _color$toRgb.r, g = _color$toRgb.g, b = _color$toRgb.b; return showAlpha ? 'rgba(' + r + ', ' + g + ', ' + b + ', ' + color.get('alpha') / 100 + ')' : 'rgb(' + r + ', ' + g + ', ' + b + ')'; } }, mounted: function mounted() { var value = this.value; if (value) { this.color.fromString(value); } this.popperElm = this.$refs.dropdown.$el; }, data: function data() { var color = new src_color({ enableAlpha: this.showAlpha, format: this.colorFormat }); return { color: color, showPicker: false, showPanelColor: false }; }, components: { PickerDropdown: picker_dropdown } }); // CONCATENATED MODULE: ./packages/color-picker/src/main.vue?vue&type=script&lang=js& /* harmony default export */ var src_mainvue_type_script_lang_js_ = (mainvue_type_script_lang_js_); // CONCATENATED MODULE: ./packages/color-picker/src/main.vue /* normalize component */ var main_component = Object(componentNormalizer["a" /* default */])( src_mainvue_type_script_lang_js_, render, staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { var main_api; } main_component.options.__file = "packages/color-picker/src/main.vue" /* harmony default export */ var main = (main_component.exports); // 
CONCATENATED MODULE: ./packages/color-picker/index.js /* istanbul ignore next */ main.install = function (Vue) { Vue.component(main.name, main); }; /* harmony default export */ var color_picker = __webpack_exports__["default"] = (main); /***/ }), /***/ 6: /***/ (function (module, exports) { module.exports = require("element-ui/lib/mixins/locale"); /***/ }), /***/ 7: /***/ (function (module, exports) { module.exports = require("vue"); /***/ }) /******/ });
PypiClean
/BayNet-0.3.0.tar.gz/BayNet-0.3.0/baynet/utils/visualisation.py
from pathlib import Path

import igraph
import graphviz


class GraphComparison(igraph.Graph):
    """Union of graph_a and graph_b, with edges assigned colours for plotting."""

    # pylint: disable=not-an-iterable, unsupported-assignment-operation
    def __init__(
        self,
        graph_a: igraph.Graph,
        graph_b: igraph.Graph,
        nodes: list,
        a_not_b_col: str = "red",
        b_not_a_col: str = "blue",
        reversed_in_b_col: str = "green",
        both_col: str = "black",
        line_width: int = 2,
    ):
        """Create comparison graph.

        Edges present in both graphs, only in one, or reversed between the
        two are added in that order and coloured as each batch is added.
        """
        super().__init__(
            directed=True,
            vertex_attrs={'fontsize': None, 'fontname': None, 'label': None},
            edge_attrs={'color': None, 'penwidth': None, 'style': None},
        )
        self.line_width = line_width
        self.add_vertices(nodes)
        self.vs['label'] = nodes
        self.vs['fontsize'] = 30
        self.vs['fontname'] = "Helvetica"

        # Edge sets are computed up front; the order of the add/colour pairs
        # below matters, since colour_uncoloured() only touches edges that
        # have not yet been assigned a colour.
        shared_edges = graph_a.edges & graph_b.edges
        only_in_a = graph_a.edges.difference(graph_b.skeleton_edges)
        only_in_b = graph_b.edges.difference(graph_a.skeleton_edges)
        reversed_in_b = graph_a.edges.intersection(graph_b.skeleton_edges).difference(
            graph_a.edges.intersection(graph_b.edges)
        )

        self.add_edges(shared_edges)
        self.colour_uncoloured(both_col)
        self.add_edges(only_in_a)
        self.colour_uncoloured(a_not_b_col)
        self.add_edges(only_in_b)
        self.colour_uncoloured(b_not_a_col)
        self.add_edges(reversed_in_b)
        self.colour_uncoloured(reversed_in_b_col)

    def colour_uncoloured(self, colour: str) -> None:
        """Colour edges not yet given a colour."""
        uncoloured = (e for e in self.es if e['color'] is None)
        for edge in uncoloured:
            edge['color'] = colour
            # Edges removed relative to graph_b ("red") are drawn dashed.
            edge['style'] = "dashed" if colour == "red" else "solid"
            edge['penwidth'] = self.line_width

    def plot(self, path: Path = Path().parent / 'comparison.png') -> None:
        """Save a graphviz plot of comparison."""
        draw_graph(self, path)


def draw_graph(
    graph: igraph.Graph,
    save_path: Path = Path().parent / 'graph.png',
) -> None:
    """Save a graphviz plot of a given graph.

    The graph is written to a temporary DOT file alongside ``save_path``,
    rendered via graphviz (output format taken from the file suffix), and
    the temporary file removed.
    """
    temp_path = save_path.parent / 'temp.dot'
    with open(temp_path, 'w') as temp_file:
        graph.write_dot(temp_file)
    graphviz_source = graphviz.Source.from_file(temp_path)
    temp_path.unlink()
    rendered = graphviz_source.pipe(format=save_path.suffix.strip('.'))
    with open(save_path, 'wb') as save_file:
        save_file.write(rendered)
PypiClean
/ASGIWebDAV-1.3.2.tar.gz/ASGIWebDAV-1.3.2/asgi_webdav/cli.py
from logging import getLogger

import click

try:
    import uvicorn
except ImportError:
    uvicorn = None

from asgi_webdav.constants import AppEntryParameters, DevMode
from asgi_webdav.server import convert_aep_to_uvicorn_kwargs

logger = getLogger(__name__)


def convert_click_kwargs_to_aep(kwargs: dict) -> AppEntryParameters:
    """Map click's parsed CLI options onto an ``AppEntryParameters``.

    ``kwargs`` is the keyword dict click builds from the options declared on
    ``main`` below; keys therefore match the long option names with dashes
    replaced by underscores.
    """
    if kwargs.get("dev"):
        dev_mode = DevMode.DEV
    elif kwargs.get("litmus"):
        # NOTE(review): "LIMTUS" mirrors the (misspelled) member name declared
        # in asgi_webdav.constants.DevMode — do not "correct" it here alone.
        dev_mode = DevMode.LIMTUS
    else:
        dev_mode = None

    return AppEntryParameters(
        bind_host=kwargs["host"],
        bind_port=kwargs["port"],
        config_file=kwargs["config"],
        admin_user=kwargs["user"],
        root_path=kwargs["root_path"],
        dev_mode=dev_mode,
        logging_display_datetime=kwargs["logging_display_datetime"],
        # BUG FIX: previously this copied kwargs["logging_display_datetime"],
        # silently discarding the --logging-use-colors / --logging-no-use-colors
        # option. Use the colour flag itself.
        logging_use_colors=kwargs["logging_use_colors"],
    )


@click.command("runserver", help="Run ASGI WebDAV server")
@click.option(
    "-V",
    "--version",
    is_flag=True,
    default=False,
    help="Print version info and exit.",
)
@click.option(
    "-H",
    "--host",
    default="127.0.0.1",
    help="Bind socket to this host. [default: 127.0.0.1]",
)
@click.option(
    "-P", "--port", default=8000, help="Bind socket to this port. [default: 8000]"
)
@click.option(
    "-c",
    "--config",
    default=None,
    help="Load configuration from file. [default: None]",
)
@click.option(
    "-u",
    "--user",
    type=(str, str),
    default=None,
    help="Administrator username/password. [default: username password]",
)
# @click.option(
#     "--anonymous",
#     is_flag=True,
#     default=False,
#     help="anonymous support",
# )
@click.option(
    "-r",
    "--root-path",
    default=None,
    help="Mapping provider URI to path '/'. [default: None]",
)
@click.option(
    "--logging-display-datetime/--logging-no-display-datetime",
    is_flag=True,
    default=True,
    help="Turn on datetime in logging",
)
@click.option(
    "--logging-use-colors/--logging-no-use-colors",
    is_flag=True,
    default=True,
    help="Turn on color in logging",
)
@click.option(
    "--dev",
    is_flag=True,
    default=False,
    help="Enter Development(for coding) mode, DON'T use it in production!",
)
@click.option(
    "--litmus",
    is_flag=True,
    default=False,
    help="Enter Litmus(for test) mode, DON'T use it in production!",
)
def main(**kwargs):
    """CLI entry point: print the version, or configure and launch uvicorn."""
    if kwargs["version"]:
        from asgi_webdav import __version__

        print(__version__)
        exit()

    # uvicorn is an optional extra; fail with a helpful message if missing.
    if uvicorn is None:
        print(
            "Please install ASGI web server implementation first.\n"
            " eg: pip install -U ASGIWebDAV[uvicorn]"
        )
        exit(1)

    aep = convert_click_kwargs_to_aep(kwargs)
    kwargs = convert_aep_to_uvicorn_kwargs(aep)
    logger.debug(f"uvicorn's kwargs:{kwargs}")
    return uvicorn.run(**kwargs)
PypiClean
/Cubane-1.0.11.tar.gz/Cubane-1.0.11/cubane/cms/templatetags/cms_tags.py
from __future__ import unicode_literals from django.conf import settings from django.http import HttpResponseRedirect from django import template from django.template import Context from django.contrib import messages from django.template.loader import TemplateDoesNotExist from django.template.defaultfilters import slugify from django.utils.safestring import mark_safe from django.utils.html import format_html, escape from django.core.paginator import Page as PaginatorPage from cubane.lib.templatetags import * from cubane.lib.html import transpose_html_headlines from cubane.lib.html import cleanup_html from cubane.lib.app import model_to_hash from cubane.lib.acl import Acl from cubane.lib.template import get_template from cubane.cms.forms import MailChimpSubscriptionForm from cubane.media.views import load_images_for_content from cubane.cms.views import get_page_links_from_content from cubane.cms.views import get_page_links_from_page from cubane.cms.views import get_cms from cubane.cms.views import get_cms_settings from mailsnake import MailSnake import re import copy register = template.Library() # old (deprecated) google analytics (ga.js) GOOGLE_ANALYTICS_SNIPPET = """<script>var _gaq=_gaq||[];_gaq.push(['_setAccount','%s']);_gaq.push(['_trackPageview']);(function(){var ga=document.createElement('script');ga.type='text/javascript';ga.async=true;ga.src=('https:'==document.location.protocol?'https://ssl':'http://www')+'.google-analytics.com/ga.js';var s=document.getElementsByTagName('script')[0];s.parentNode.insertBefore(ga, s);})();</script>""" # universal google analytics (analytics.js) GOOGLE_ANALYTICS_UNIVERSAL_SNIPPET = """<script>(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) 
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');ga('create','%s','auto');ga('send','pageview');</script>""" GOOGLE_ANALYTICS_UNIVERSAL_SNIPPET_ASYNC = """<script>window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date;ga('create', '%s', 'auto');ga('send', 'pageview');</script><script async src='https://www.google-analytics.com/analytics.js'></script>""" # universal google analytics for ecommerce GOOGLE_ANALYTICS_ECOMMERCE_SNIPPET = """<script>(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');ga('create', '%s');ga('require', 'ec');</script>""" GOOGLE_ANALYTICS_ECOMMERCE_SNIPPET_ASYNC = """<script>window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date;ga('create', '%s', 'auto');ga('require', 'ec');</script><script async src='https://www.google-analytics.com/analytics.js'></script>""" # other GOOGLE_ANALYTICS_UNIVERSAL_WITH_HASH_LOCATION_SNIPPET = """<script>(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');ga('create','%s','auto');ga('send','pageview',{'page':location.pathname+location.search+location.hash});window.onhashchange=function(){ga('send','pageview',{'page':location.pathname+location.search+location.hash});};</script>""" TWITTER_WIDGET_ID = """<a class="twitter-timeline" href="https://twitter.com/twitterapi" data-widget-id="%s">Tweets by %s</a><script>!function(d,s,id){var 
js,fjs=d.getElementsByTagName(s)[0];if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src="//platform.twitter.com/widgets.js";fjs.parentNode.insertBefore(js,fjs);}}(document,"script","twitter-wjs");</script>""" def get_attr(s, attrname): """ Return XML attribute value within given string s of given attribute name. """ m = re.search(r'%s="(.*?)"' % attrname, s) if m: return m.group(1) elif 'cubane' in attrname: # for legacy reasons, also support data-ikit-... attributes attrname = attrname.replace('data-cubane-', 'data-ikit-') m = re.search(r'%s="(.*?)"' % attrname, s) if m: return m.group(1) # not found return '' def set_attr(s, attrname, value): """ Set XML attribute value of given attribute in given string s. """ return re.sub(r'%s="(.*?)"' % attrname, '%s="%s"' % (attrname, value), s) def rewrite_images(content, images, render_image, noscript=False, image_shape=settings.DEFAULT_IMAGE_SHAPE): """ Rewrite img tags to the responsive format for fast responsive websites. """ if image_shape not in settings.IMAGE_SHAPES: image_shape = 'original' def rewrite_image(match): s = match.group(1) _id = get_attr(s, 'data-cubane-media-id') width = get_attr(s, 'data-width') size = get_attr(s, 'data-cubane-media-size') style = get_attr(s, 'style') lightbox = get_attr(s, 'data-cubane-lightbox') == 'true' # only generate code for lightbox if we have cubane.lightbox # installed... if 'cubane.lightbox' not in settings.INSTALLED_APPS: lightbox = False # remove width if we are in auto size mode... 
if size == 'auto': width = None try: _id = int(_id) image = images.get(_id, None) if image: return render_image(image, shape=image_shape, width=width, style=style, lightbox=lightbox, noscript=noscript) except ValueError: pass return match.group(0) return re.sub(r'<img(.*?\/?)>', rewrite_image, content ) def rewrite_image_references(content): """ Rewrites image content that might refer to an outdated image version, since the original image has been re-uploaded which might have been changed the version number. """ if content is None: return content # collect all media identifiers images = load_images_for_content(content) def rewrite_image(match): s = match.group(0) _id = get_attr(s, 'data-cubane-media-id') try: _id = int(_id) image = images.get(_id, None) if image: s = set_attr(s, 'src', image.url) except ValueError: pass return s return re.sub(r'<img(.*?\/?)>', rewrite_image, content ) def rewrite_page_links(content, page_links): """ Rewrite given cms slot content by replacing any page link references in the form #link[type:id] into the corresponding actual URL of the references entity bby using the page link data structure provided. """ def rewrite_page_link(match): s = match.group(1) m = re.match(r'(\w+):(\d+)', s) if m: _type = m.group(1) _id = m.group(2) items = page_links.get(_type, {}) obj = items.get(_id) if obj and hasattr(obj, 'url'): s = obj.url elif obj and hasattr(obj, 'get_absolute_url'): s = obj.get_absolute_url() else: if settings.DEBUG: raise ValueError('Unable to resolve page link #link[%s:%s].' % ( _type, _id )) else: s = '' return s return re.sub(r'#link\[(.*?)\]', rewrite_page_link, content) def render_meta_tag(name, value): """ Render a meta tag with the given name and value if a value is defined. 
""" if value: return format_html('<meta name="{}" content="{}" />', name, value) else: return '' def get_edit_reference(request, instance, property_names, help_text=None, shortcut=False): """ Return reference information about editing the given list of properties for the given object instance. """ if settings.CUBANE_FRONTEND_EDITING: if request is not None and request.user is not None and (request.user.is_staff or request.user.is_superuser): if instance is not None and property_names: # make sure that the user has access to the instance if Acl.of(instance.__class__).can_edit_instance(request, instance): # try to extract help text from form if help_text is None and len(property_names) == 1: if hasattr(instance.__class__, 'get_form'): form = instance.__class__.get_form() if form: field = form.declared_fields.get(property_names[0]) if field: help_text = field.help_text # construct reference return '%s%s|%s|%s%s' % ( '!' if shortcut else '', model_to_hash(instance.__class__), instance.pk, ':'.join(property_names), '|%s' % help_text if help_text else '' ) return None class SlotNode(template.Node): def __init__(self, slotname, headline_transpose=0, image_shape=None): self.slotname = slotname self.headline_transpose = headline_transpose self.image_shape = image_shape def render(self, context): """ Render slot content. 
""" slotname = value_or_literal(self.slotname, context) headline_transpose = value_or_literal(self.headline_transpose, context) image_shape = value_or_literal(self.image_shape, context) page = value_or_none('page', context) child_page = value_or_none('child_page', context) preview = value_or_default('cms_preview', context, False) noscript = value_or_default('noscript', context, False) images = context.get('images', {}) is_enquiry_template = value_or_default('is_enquiry_template', context, False) # make sure that this slot actually exists if slotname not in settings.CMS_SLOTNAMES: return template_error("Slot '%s' does not exist (referenced via %s)" % (slotname, self.slotname)) # switch page to child_page if present if child_page: page = child_page # extract correct content from page based on the slotname provided if page: content = page.get_slot_content(slotname) else: content = '' # make sure that headline transpose is an integer we can work with try: headline_transpose = int(headline_transpose) except ValueError: headline_transpose = 0 # run through content pipeline cms = get_cms() request = context.get('request') content = cms.on_render_content_pipeline(request, content, context) # rewrite image url to use responsive lazy-load mechanism for images, # (we are not doing this in preview mode). if not preview and not is_enquiry_template and 'cubane.media' in settings.INSTALLED_APPS: from cubane.media.templatetags.media_tags import render_image content = rewrite_images(content, images, render_image, noscript, image_shape) # page links if not preview: page_links = context.get('page_links', {}) content = rewrite_page_links(content, page_links) # transpose headlines content = transpose_html_headlines(content, headline_transpose) # cleanup markup content = cleanup_html(content) # mark content as safe content = mark_safe(content) # wrap content into a seperate slot container if we are configured # to do so... 
if settings.CMS_RENDER_SLOT_CONTAINER: content = '<div class="cms-slot-container">' + content + '</div>' # frontend editing? if settings.CUBANE_FRONTEND_EDITING: ref = get_edit_reference(request, page, ['slot_%s' % slotname]) if ref: content = '<div edit="%s">%s</div>' % (ref, content) # in preview mode, we wrap the content into a container, so that we # can identify the content in the backend and provide live-editing # preview capabilities... if preview: return '<div class="cms-slot" data-slotname="%s" data-headline-transpose="%d">%s</div>' % ( slotname, headline_transpose, content ) else: return content class ChildPagesNode(template.Node): def __init__(self, child_pages, child_page_slug): self.child_pages = child_pages self.child_page_slug = child_page_slug def render(self, context): """ Render list of child pages for the current page. """ def _get_post_template(prefix, slug): t = None template_filename = 'cubane/cms/%s/%s.html' % (prefix, slug) try: if slug: t = get_template(template_filename) except TemplateDoesNotExist: pass return t, template_filename page = value_or_none('page', context) child_page_slug = value_or_literal(self.child_page_slug, context) child_pages = None # get child pages or paged child pages if self.child_pages == 'child_pages': # default argument child_pages = value_or_none('paged_child_pages', context) if child_pages == None: child_pages = value_or_none(self.child_pages, context) if child_pages: # resolve template for rendering entities t, template_filename = _get_post_template('posts', child_page_slug) if t is None: t, template_filename = _get_post_template('child_pages', child_page_slug) # if we cannot find the template, tell the user about it if t == None: raise ValueError( ("Error rendering child page listing item for '%s'. " + "Unable to load template '%s'. 
Please make sure that " + "the template exists.") % ( child_page_slug, template_filename ) ) # inject child pages into copy of context for rendering the template # if child pages is a paginator page, inject paginator itself # into the template context d = { 'child_pages': child_pages, 'paginator': child_pages.paginator if isinstance(child_pages, PaginatorPage) else None } with context.push(**d): return t.render(context) else: return '' class MapNode(template.Node): def __init__(self, lat, lng, zoom, name, api_key): self.lat = lat self.lng = lng self.zoom = zoom self.name = name self.api_key = api_key def render(self, context): lat = value_or_none(self.lat, context) lng = value_or_none(self.lng, context) zoom = value_or_none(self.zoom, context) name = value_or_none(self.name, context) if lat and lng and zoom and name: return htmltag('div', { 'class': 'enquiry-map-canvas', 'data-lat': lat, 'data-lng': lng, 'data-zoom': zoom, 'data-title': name, 'data-key': self.api_key }) else: return '' class ContactMapNode(template.Node): def __init__(self, settings, api_key): self.api_key = api_key self.settings = settings def render(self, context): settings = value_or_none(self.settings, context) if settings: return htmltag('div', { 'class': 'enquiry-map-canvas', 'data-lat': settings.lat, 'data-lng': settings.lng, 'data-zoom': settings.zoom, 'data-title': settings.name, 'data-key': self.api_key }) else: return '' class EditNode(template.Node): """ Renders additional information that enables frontend editing. 
""" def __init__(self, reference, property_names, help_text, css_class=None, shortcut=False, nodelist=None): self.reference = reference self.property_names = property_names self.help_text = help_text self.css_class = css_class self.shortcut = shortcut self.nodelist = nodelist def render(self, context): if settings.CUBANE_FRONTEND_EDITING: request = value_or_none('request', context) property_names = value_or_literal(self.property_names, context) css_class = value_or_none(self.css_class, context) help_text = value_or_none(self.help_text, context) # resolve property names if property_names is None: instance, property_name, _value = resolve_object_property_reference(context, self.reference) property_names = [property_name] else: instance = value_or_none(self.reference, context) property_names = property_names.split(',') property_names = [p.strip() for p in property_names] property_names = filter(lambda x: x, property_names) # get edit reference ref = get_edit_reference(request, instance, property_names, help_text, self.shortcut) if ref: if self.nodelist is not None: inner_content = self.nodelist.render(context) return '<div edit="%s"%s>%s</div>' % ( ref, (' class="%s"' % css_class) if css_class else '', inner_content ) else: return ' edit="%s"' % ref return '' if self.nodelist is None else self.nodelist.render(context) @register.tag('slot') def slot(parser, token): """ Renders a cms slot with content from the current page, which is assumed to be in a template variable with the name 'page'. 
Syntax: {% slot <slotname> [<headline-transpose>] %} """ bits = token.split_contents() # slotname if len(bits) < 2: raise template.TemplateSyntaxError( "'%s' takes at least one argument: <slotname> [<headline-transpose>] [<image-shape>]" % bits[0] ) slotname = bits[1] # headline transpose headline_transpose = bits[2] if len(bits) >= 3 else '0' # image shape image_shape = bits[3] if len(bits) >= 4 else None return SlotNode(slotname, headline_transpose, image_shape) @register.tag('child_pages') def child_pages(parser, token): """ Renders a list of child pages, such as projects that belong to the current page. Syntax: {% child_pages [<child_pages>] [child_page_slug] %} """ bits = token.split_contents() entities = 'child_pages' entity_slug = 'child_page_slug' if len(bits) >= 2: entities = bits[1] if len(bits) == 3: entity_slug = bits[2] return ChildPagesNode(entities, entity_slug) @register.tag('posts') def posts(parser, token): """ Renders a list of posts, such as projects that belong to the current page. This is a replacement of the child_pages template tag, in order to replace the term child-page. Syntax: {% posts [<posts>] [post_slug] %} """ return child_pages(parser, token) @register.tag('contact_map') def contact_map(parser, token): """ Presents an interactive google map showing the location of the business according to settings. Syntax: {% contact_map [<settings>] %} """ bits = token.split_contents() return ContactMapNode(bits[1] if len(bits) == 2 else 'settings', settings.CUBANE_GOOGLE_MAP_API_KEY) @register.tag('map') def map(parser, token): """ Presents an interactive google map showing the location according to the values given. 
Syntax: {% contact_map <lat>, <lng>, <zoom>, <name> %} """ bits = token.split_contents() lat = bits[1] lng = bits[2] zoom = bits[3] name = bits[4] return MapNode(lat, lng, zoom, name, settings.CUBANE_GOOGLE_MAP_API_KEY) @register.simple_tag def image_tag(src, *args, **kwargs): """ This tag provides method for generating HTML image Syntax: {% image_tag <src> <args> %} """ image = '<img src="%s"' % escape(src) for key, value in kwargs.iteritems(): image += ' %s="%s"' % (escape(key), escape(value)) image += '>' return mark_safe(image) @register.simple_tag def link_tag(href, content, *args, **kwargs): """ This tag provides method for generating HTML link Syntax: {% link_tag <href> <name> <args> %} """ link = '<a href="%s"' % escape(href) for key, value in kwargs.iteritems(): link += ' %s="%s"' % (escape(key), escape(value)) link += '>%s</a>' % content return mark_safe(link) @register.simple_tag def social_tag(href, src): """ Render mixed link_tag and image_tag because django doesn't support nesting template tags Syntax: {% social_tag <href> <src> <link_args> <image_args> """ image = image_tag(src) social_link = link_tag(href, image) return mark_safe(social_link) @register.simple_tag(takes_context=True) def site_identification(context): """ Embeds various site identification keys, such as webmaster tools etc. """ settings = value_or_none('settings', context) s = '' if settings: s += render_meta_tag( 'google-site-verification', settings.webmaster_key ) s += render_meta_tag( 'globalsign-domain-verification', settings.globalsign_key ) return mark_safe(s) def get_google_analytics_key(context): """ Return the default google analytics integration key which is configured in cms settings for PRODUCTION mode. However, in DEBUG mode we rely on setting.DEBUG_GOOGLE_ANALYTICS. 
""" if settings.DEBUG: if settings.DEBUG_GOOGLE_ANALYTICS: return settings.DEBUG_GOOGLE_ANALYTICS else: return '' else: cms_settings = value_or_none('settings', context) if cms_settings and cms_settings.analytics_key: return cms_settings.analytics_key else: return '' def get_google_analytics_universal(context): """ Return code snippet for google analytics universal (async or sync). """ key = get_google_analytics_key(context) if settings.CUBANE_GOOGLE_ANALYTICS_ASYNC: return mark_safe(GOOGLE_ANALYTICS_UNIVERSAL_SNIPPET_ASYNC % key) else: return mark_safe(GOOGLE_ANALYTICS_UNIVERSAL_SNIPPET % key) def get_google_analytics_ecommerce(context): """ Return code snippet for google analytics universal for e-commerce (async or sync). """ key = get_google_analytics_key(context) if settings.CUBANE_GOOGLE_ANALYTICS_ASYNC: return mark_safe(GOOGLE_ANALYTICS_ECOMMERCE_SNIPPET_ASYNC % key) else: return mark_safe(GOOGLE_ANALYTICS_ECOMMERCE_SNIPPET % key) @register.simple_tag(takes_context=True) def google_analytics(context): """ Embeds google analytics tracking facility. """ return mark_safe(GOOGLE_ANALYTICS_SNIPPET % get_google_analytics_key(context)) @register.simple_tag(takes_context=True) def google_analytics_universal(context): """ Embeds google analytics tracking facility. """ key = get_google_analytics_key(context) cms_settings = value_or_none('settings', context) if cms_settings and cms_settings.analytics_hash_location: return mark_safe(GOOGLE_ANALYTICS_UNIVERSAL_WITH_HASH_LOCATION_SNIPPET % key) else: return get_google_analytics_universal(context) @register.simple_tag(takes_context=True) def google_analytics_ecommerce(context): """ Embeds google analytics tracking facility for ecommerce application. """ return get_google_analytics_ecommerce(context) @register.simple_tag(takes_context=True) def google_analytics_ecommerce_send(context): """ Send page impression for google analytics ecommernce. 
""" return mark_safe("<script>ga('send', 'pageview');</script>") @register.simple_tag(takes_context=True) def twitter_widget(context): """ Embeds twitter widget. """ settings = value_or_none('settings', context) if settings and settings.twitter_widget_id and settings.twitter_name: return mark_safe(TWITTER_WIDGET_ID % (settings.twitter_widget_id, settings.twitter_name)) else: return '' @register.simple_tag(takes_context=True) def social_media_links(context, social_id=None): """ Renders social media links. Usage {% social_media_links %} """ d = { 'social_id': social_id } with context.push(**d): return get_template('cubane/cms/social.html').render(context) @register.simple_tag(takes_context=True) def opening_times(context): """ Renders the opening times if enabled. Usage {% opening_times %} """ return get_template('cubane/cms/opening_times.html').render(context) @register.simple_tag(takes_context=True) def meta_title(context, page=None): """ Renders the meta title of the current page based on the current page's meta title or title (depending on what is available). The name of the website is appended at the end (unless it is already included within the page title or meta title). """ if page == None: page = context.get('current_page') cms_settings = context.get('settings') if page: if isinstance(page, basestring): title = page else: title = page.meta_title if title: title = title.strip() if cms_settings: if cms_settings.meta_name: meta_name = cms_settings.meta_name else: meta_name = cms_settings.name if meta_name: meta_name = meta_name.strip() if not title.endswith(meta_name): title += settings.CMS_META_TITLE_SEPARATOR + meta_name.strip() return title elif cms_settings and cms_settings.name: return cms_settings.name.strip() return '' @register.simple_tag(takes_context=True) def newsletter_signup_form(context): """ Renders a default nessletter signup form based on MailChimp. 
""" settings = value_or_none('settings', context) if not settings: raise ValueError("Expected 'settings' in template context.") if not settings.mailchimp_api or not settings.mailchimp_list_id: return '' request = value_or_none('request', context) if not request: raise ValueError("Expected 'request' in template context.") if request.method == 'POST': form = MailChimpSubscriptionForm(request.POST) else: form = MailChimpSubscriptionForm() msg = None msg_type = None if request.method == 'POST' and form.is_valid(): d = form.cleaned_data merge_vars = { 'FNAME': d.get('mailchimp_subscription__name', '') } ms = MailSnake(settings.mailchimp_api) try: ms.listSubscribe(id=settings.mailchimp_list_id, email_address=d['mailchimp_subscription__email'], merge_vars=merge_vars) msg = 'Almost finished...We need to confirm your email address. To complete the subscription process, please click the link in the email we just sent you.' msg_type = 'success' except: msg = 'Unfortunately we were unable to process your request. Please try again later...' msg_type = 'error' # render form t = get_template('cubane/cms/newsletter_form.html') c = copy.copy(context) c['form'] = form c['msg'] = msg c['msg_type'] = msg_type return t.render(c) @register.simple_tag(takes_context=True) def newsletter_signup_form_ajax(context): """ Renders a default nessletter signup form based on MailChimp. 
""" settings = value_or_none('settings', context) if not settings: raise ValueError("Expected 'settings' in template context.") if not settings.mailchimp_api or not settings.mailchimp_list_id: return '' request = value_or_none('request', context) if not request: raise ValueError("Expected 'request' in template context.") if request.method == 'POST': form = MailChimpSubscriptionForm(request.POST) else: form = MailChimpSubscriptionForm() form.fields['mailchimp_subscription__name'].required = False del form.fields['mailchimp_subscription__name'] # render form t = get_template('cubane/cms/newsletter_form.html') c = copy.copy(context) c['form'] = form return t.render(c) @register.simple_tag(takes_context=True) def cms_content(context, content, headline_transpose=0, image_shape=None): """ Renders cms content. """ if content is None: return '' # run through content pipeline cms = get_cms() request = context.get('request') content = cms.on_render_content_pipeline(request, content, context) # make sure that headline transpose is an integer we can work with try: headline_transpose = int(headline_transpose) except ValueError: headline_transpose = 0 preview = value_or_default('cms_preview', context, False) # lazy-loaded images (not in preview mode) if not preview and 'cubane.media' in settings.INSTALLED_APPS: from cubane.media.templatetags.media_tags import render_image images = context.get('images', {}) images = load_images_for_content(content, images) noscript = value_or_default('noscript', context, False) content = rewrite_images(content, images, render_image, noscript, image_shape) # page links if not preview: page_links = context.get('page_links', {}) page_links = get_page_links_from_content(content, preview) content = rewrite_page_links(content, page_links) # transpose headlines if headline_transpose > 0: content = transpose_html_headlines(content, headline_transpose) # cleanup markup content = cleanup_html(content) # frontend editing return mark_safe(content) def 
edit_or_compose(parser, token, shortcut=False): """ Edit or compose template tags: following format: {% edit object.property %} or {% edit object 'property' %} or {% edit object 'property1, property2...' %}" """ bits = token.split_contents() # usage tag_name = bits[0] if len(bits) < 2: raise template.TemplateSyntaxError( 'Usage: %s <reference_or_instance> [<property_names>] [<help_text>] [class=<class-name>]' % tag_name ) # extract keyword arguments args, kwargs = get_template_args(bits) # object/property reference reference = args[0] # optional argument: property names property_names = args[1] if len(args) >= 2 else None # optional argument: help text help_text = args[2] if len(args) >= 3 else None # optional class (kwarg) css_class = kwargs.get('class') # compose block tag? if tag_name == 'compose': nodelist = parser.parse(('endcompose',)) parser.delete_first_token() elif tag_name == 'compose!': nodelist = parser.parse(('endcompose!',)) parser.delete_first_token() else: nodelist = None # edit node return EditNode(reference, property_names, help_text, css_class, shortcut, nodelist) @register.tag('edit') def edit(parser, token): """ Inject additional hidden information that is used for frontend editing: {% edit object.property %} or {% edit object 'property' %} or {% edit object 'property1, property2...' %}" """ return edit_or_compose(parser, token) @register.tag('edit!') def edit_shortcut(parser, token): """ Inject additional hidden information that is used for frontend editing: {% edit! object.property %} or {% edit! object 'property' %} or {% edit! object 'property1, property2...' %}" """ return edit_or_compose(parser, token, shortcut=True) @register.tag('compose') def compose(parser, token): """ Inject additional hidden information that is used for frontend editing by wrapping the containing content into a separate 'div' tag: {% compose object.property %} ... 
{% endcompose %} """ return edit_or_compose(parser, token) @register.tag('compose!') def compose_shortcut(parser, token): """ Inject additional hidden information that is used for frontend editing by wrapping the containing content into a separate 'div' tag: {% compose! object.property %} ... {% endcompose %} """ return edit_or_compose(parser, token, shortcut=True) @register.simple_tag() def site_notification(): """ Render notification message if configured in settings. """ if settings.CUBANE_SITE_NOTIFICATION: cms_settings = get_cms_settings() if cms_settings.notification_enabled and cms_settings.notification_text: return mark_safe('<div class="cubane-notification-container" style="background-color: #b94a48; color: white; font-family: Arial, Helvetica, sans-serif; padding: 15px 0; margin: 0; line-height: 1.25em; font-size: 16px;"><div class="cubane-notification" style="max-width: 1200px; margin: 0 auto; padding: 0;">%s</div></div>' % cms_settings.notification_text) return ''
PypiClean
/FFC-2017.1.0.tar.gz/FFC-2017.1.0/ffc/quadrature/quadratureoptimization.py
from ufl.utils.sorting import sorted_by_key

# FFC modules
from ffc.log import info, error
from ffc.cpp import format
from ffc.quadrature.symbolics import optimise_code, BASIS, IP, GEO
from ffc.quadrature.symbolics import create_product, create_sum, create_symbol, create_fraction


def optimize_integral_ir(ir, parameters):
    """Compute optimized intermediate representation of integral.

    The ``parameters`` argument is intentionally unused: the optimisation
    parameters are read from the IR itself (``ir["optimise_parameters"]``).
    A distinct local name is used so the argument is no longer shadowed
    (this resolves the former FIXME in this function).

    Returns the (possibly mutated) ``ir`` dictionary.
    """
    # Get integral type and optimization parameters from the IR.
    integral_type = ir["integral_type"]
    opt_parameters = ir["optimise_parameters"]

    # Nothing to do unless an optimisation strategy was requested.
    if opt_parameters["optimisation"]:
        # Get parameters.
        integrals = ir["trans_integrals"]
        num_facets = ir["num_facets"]
        num_vertices = ir["num_vertices"]
        geo_consts = ir["geo_consts"]
        psi_tables_map = ir["psi_tables_map"]

        # The two "precompute" strategies share one code path; everything
        # else goes through expression simplification.
        optimisation = opt_parameters["optimisation"]
        precompute = optimisation in ("precompute_ip_const", "precompute_basis_const")

        # Optimize based on integral type.
        if integral_type == "cell":
            info("Optimising expressions for cell integral")
            if precompute:
                _precompute_expressions(integrals, geo_consts, optimisation)
            else:
                _simplify_expression(integrals, geo_consts, psi_tables_map)
        elif integral_type == "exterior_facet":
            for i in range(num_facets):
                info("Optimising expressions for facet integral %d" % i)
                if precompute:
                    _precompute_expressions(integrals[i], geo_consts, optimisation)
                else:
                    _simplify_expression(integrals[i], geo_consts, psi_tables_map)
        elif integral_type == "interior_facet":
            # Interior facets are optimised pairwise (facet of '+' cell,
            # facet of '-' cell).
            for i in range(num_facets):
                for j in range(num_facets):
                    info("Optimising expressions for facet integral (%d, %d)" % (i, j))
                    if precompute:
                        _precompute_expressions(integrals[i][j], geo_consts, optimisation)
                    else:
                        _simplify_expression(integrals[i][j], geo_consts, psi_tables_map)
        elif integral_type == "vertex":
            for i in range(num_vertices):
                info("Optimising expressions for point integral %d" % i)
                if precompute:
                    _precompute_expressions(integrals[i], geo_consts, optimisation)
                else:
                    _simplify_expression(integrals[i], geo_consts, psi_tables_map)
        else:
            error("Unhandled domain type: " + str(integral_type))

    return ir


def _simplify_expression(integral, geo_consts, psi_tables_map):
    """Run optimise_code() over every entry of every loop of the integral.

    Entries whose optimised value is zero are dropped; the set of psi
    tables actually referenced by the surviving values is recorded back
    into the terms structure (mutated in place).
    """
    for points, terms, functions, ip_consts, coordinate, conditionals in integral:
        # NOTE: sorted is needed to pass the regression tests on the buildbots
        # but it might be inefficient for speed.
        # A solution could be to only compare the output of evaluating the
        # integral, not the header files.
        for loop, (data, entry_vals) in sorted_by_key(terms):
            t_set, u_weights, u_psi_tables, u_nzcs, basis_consts = data
            new_entry_vals = []
            psi_tables = set()
            # NOTE: sorted is needed here as well, for the same reason.
            for entry, val, ops in sorted(entry_vals):
                value = optimise_code(val, ip_consts, geo_consts, t_set)
                # Keep only non-zero values.
                if value.val:
                    new_entry_vals.append((entry, value, value.ops()))
                    psi_tables.update(set([psi_tables_map[b] for b in value.get_unique_vars(BASIS)]))

            terms[loop][0][2] = psi_tables
            terms[loop][1] = new_entry_vals


def _precompute_expressions(integral, geo_consts, optimisation):
    """Extract precomputable sub-expressions from every entry of the integral.

    Entries whose extracted value is zero are dropped; the terms structure
    is mutated in place.
    """
    for points, terms, functions, ip_consts, coordinate, conditionals in integral:
        for loop, (data, entry_vals) in sorted_by_key(terms):
            t_set, u_weights, u_psi_tables, u_nzcs, basis_consts = data
            new_entry_vals = []
            for entry, val, ops in entry_vals:
                value = _extract_variables(val, basis_consts, ip_consts,
                                           geo_consts, t_set, optimisation)
                # Keep only non-zero values.
                if value.val:
                    new_entry_vals.append((entry, value, value.ops()))

            terms[loop][1] = new_entry_vals


def _extract_variables(val, basis_consts, ip_consts, geo_consts, t_set, optimisation):
    """Recursively hoist BASIS/IP/GEO constant sub-expressions of ``val``.

    Constants are registered (by mutation) in basis_consts, ip_consts and
    geo_consts respectively, and replaced by freshly named symbols.
    Dispatches on the symbolic precedence tag ``val._prec``:
    0 = symbol, 1 = unary-with-base, 2 = product, 3 = sum, 4 = fraction.
    """
    f_G = format["geometry constant"]
    f_I = format["ip constant"]
    f_B = format["basis constant"]

    if val._prec == 0:
        # Plain symbol: nothing to extract.
        return val
    elif val._prec == 1:
        if val.base_expr is None:
            return val
        new_base = _extract_variables(val.base_expr, basis_consts, ip_consts,
                                      geo_consts, t_set, optimisation)
        new_sym = create_symbol(val.v, val.t, new_base, val.base_op)
        if new_sym.t == BASIS:
            return _reduce_expression(new_sym, [], basis_consts, f_B, True)
        elif new_sym.t == IP:
            return _reduce_expression(new_sym, [], ip_consts, f_I, True)
        elif new_sym.t == GEO:
            return _reduce_expression(new_sym, [], geo_consts, f_G, True)
    # First handle child classes of product and sum.
    elif val._prec in (2, 3):
        new_vars = []
        for v in val.vrs:
            new_vars.append(_extract_variables(v, basis_consts, ip_consts,
                                               geo_consts, t_set, optimisation))
        if val._prec == 2:
            new_val = create_product(new_vars)
        if val._prec == 3:
            new_val = create_sum(new_vars)
    elif val._prec == 4:
        num = _extract_variables(val.num, basis_consts, ip_consts,
                                 geo_consts, t_set, optimisation)
        denom = _extract_variables(val.denom, basis_consts, ip_consts,
                                   geo_consts, t_set, optimisation)
        return create_fraction(num, denom)
    else:
        error("Unknown symbolic type: %s" % repr(val))

    # Sort variables of the product/sum into BASIS / IP / GEO buckets.
    # BASIS terms are only bucketed when we precompute basis constants.
    b_c, i_c, g_c = [], [], []
    for v in new_val.vrs:
        if v.t == BASIS:
            if optimisation == "precompute_basis_const":
                b_c.append(v)
        elif v.t == IP:
            i_c.append(v)
        else:
            g_c.append(v)

    # Remaining (unbucketed) variables, then fold each bucket into a
    # named constant: GEO constants feed into the IP bucket, which feeds
    # back into the variable list together with BASIS constants.
    vrs = new_val.vrs[:]
    for v in g_c + i_c + b_c:
        vrs.remove(v)
    i_c.extend(_reduce_expression(new_val, g_c, geo_consts, f_G))
    vrs.extend(_reduce_expression(new_val, i_c, ip_consts, f_I))
    vrs.extend(_reduce_expression(new_val, b_c, basis_consts, f_B))

    if len(vrs) > 1:
        # Rebuild an expression of the same kind from the reduced parts.
        if new_val._prec == 2:
            new_object = create_product(vrs)
        elif new_val._prec == 3:
            new_object = create_sum(vrs)
        else:
            error("Must have product or sum here: %s" % repr(new_val))

        if new_object.t == BASIS:
            if optimisation == "precompute_ip_const":
                return new_object
            elif optimisation == "precompute_basis_const":
                return _reduce_expression(new_object, [], basis_consts, f_B, True)
        elif new_object.t == IP:
            return _reduce_expression(new_object, [], ip_consts, f_I, True)
        elif new_object.t == GEO:
            return _reduce_expression(new_object, [], geo_consts, f_G, True)
    return vrs[0]


def _reduce_expression(expr, symbols, const_dict, f_name, use_expr_type=False):
    """Replace an expression (or a group of symbols of one type) by a
    numbered constant symbol, registering it in ``const_dict``.

    With use_expr_type=True the whole ``expr`` becomes one constant of
    type ``expr.t``; otherwise ``symbols`` (if more than one) are combined
    according to ``expr._prec`` (2 = product, 3 = sum) and returned as a
    one-element list, or returned unchanged.
    """
    if use_expr_type:
        if expr not in const_dict:
            const_dict[expr] = len(const_dict)
        return create_symbol(f_name(const_dict[expr]), expr.t)

    # Only something to be done if we have more than one symbol.
    if len(symbols) > 1:
        sym_type = symbols[0].t

        # Create new symbol of the same kind as the parent expression.
        if expr._prec == 2:
            new_sym = create_product(symbols)
        elif expr._prec == 3:
            new_sym = create_sum(symbols)

        if new_sym not in const_dict:
            const_dict[new_sym] = len(const_dict)
        s = create_symbol(f_name(const_dict[new_sym]), sym_type)
        return [s]
    return symbols
PypiClean
/GPyM-0.60b.tar.gz/GPyM-0.60b/alien/GridCoordinates.py
import os,sys from numpy import arange, linspace, meshgrid, zeros, fromfile, concatenate from numpy import loadtxt, argmax from unique_counts import unique_counts def conv180to360(lon): ''' convert between -180~180 and 0~360 ''' if hasattr(lon,'__iter__'): return where(lon >= 0,array(lon), 360.+array(lon)) else: return lon if lon >=0 else 360+lon def conv360to180(lon): ''' convert between 0~360 and -180~180 ''' if hasattr(lon,'__iter__'): return where(lon >= 180,array(lon)-360., array(lon)) else: return lon-360. if lon >=180 else lon def nearest_idx(aSrc,val): ''' return nearest index ''' if hasattr(val,'__iter__'): return [abs(aSrc-v).argmin() for v in val] else: return abs(aSrc-val).argmin() def detect_map_direction(aSrc, yAxis=-2, xAxis=-1): ''' aSrc : 2d-array # only support global yet. ''' resY = 180./aSrc.shape[yAxis] resX = 360./aSrc.shape[xAxis] YsampleIdx = int(resY*30) unique_cnt_Y0 = unique_counts(aSrc[ YsampleIdx])[0] unique_cnt_Y1 = unique_counts(aSrc[-YsampleIdx])[0] print most_frq_val_Y1, most_frq_val_Y0 return class GridCoordinates(object): def __init__(self, mapCode, hres=None, vres=None, BBox=None): ''' mapCode = presets ['trip05','cru','u','v','^','n',...] #BBox=[[-90,90],[0,360]], res=1.0): ''' self.setup_grid(mapCode, hres, vres, BBox) def setup_grid(self, mapCode, hres=None, vres=None, BBox=None): for i,s in enumerate(mapCode): if s.isdigit(): break # find location of res. mapType, res = mapCode[:i],mapCode[i:] if i == len(mapCode)-1: # when res. not given mapType = mapCode # 1.0 degree assumed res = '1' res = float( res[0] + '.' + res[1:] ) # conv. res. to float hres = res vres = res if BBox == None: left, right = [-180.0, 180.0] if mapType in ['v','^'] else [0.0, 360.0] bottom, top = [90.0, -90.0] if mapType in ['v','n'] else [-90.0,90.0] BBox = [[left,right], [top,bottom]] else: bottom, left = BBox[0] top, right = BBox[1] hoff = hres/2. width = right-left nJ = width/hres voff = vres/2. if bottom < top else -vres/2. 
height = top-bottom if bottom < top else bottom-top nI = height/vres lon = linspace(left+hoff,right-hoff, nJ) lat = linspace(bottom+voff, top-voff, nI) self.mapType = mapType self.res = res self.vres = vres self.hres = hres self.BBox = BBox self.lat = lat self.lon = lon self.nI = nI self.nJ = nJ self.Lon, self.Lat = meshgrid(lon,lat) self.conv180to360 = conv180to360 self.conv360to180 = conv360to180 def get_idx(self, Y, X ,nearest=False, shift_lon=False): ''' X : Longitude(s) /* float or iterable */ Y : Latitude(s) /* float or iterable */ ''' if shift_lon == True: fnConv = self.conv360to180 if self.mapType in ['v','^'] else self.conv180to360 X = fnConv(X) if nearest == True: j = nearest_idx(self.lon,X) i = nearest_idx(self.lat,Y) else: lon = self.lon.tolist() lat = self.lat.tolist() j = [lon.index(x) for x in X] if hasattr(X,'__iter__') else lon.index(X) i = [lon.index(y) for y in Y] if hasattr(Y,'__iter__') else lat.index(Y) return i, j def get_crd(self, I, J): return self.lat[J], self.lon[I] def get_domain_idx(self, BBox, mode='nearest', shift_lon=False): ''' BBox : [ [south, west], [north, east] ] mode : [ 'nearest', 'exact', 'inner' ,'outter'] * both 'inner' and 'outer' include bounds * ''' [south, west], [north, east] = BBox nearest = False if mode == 'exact' else True llcr_idx = self.get_idx( south, west, nearest=nearest, shift_lon=shift_lon ) urcr_idx = self.get_idx( north, east, nearest=nearest, shift_lon=shift_lon ) sn_idx = [llcr_idx[0], urcr_idx[0]] we_idx = [llcr_idx[1], urcr_idx[1]] if self.mapType in ['n', 'v']: sn_idx = sn_idx[::-1] ####!!!! 
add treatment for 'inner' and 'outter' !!!!#### return [ [ sn_idx[0], we_idx[0] ], [ sn_idx[1], we_idx[1]] ] def get_domain_data(self, aSrc, BBox, mode='nearest', shift_lon=False): bbox_idx = self.get_domain_idx( BBox, mode=mode, shift_lon=shift_lon ) print 'bbox_idx', bbox_idx return aSrc[..., bbox_idx[0][0]:bbox_idx[1][0], bbox_idx[0][1]:bbox_idx[1][1]] def cut_domain(self, BBox, mode='nearest', shift_lon=False): return GridCoordinates( BBox ) def __repr__(self): sOut = '\n'.join( [self.mapType, # self.res, self.vres, # self.hres, # self.BBox, # self.lat, self.lon, # self.nI, # self.nJ, # self.Lon.shape, self.Lat.shape ] ) return sOut def main(*args): grid = GridCoordinates('u05') print '+'*80 grid = GridCoordinates('v') vasc = VASClimO('10') vasc(1951,2000) figure();imshow(vasc.data.mean(0));colorbar() print vasc.get_idx( 38.5, -0.5 ) print vasc.get_idx( 38.5, 359.5, shift_lon=True ) BBox = [[66.5,85.5],[70.5,170.5]] BBox = [[-10.5,10.5],[-60.5,-30.5]] aSrc = vasc.get_domain_data(vasc.data, BBox, shift_lon=True) print vasc.data.shape print vasc.yr.shape print aSrc.shape print vasc.data.max() figure();plot( ma.masked_equal( aSrc,-999).mean(-1).mean(-1) ) show() if __name__=='__main__': main(sys.argv)
PypiClean
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_ru-kg.js
// AngularJS i18n locale bundle for "ru-kg" (Russian, Kyrgyzstan).
// Registers date/time names, number/currency patterns and the CLDR plural
// rule on the $locale service via the "ngLocale" module.
// NOTE(review): this file appears to be machine-generated by the Angular
// i18n pipeline -- presumably the data should not be edited by hand.
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};

// Number of digits after the decimal point in the string form of n.
function getDecimals(n) {
  n = n + '';
  var i = n.indexOf('.');
  return (i == -1) ? 0 : n.length - i - 1;
}

// Visible-fraction info of n: v = count of fraction digits (capped at 3
// unless opt_precision is given), f = those digits as an integer.
// Used by the pluralCat rule below.
function getVF(n, opt_precision) {
  var v = opt_precision;

  if (undefined === v) {
    v = Math.min(getDecimals(n), 3);
  }

  var base = Math.pow(10, v);
  var f = ((n * base) | 0) % base;
  return {v: v, f: f};
}

$provide.value("$locale", {
  "DATETIME_FORMATS": {
    "AMPMS": [
      "AM",
      "PM"
    ],
    "DAY": [
      "\u0432\u043e\u0441\u043a\u0440\u0435\u0441\u0435\u043d\u044c\u0435",
      "\u043f\u043e\u043d\u0435\u0434\u0435\u043b\u044c\u043d\u0438\u043a",
      "\u0432\u0442\u043e\u0440\u043d\u0438\u043a",
      "\u0441\u0440\u0435\u0434\u0430",
      "\u0447\u0435\u0442\u0432\u0435\u0440\u0433",
      "\u043f\u044f\u0442\u043d\u0438\u0446\u0430",
      "\u0441\u0443\u0431\u0431\u043e\u0442\u0430"
    ],
    "MONTH": [
      "\u044f\u043d\u0432\u0430\u0440\u044f",
      "\u0444\u0435\u0432\u0440\u0430\u043b\u044f",
      "\u043c\u0430\u0440\u0442\u0430",
      "\u0430\u043f\u0440\u0435\u043b\u044f",
      "\u043c\u0430\u044f",
      "\u0438\u044e\u043d\u044f",
      "\u0438\u044e\u043b\u044f",
      "\u0430\u0432\u0433\u0443\u0441\u0442\u0430",
      "\u0441\u0435\u043d\u0442\u044f\u0431\u0440\u044f",
      "\u043e\u043a\u0442\u044f\u0431\u0440\u044f",
      "\u043d\u043e\u044f\u0431\u0440\u044f",
      "\u0434\u0435\u043a\u0430\u0431\u0440\u044f"
    ],
    "SHORTDAY": [
      "\u0432\u0441",
      "\u043f\u043d",
      "\u0432\u0442",
      "\u0441\u0440",
      "\u0447\u0442",
      "\u043f\u0442",
      "\u0441\u0431"
    ],
    "SHORTMONTH": [
      "\u044f\u043d\u0432.",
      "\u0444\u0435\u0432\u0440.",
      "\u043c\u0430\u0440\u0442\u0430",
      "\u0430\u043f\u0440.",
      "\u043c\u0430\u044f",
      "\u0438\u044e\u043d\u044f",
      "\u0438\u044e\u043b\u044f",
      "\u0430\u0432\u0433.",
      "\u0441\u0435\u043d\u0442.",
      "\u043e\u043a\u0442.",
      "\u043d\u043e\u044f\u0431.",
      "\u0434\u0435\u043a."
    ],
    "fullDate": "EEEE, d MMMM y '\u0433'.",
    "longDate": "d MMMM y '\u0433'.",
    "medium": "d MMM y '\u0433'. H:mm:ss",
    "mediumDate": "d MMM y '\u0433'.",
    "mediumTime": "H:mm:ss",
    "short": "dd.MM.yy H:mm",
    "shortDate": "dd.MM.yy",
    "shortTime": "H:mm"
  },
  "NUMBER_FORMATS": {
    "CURRENCY_SYM": "KGS",
    "DECIMAL_SEP": ",",
    "GROUP_SEP": "\u00a0",
    "PATTERNS": [
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 3,
        "minFrac": 0,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "",
        "posPre": "",
        "posSuf": ""
      },
      {
        "gSize": 3,
        "lgSize": 3,
        "maxFrac": 2,
        "minFrac": 2,
        "minInt": 1,
        "negPre": "-",
        "negSuf": "\u00a0\u00a4",
        "posPre": "",
        "posSuf": "\u00a0\u00a4"
      }
    ]
  },
  "id": "ru-kg",
  // CLDR plural rule for Russian: selects one/few/many/other from the
  // integer part i and the visible fraction info computed by getVF.
  "pluralCat": function(n, opt_precision) {  var i = n | 0;  var vf = getVF(n, opt_precision);  if (vf.v == 0 && i % 10 == 1 && i % 100 != 11) {    return PLURAL_CATEGORY.ONE;  }  if (vf.v == 0 && i % 10 >= 2 && i % 10 <= 4 && (i % 100 < 12 || i % 100 > 14)) {    return PLURAL_CATEGORY.FEW;  }  if (vf.v == 0 && i % 10 == 0 || vf.v == 0 && i % 10 >= 5 && i % 10 <= 9 || vf.v == 0 && i % 100 >= 11 && i % 100 <= 14) {    return PLURAL_CATEGORY.MANY;  }  return PLURAL_CATEGORY.OTHER;}
});
}]);
PypiClean
/Auptimizer-2.0.tar.gz/Auptimizer-2.0/src/aup/Proposer/hpbandster/examples/example_4_cluster.py
"""Example 4 - BOHB run distributed across cluster jobs.

The same script serves two roles:

* started with ``--worker`` it becomes a worker process that looks up the
  nameserver credentials in the shared directory and then serves
  evaluations until it is shut down by the master;
* started without ``--worker`` it becomes the master: it launches the
  nameserver, one in-process background worker, and the BOHB optimizer,
  then pickles the results into the shared directory.
"""
import logging
# Configure logging before importing hpbandster so import-time log records
# are not lost.
logging.basicConfig(level=logging.INFO)

import argparse
import os  # needed for os.path.join below (was missing -> NameError)
import pickle
import time

import hpbandster.core.nameserver as hpns
import hpbandster.core.result as hpres
from hpbandster.optimizers import BOHB as BOHB
from hpbandster.examples.commons import MyWorker

# Fixed copy-paste description ("Example 1 - sequential and local
# execution.") left over from the first example script.
parser = argparse.ArgumentParser(description='Example 4 - distributed execution on a cluster.')
parser.add_argument('--min_budget', type=float, help='Minimum budget used during the optimization.', default=9)
parser.add_argument('--max_budget', type=float, help='Maximum budget used during the optimization.', default=243)
parser.add_argument('--n_iterations', type=int, help='Number of iterations performed by the optimizer', default=4)
parser.add_argument('--n_workers', type=int, help='Number of workers to run in parallel.', default=2)
parser.add_argument('--worker', help='Flag to turn this into a worker process', action='store_true')
parser.add_argument('--run_id', type=str, help='A unique run id for this optimization run. An easy option is to use the job id of the clusters scheduler.')
parser.add_argument('--nic_name', type=str, help='Which network interface to use for communication.')
parser.add_argument('--shared_directory', type=str, help='A directory that is accessible for all processes, e.g. a NFS share.')

args = parser.parse_args()

# Every process has to look up the hostname of its own network interface.
host = hpns.nic_name_to_host(args.nic_name)

if args.worker:
    # Short artificial delay to make sure the nameserver is already running
    # before the worker tries to read its credentials.
    time.sleep(5)
    w = MyWorker(sleep_interval=0.5, run_id=args.run_id, host=host)
    w.load_nameserver_credentials(working_directory=args.shared_directory)
    w.run(background=False)
    exit(0)

# Start the nameserver with the host name from above and a random open port
# (requested by setting the port to 0).
NS = hpns.NameServer(run_id=args.run_id, host=host, port=0,
                     working_directory=args.shared_directory)
ns_host, ns_port = NS.start()

# Most optimizers are so computationally inexpensive that we can afford to
# run a worker in parallel to the master.  Note that this one has to run in
# the background so it does not block.
w = MyWorker(sleep_interval=0.5, run_id=args.run_id, host=host,
             nameserver=ns_host, nameserver_port=ns_port)
w.run(background=True)

# Run the optimizer; in the distributed setting we have to pass the host
# and the nameserver information explicitly.
bohb = BOHB(configspace=MyWorker.get_configspace(),
            run_id=args.run_id,
            host=host,
            nameserver=ns_host,
            nameserver_port=ns_port,
            min_budget=args.min_budget,
            max_budget=args.max_budget,
            )
res = bohb.run(n_iterations=args.n_iterations, min_n_workers=args.n_workers)

# In a cluster environment you usually want to store the results for later
# analysis.  One option is to simply pickle the Result object.
with open(os.path.join(args.shared_directory, 'results.pkl'), 'wb') as fh:
    pickle.dump(res, fh)

# After the optimizer run, shut down the master and the nameserver so the
# remote workers terminate as well.
bohb.shutdown(shutdown_workers=True)
NS.shutdown()
PypiClean
/Newsroom-1.0-py3-none-any.whl/newsroom/static/dist/notifications_js.dd3d41a0820ce20f4ee8.js
webpackJsonp([10],{ /***/ 14: /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var defaultOptions = { credentials: 'same-origin' }; function options() { var custom = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {}; return Object.assign({}, defaultOptions, custom); } function checkStatus(response) { if (response.status >= 200 && response.status < 300) { return response.json(); } else { var error = new Error(response.statusText); error.response = response; throw error; } } var Server = function () { function Server() { _classCallCheck(this, Server); } _createClass(Server, [{ key: 'get', /** * Make GET request * * @param {String} url * @return {Promise} */ value: function get(url) { return fetch(url, options({})).then(checkStatus); } /** * Make POST request to url * * @param {String} url * @param {Object} data * @return {Promise} */ }, { key: 'post', value: function post(url, data) { return fetch(url, options({ method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(data) })).then(checkStatus); } /** * Make POST request to url in keeps the format of the input * * @param {String} url * @param {Object} data * @return {Promise} */ }, { key: 'postFiles', value: function postFiles(url, data) { return fetch(url, options({ method: 'POST', body: data })).then(checkStatus); } /** * Make DELETE request to url * * @param {String} url * @return {Promise} */ }, { key: 'del', value: function del(url, data) { return fetch(url, options({ method: 'DELETE', headers: { 'Content-Type': 'application/json' }, body: data ? 
JSON.stringify(data) : null })).then(checkStatus); } }]); return Server; }(); exports.default = new Server(); /***/ }), /***/ 179: /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.SET_BOOKMARKS_COUNT = exports.CLEAR_NOTIFICATION = exports.CLEAR_ALL_NOTIFICATIONS = exports.INIT_DATA = exports.NEW_NOTIFICATION = undefined; exports.newNotification = newNotification; exports.initData = initData; exports.clearAllNotifications = clearAllNotifications; exports.clearNotification = clearNotification; exports.deleteNotification = deleteNotification; exports.deleteAllNotifications = deleteAllNotifications; exports.pushNotification = pushNotification; var _utils = __webpack_require__(2); var _server = __webpack_require__(14); var _server2 = _interopRequireDefault(_server); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } var NEW_NOTIFICATION = exports.NEW_NOTIFICATION = 'NEW_NOTIFICATION'; function newNotification(notification) { return { type: NEW_NOTIFICATION, notification: notification }; } var INIT_DATA = exports.INIT_DATA = 'INIT_DATA'; function initData(data) { return { type: INIT_DATA, data: data }; } var CLEAR_ALL_NOTIFICATIONS = exports.CLEAR_ALL_NOTIFICATIONS = 'CLEAR_ALL_NOTIFICATIONS'; function clearAllNotifications() { return { type: CLEAR_ALL_NOTIFICATIONS }; } var CLEAR_NOTIFICATION = exports.CLEAR_NOTIFICATION = 'CLEAR_NOTIFICATION'; function clearNotification(id) { return { type: CLEAR_NOTIFICATION, id: id }; } /** * Deletes the given notification of the user * */ function deleteNotification(id) { return function (dispatch, getState) { var user = getState().user; var url = '/users/' + user + '/notifications/' + user + '_' + id; return _server2.default.del(url).then(function () { _utils.notify.success((0, _utils.gettext)('Notification cleared successfully')); dispatch(clearNotification(id)); }).catch(function (error) { 
return (0, _utils.errorHandler)(error, dispatch); }); }; } /** * Deletes all notifications for the user * */ function deleteAllNotifications() { return function (dispatch, getState) { var user = getState().user; var url = '/users/' + user + '/notifications'; return _server2.default.del(url).then(function () { _utils.notify.success((0, _utils.gettext)('Notifications cleared successfully')); dispatch(clearAllNotifications()); }).catch(function (error) { return (0, _utils.errorHandler)(error, dispatch); }); }; } /** * Handle server push notification * * @param {Object} data */ function pushNotification(push) { return function (dispatch, getState) { var user = getState().user; switch (push.event) { case 'history_matches': if (push.extra.users && push.extra.users.includes(getState().user)) { return dispatch(newNotification(push.extra)); } break; case 'bookmarks:' + user: return dispatch(setBookmarksCount(push.extra.count)); } }; } var SET_BOOKMARKS_COUNT = exports.SET_BOOKMARKS_COUNT = 'SET_BOOKMARKS_COUNT'; function setBookmarksCount(count) { return { type: SET_BOOKMARKS_COUNT, count: count }; } /***/ }), /***/ 2: /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.notify = exports.now = undefined; exports.createStore = createStore; exports.render = render; exports.gettext = gettext; exports.getProductQuery = getProductQuery; exports.shortDate = shortDate; exports.getLocaleDate = getLocaleDate; exports.fullDate = fullDate; exports.formatTime = formatTime; exports.formatDate = formatDate; exports.getTextFromHtml = getTextFromHtml; exports.wordCount = wordCount; exports.toggleValue = toggleValue; exports.updateRouteParams = updateRouteParams; exports.formatHTML = formatHTML; exports.initWebSocket = initWebSocket; exports.errorHandler = errorHandler; exports.getConfig = getConfig; exports.getTimezoneOffset = getTimezoneOffset; exports.isTouchDevice = isTouchDevice; var _react = 
__webpack_require__(0); var _react2 = _interopRequireDefault(_react); var _lodash = __webpack_require__(7); var _reactRedux = __webpack_require__(6); var _redux = __webpack_require__(42); var _reduxLogger = __webpack_require__(47); var _reduxThunk = __webpack_require__(48); var _reduxThunk2 = _interopRequireDefault(_reduxThunk); var _reactDom = __webpack_require__(24); var _alertifyjs = __webpack_require__(49); var _alertifyjs2 = _interopRequireDefault(_alertifyjs); var _moment = __webpack_require__(3); var _moment2 = _interopRequireDefault(_moment); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } var now = exports.now = (0, _moment2.default)(); // to enable mocking in tests var TIME_FORMAT = getConfig('time_format'); var DATE_FORMAT = getConfig('date_format'); var DATETIME_FORMAT = TIME_FORMAT + ' ' + DATE_FORMAT; /** * Create redux store with default middleware * * @param {func} reducer * @return {Store} */ function createStore(reducer) { var logger = (0, _reduxLogger.createLogger)({ duration: true, collapsed: true, timestamp: false }); return (0, _redux.createStore)(reducer, (0, _redux.applyMiddleware)(_reduxThunk2.default, logger)); } /** * Render helper * * @param {Store} store * @param {Component} App * @param {Element} element */ function render(store, App, element) { return (0, _reactDom.render)(_react2.default.createElement( _reactRedux.Provider, { store: store }, _react2.default.createElement(App, null) ), element); } /** * Noop for now, but it's better to use it from beginning. * * It handles interpolation: * * gettext('Hello {{ name }}', {name: 'John'}); * * @param {String} text * @param {Object} params * @return {String} */ function gettext(text, params) { var translated = text; // temporary if (params) { Object.keys(params).forEach(function (param) { var paramRegexp = new RegExp('{{ ?' 
+ param + ' ?}}', 'g'); translated = translated.replace(paramRegexp, params[param] || ''); }); } return translated; } /** * Returns query string query for a given product * * @param {Object} product * @return {string} */ function getProductQuery(product) { var q = product.sd_product_id ? 'products.code:' + product.sd_product_id : ''; q += product.query ? product.sd_product_id ? ' OR (' + product.query + ')' : product.query : ''; return q; } /** * Parse given date string and return Date instance * * @param {String} dateString * @return {Date} */ function parseDate(dateString) { return (0, _moment2.default)(dateString); } /** * Return date formatted for lists * * @param {String} dateString * @return {String} */ function shortDate(dateString) { var parsed = parseDate(dateString); return parsed.format(isToday(parsed) ? TIME_FORMAT : DATE_FORMAT); } /** * Return locale date * * @param {String} dateString * @return {String} */ function getLocaleDate(dateString) { return parseDate(dateString).format(DATETIME_FORMAT); } /** * Test if given day is today * * @param {Date} date * @return {Boolean} */ function isToday(date) { return date.format('YYYY-MM-DD') === now.format('YYYY-MM-DD'); } /** * Return full date representation * * @param {String} dateString * @return {String} */ function fullDate(dateString) { return parseDate(dateString).format(DATETIME_FORMAT); } /** * Format time of a date * * @param {String} dateString * @return {String} */ function formatTime(dateString) { return parseDate(dateString).format(TIME_FORMAT); } /** * Format date of a date (without time) * * @param {String} dateString * @return {String} */ function formatDate(dateString) { return parseDate(dateString).format(DATE_FORMAT); } /** * Wrapper for alertifyjs */ var notify = exports.notify = { success: function success(message) { return _alertifyjs2.default.success(message); }, error: function error(message) { return _alertifyjs2.default.error(message); } }; /** * Get text from html * * @param 
{string} html * @return {string} */ function getTextFromHtml(html) { var div = document.createElement('div'); div.innerHTML = formatHTML(html); var tree = document.createTreeWalker(div, NodeFilter.SHOW_TEXT, null, false); // ie requires all params var text = []; while (tree.nextNode()) { text.push(tree.currentNode.textContent); if (tree.currentNode.nextSibling) { switch (tree.currentNode.nextSibling.nodeName) { case 'BR': case 'HR': text.push('\n'); } continue; } switch (tree.currentNode.parentNode.nodeName) { case 'P': case 'LI': case 'H1': case 'H2': case 'H3': case 'H4': case 'H5': case 'DIV': case 'TABLE': case 'BLOCKQUOTE': text.push('\n'); } } return text.join(''); } /** * Get word count for given item * * @param {Object} item * @return {number} */ function wordCount(item) { if ((0, _lodash.isInteger)(item.word_count)) { return item.word_count; } if (!item.body_html) { return 0; } var text = getTextFromHtml(item.body_html); return text.split(' ').filter(function (x) { return x.trim(); }).length || 0; } /** * Toggle value within array * * returns a new array so can be used with setState * * @param {Array} items * @param {mixed} value * @return {Array} */ function toggleValue(items, value) { if (!items) { return [value]; } var without = items.filter(function (x) { return value !== x; }); return without.length === items.length ? without.concat([value]) : without; } function updateRouteParams(updates, state) { var params = new URLSearchParams(window.location.search); var dirty = false; Object.keys(updates).forEach(function (key) { if (updates[key]) { dirty = dirty || updates[key] !== params.get(key); params.set(key, updates[key]); } else { dirty = dirty || params.has(key) || params.entries.length == 0; params.delete(key); } }); if (dirty) { history.pushState(state, null, '?' 
+ params.toString()); } } var SHIFT_OUT_REGEXP = new RegExp(String.fromCharCode(14), 'g'); /** * Replace some white characters in html * * @param {String} html * @return {String} */ function formatHTML(html) { return html.replace(SHIFT_OUT_REGEXP, html.indexOf('<pre>') === -1 ? '<br>' : '\n'); } /** * Initializes the web socket listener * @param store */ function initWebSocket(store, action) { if (window.newsroom) { var ws = new WebSocket(window.newsroom.websocket); ws.onmessage = function (message) { var data = JSON.parse(message.data); if (data.event) { store.dispatch(action(data)); } }; } } /** * Generic error handler for http requests * @param error * @param dispatch * @param setError */ function errorHandler(error, dispatch, setError) { console.error('error', error); if (error.response.status !== 400) { notify.error(error.response.statusText); return; } if (setError) { error.response.json().then(function (data) { dispatch(setError(data)); }); } } /** * Get config value * * @param {String} key * @param {Mixed} defaultValue * @return {Mixed} */ function getConfig(key, defaultValue) { return (0, _lodash.get)(window.newsroom, key, defaultValue); } function getTimezoneOffset() { return now.utcOffset() ? now.utcOffset() * -1 : 0; // it's oposite to Date.getTimezoneOffset } function isTouchDevice() { return 'ontouchstart' in window // works on most browsers || navigator.maxTouchPoints; // works on IE10/11 and Surface } /***/ }), /***/ 675: /***/ (function(module, exports, __webpack_require__) { "use strict"; var _utils = __webpack_require__(2); var _reducers = __webpack_require__(676); var _reducers2 = _interopRequireDefault(_reducers); var _NotificationsApp = __webpack_require__(677); var _NotificationsApp2 = _interopRequireDefault(_NotificationsApp); var _actions = __webpack_require__(179); function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } var store = (0, _utils.createStore)(_reducers2.default); if (window.notificationData) { store.dispatch((0, _actions.initData)(window.notificationData)); } (0, _utils.render)(store, _NotificationsApp2.default, document.getElementById('header-notification')); (0, _utils.initWebSocket)(store, _actions.pushNotification); /***/ }), /***/ 676: /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; exports.default = notificationReducer; var _actions = __webpack_require__(179); var initialState = { user: null, notifications: [], bookmarksCount: 0 }; function notificationReducer() { var state = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : initialState; var action = arguments[1]; switch (action.type) { case _actions.NEW_NOTIFICATION: { var notifications = state.notifications.concat([action.notification.item]); return _extends({}, state, { notifications: notifications }); } case _actions.CLEAR_ALL_NOTIFICATIONS: return _extends({}, state, { notifications: [] }); case _actions.CLEAR_NOTIFICATION: { var _notifications = state.notifications.filter(function (n) { return n._id !== action.id; }); return _extends({}, state, { notifications: _notifications }); } case _actions.INIT_DATA: { return _extends({}, state, { user: action.data.user || null, notifications: action.data.notifications || [], bookmarksCount: action.data.bookmarksCount || 0 }); } case _actions.SET_BOOKMARKS_COUNT: return _extends({}, state, { bookmarksCount: action.count }); default: return state; } } /***/ }), /***/ 677: /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); var _react = __webpack_require__(0); var _react2 = _interopRequireDefault(_react); var _propTypes = __webpack_require__(1); var _propTypes2 = _interopRequireDefault(_propTypes); var _reactRedux = __webpack_require__(6); var _actions = __webpack_require__(179); var _NotificationList = __webpack_require__(678); var _NotificationList2 = _interopRequireDefault(_NotificationList); var _BookmarksCount = __webpack_require__(679); 
var _BookmarksCount2 = _interopRequireDefault(_BookmarksCount); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; } function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } var NotificationsApp = function (_React$Component) { _inherits(NotificationsApp, _React$Component); function NotificationsApp(props, context) { _classCallCheck(this, NotificationsApp); return _possibleConstructorReturn(this, (NotificationsApp.__proto__ || Object.getPrototypeOf(NotificationsApp)).call(this, props, context)); } _createClass(NotificationsApp, [{ key: 'render', value: function render() { return [_react2.default.createElement(_NotificationList2.default, { key: 'notifications', notifications: this.props.notifications, clearNotification: this.props.clearNotification, clearAll: this.props.clearAll }), _react2.default.createElement(_BookmarksCount2.default, { key: 'bookmarks', count: this.props.bookmarksCount })]; } }]); return NotificationsApp; }(_react2.default.Component); NotificationsApp.propTypes = { user: _propTypes2.default.string, notifications: _propTypes2.default.arrayOf(_propTypes2.default.object), clearNotification: 
_propTypes2.default.func, clearAll: _propTypes2.default.func, bookmarksCount: _propTypes2.default.number }; var mapStateToProps = function mapStateToProps(state) { return { user: state.user, notifications: state.notifications, bookmarksCount: state.bookmarksCount }; }; var mapDispatchToProps = function mapDispatchToProps(dispatch) { return { clearNotification: function clearNotification(id) { return dispatch((0, _actions.deleteNotification)(id)); }, clearAll: function clearAll() { return dispatch((0, _actions.deleteAllNotifications)()); } }; }; exports.default = (0, _reactRedux.connect)(mapStateToProps, mapDispatchToProps)(NotificationsApp); /***/ }), /***/ 678: /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); var _react = __webpack_require__(0); var _react2 = _interopRequireDefault(_react); var _propTypes = __webpack_require__(1); var _propTypes2 = _interopRequireDefault(_propTypes); var _utils = __webpack_require__(2); var _CloseButton = __webpack_require__(95); var _CloseButton2 = _interopRequireDefault(_CloseButton); function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; } function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } var NotificationList = function (_React$Component) { _inherits(NotificationList, _React$Component); function NotificationList(props) { _classCallCheck(this, NotificationList); var _this = _possibleConstructorReturn(this, (NotificationList.__proto__ || Object.getPrototypeOf(NotificationList)).call(this, props)); _this.state = { displayItems: false }; _this.renderNotification = _this.renderNotification.bind(_this); _this.toggleDisplay = _this.toggleDisplay.bind(_this); return _this; } _createClass(NotificationList, [{ key: 'toggleDisplay', value: function toggleDisplay() { if (!this.state.displayItems && (!this.props.notifications || this.props.notifications.length == 0)) return; this.setState({ displayItems: !this.state.displayItems }); if (!this.state.displayItems) { document.getElementById('header-notification').classList.add('notif--open'); } else { document.getElementById('header-notification').classList.remove('notif--open'); } } }, { key: 'renderNotification', value: function renderNotification(newItem) { var _this2 = this; return _react2.default.createElement( 
'div', { key: newItem._id, className: 'notif__list__item' }, _react2.default.createElement(_CloseButton2.default, { onClick: function onClick() { return _this2.props.clearNotification(newItem._id); } }), _react2.default.createElement( 'div', { className: 'notif__list__info' }, (0, _utils.gettext)('A story you downloaded has been updated') ), _react2.default.createElement( 'div', { className: 'notif__list__headline' }, _react2.default.createElement( 'a', { href: '/wire?item=' + newItem._id }, newItem.headline ) ), _react2.default.createElement( 'div', { className: 'wire-articles__item__meta-info' }, (0, _utils.gettext)('Created on'), ' ', (0, _utils.shortDate)(newItem.versioncreated) ) ); } }, { key: 'render', value: function render() { var _this3 = this; return _react2.default.createElement( 'div', { className: 'badge--top-right' }, this.props.notifications && this.props.notifications.length > 0 && _react2.default.createElement( 'div', { className: 'badge badge-pill badge-info badge-secondary' }, this.props.notifications && this.props.notifications.length ), _react2.default.createElement( 'span', { className: 'notif__circle' }, _react2.default.createElement('i', { className: 'icon--alert icon--white', onClick: this.toggleDisplay }) ), this.state.displayItems && this.props.notifications && this.props.notifications.length > 0 && _react2.default.createElement( 'div', { className: 'notif__list' }, _react2.default.createElement( 'div', { className: 'notif__list__header d-flex' }, _react2.default.createElement( 'span', { className: 'notif__list__header-headline ml-3' }, (0, _utils.gettext)('Notifications') ), _react2.default.createElement( 'button', { type: 'button', className: 'button-pill ml-auto mr-3', onClick: this.props.clearAll }, (0, _utils.gettext)('Clear All') ) ), this.props.notifications.map(function (notification) { return _this3.renderNotification(notification); }) ) ); } }]); return NotificationList; }(_react2.default.Component); NotificationList.propTypes 
= { notifications: _propTypes2.default.array, clearNotification: _propTypes2.default.func, clearAll: _propTypes2.default.func }; exports.default = NotificationList; /***/ }), /***/ 679: /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var _react = __webpack_require__(0); var _react2 = _interopRequireDefault(_react); var _reactDom = __webpack_require__(24); var _reactDom2 = _interopRequireDefault(_reactDom); var _propTypes = __webpack_require__(1); var _propTypes2 = _interopRequireDefault(_propTypes); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function BookmarksCount(_ref) { var count = _ref.count; return _reactDom2.default.createPortal(_react2.default.createElement( 'b', { className: 'font-weight-normal' }, count ), document.getElementById('bookmarks-count')); } BookmarksCount.propTypes = { count: _propTypes2.default.number.isRequired }; exports.default = BookmarksCount; /***/ }), /***/ 95: /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var _react = __webpack_require__(0); var _react2 = _interopRequireDefault(_react); var _propTypes = __webpack_require__(1); var _propTypes2 = _interopRequireDefault(_propTypes); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function CloseButton(_ref) { var onClick = _ref.onClick; return _react2.default.createElement( 'button', { type: 'button', className: 'close', 'aria-label': 'Close', onClick: onClick }, _react2.default.createElement( 'span', { 'aria-hidden': 'true' }, '\xD7' ) ); } CloseButton.propTypes = { onClick: _propTypes2.default.func.isRequired }; exports.default = CloseButton; /***/ }) },[675]);
PypiClean
/NNGT-2.7.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl/nngt/core/nx_graph.py
from collections import OrderedDict, deque
from copy import deepcopy
from itertools import chain
import logging
import sys

import numpy as np
import scipy.sparse as ssp

import nngt
from nngt.lib import InvalidArgument, BWEIGHT, nonstring_container, is_integer
from nngt.lib.connect_tools import (_cleanup_edges, _set_dist_new_edges,
                                    _set_default_edge_attributes)
from nngt.lib.graph_helpers import (_get_dtype, _get_nx_weights,
                                    _post_del_update)
from nngt.lib.converters import _np_dtype, _to_np_array
from nngt.lib.logger import _log_message
from .graph_interface import GraphInterface, BaseProperty

logger = logging.getLogger(__name__)


# ---------- #
# Properties #
# ---------- #

class _NxNProperty(BaseProperty):

    ''' Class for generic interactions with nodes properties (networkx) '''

    def __getitem__(self, name):
        # collect the attribute value of every node, in node order, and
        # convert to a numpy array of the dtype recorded for this attribute
        g = self.parent()._graph
        lst = [g.nodes[i][name] for i in range(g.number_of_nodes())]
        dtype = _np_dtype(super(_NxNProperty, self).__getitem__(name))
        return _to_np_array(lst, dtype=dtype)

    def __setitem__(self, name, value):
        # set the attribute for all nodes at once; `value` must provide one
        # entry per node and the attribute must already exist
        g = self.parent()._graph
        size = g.number_of_nodes()
        if name in self:
            if len(value) == size:
                for i in range(size):
                    g.nodes[i][name] = value[i]
            else:
                raise ValueError("A list or a np.array with one entry per "
                                 "node in the graph is required")
        else:
            raise InvalidArgument("Attribute does not exist yet, use "
                                  "set_attribute to create it.")

    def new_attribute(self, name, value_type, values=None, val=None):
        '''
        Create a new node attribute `name` of type `value_type`, filling it
        with `values` if provided, otherwise with `val` (or a type-dependent
        default) for every node.
        '''
        g = self.parent()._graph

        if val is None:
            # choose a default fill value matching the declared type;
            # unknown types fall back to None and are stored as "object"
            if value_type in ("int", "integer"):
                val = int(0)
            elif value_type in ("double", "float"):
                val = np.NaN
            elif value_type == "string":
                val = ""
            else:
                val = None
                value_type = "object"

        if values is None:
            # deepcopy so mutable defaults are not shared between nodes
            values = [deepcopy(val) for _ in range(g.number_of_nodes())]

        # store name and value type in the dict
        super(_NxNProperty, self).__setitem__(name, value_type)

        # store the real values in the attribute
        self[name] = values
        self._num_values_set[name] = len(values)

    def set_attribute(self, name, values, nodes=None):
        '''
        Set the node attribute.

        Parameters
        ----------
        name : str
            Name of the node attribute.
        values : array, size N
            Values that should be set.
        nodes : array-like, optional (default: all nodes)
            Nodes for which the value of the property should be set. If
            `nodes` is not None, it must be an array of size N.
        '''
        g = self.parent()._graph

        num_nodes = g.number_of_nodes()
        num_n = len(nodes) if nodes is not None else num_nodes

        if num_n == num_nodes:
            # full update: delegate to __setitem__
            self[name] = values
        else:
            if num_n != len(values):
                raise ValueError("`nodes` and `values` must have the same "
                                 "size; got respectively " + str(num_n) + \
                                 " and " + str(len(values)) + " entries.")
            else:
                for n, val in zip(nodes, values):
                    g.nodes[n][name] = val

        self._num_values_set[name] = num_nodes


class _NxEProperty(BaseProperty):

    ''' Class for generic interactions with edge properties (networkx) '''

    def __getitem__(self, name):
        # return the attribute values sorted by edge-creation order, which is
        # recovered from the "eid" attribute stored on every edge
        g = self.parent()._graph

        dtype = _np_dtype(super().__getitem__(name))
        eprop = np.empty(g.number_of_edges(), dtype=dtype)

        edges = list(g.edges(data=name))

        if len(edges):
            eids = np.asarray(list(g.edges(data="eid")))[:, 2]

            for i, eid in enumerate(np.argsort(eids)):
                eprop[i] = edges[eid][2]

        return eprop

    def __setitem__(self, name, value):
        # set one value per edge; `value` is interpreted in edge-creation
        # order (eid order), not in networkx's internal iteration order
        g = self.parent()._graph

        if name in self:
            size = g.number_of_edges()

            if len(value) == size:
                edges = np.asarray(list(g.edges(data="eid")))

                if size:
                    order = np.argsort(edges[:, 2])

                    for i, idx in enumerate(order):
                        s, t, _ = edges[idx]
                        g.edges[s, t][name] = value[i]
            else:
                raise ValueError(
                    "A list or a np.array with one entry per edge in the "
                    "graph is required. For attribute "
                    "'{}', got {} entries vs {} edges.".format(
                        name, len(value), size))
        else:
            raise InvalidArgument("Attribute does not exist yet, use "
                                  "set_attribute to create it.")

    def get_eattr(self, edges, name=None):
        '''
        Return attribute values for one edge (2-tuple) or several edges
        (iterable of 2-tuples); if `name` is None, all attributes except the
        internal "eid" are returned.
        '''
        g = self.parent()._graph

        if nonstring_container(edges[0]):
            # many edges
            name = self.keys() if name is None else [name]
            eprop = {k: [] for k in self.keys()}

            for edge in edges:
                data = g.get_edge_data(*edge)

                if data is None:
                    raise ValueError(
                        "Edge {} does not exist.".format(edge))

                [eprop[k].append(data[k]) for k in name]

            for k, v in eprop.items():
                dtype = _np_dtype(super().__getitem__(k))
                eprop[k] = _to_np_array(v, dtype)

            if len(name) == 1:
                return eprop[name[0]]

            return eprop

        # single edge
        data = deepcopy(g.get_edge_data(*edges))

        if not data:
            raise ValueError("Edge {} does not exist.".format(edges))

        if name is None:
            # "eid" is an implementation detail, never exposed to the user
            del data["eid"]
            return data

        return data[name]

    def new_attribute(self, name, value_type, values=None, val=None):
        '''
        Create a new edge attribute `name` of type `value_type`, filling it
        with `values` if provided, otherwise with `val` (or a type-dependent
        default) for every edge.
        '''
        g = self.parent()._graph

        if val is None:
            if value_type in ("int", "integer"):
                val = int(0)
            elif value_type in ("double", "float"):
                val = np.NaN
            elif value_type == "string":
                val = ""
            else:
                val = None
                value_type = "object"

        if values is None:
            values = [deepcopy(val) for _ in range(g.number_of_edges())]

        # store name and value type in the dict
        super(_NxEProperty, self).__setitem__(name, value_type)

        # store the real values in the attribute
        self[name] = values
        self._num_values_set[name] = len(values)

    def set_attribute(self, name, values, edges=None, last_edges=False):
        '''
        Set the edge property.

        Parameters
        ----------
        name : str
            Name of the edge property.
        values : array
            Values that should be set.
        edges : array-like, optional (default: None)
            Edges for which the value of the property should be set. If
            `edges` is not None, it must be an array of shape
            `(len(values), 2)`.
        '''
        graph = self.parent()
        g = graph._graph

        num_edges = g.number_of_edges()
        num_e = len(edges) if edges is not None else num_edges

        if num_e != len(values):
            raise ValueError("`edges` and `values` must have the same "
                             "size; got respectively " + str(num_e) + \
                             " and " + str(len(values)) + " entries.")

        if edges is None:
            self[name] = values
        else:
            order = range(num_e)

            if not last_edges:
                # align `values` with the creation order of `edges`
                get_eid = graph.edge_id
                eids = [get_eid(e) for e in edges]
                order = np.argsort(np.argsort(eids))

            for i, e in zip(order, edges):
                edict = g[e[0]][e[1]]
                edict[name] = values[i]
                g.add_edge(e[0], e[1], **edict)

        if num_e:
            self._num_values_set[name] = num_edges


# ----- #
# Graph #
# ----- #

class _NxGraph(GraphInterface):

    ''' Container for networkx Graph '''

    _nattr_class = _NxNProperty
    _eattr_class = _NxEProperty

    #-------------------------------------------------------------------------#
    # Class properties

    di_value = {"string": "", "double": 0., "int": int(0)}

    #-------------------------------------------------------------------------#
    # Constructor and instance properties

    def __init__(self, nodes=0, copy_graph=None, directed=True,
                 weighted=False, **kwargs):
        '''
        Create a new networkx-backed graph, either empty with `nodes` nodes
        or as a (possibly direction-converted) copy of `copy_graph`.
        '''
        self._nattr = _NxNProperty(self)
        self._eattr = _NxEProperty(self)

        # monotonically increasing counter used to stamp each edge with a
        # creation-order id ("eid"); never decreased on edge deletion
        self._max_eid = 0

        g = copy_graph.graph if copy_graph is not None else None

        if g is not None:
            # convert the copied graph to the requested directedness
            if not directed and g.is_directed():
                g = g.to_undirected()
            elif directed and not g.is_directed():
                g = g.to_directed()

            self._from_library_graph(g, copy=True)

            self._max_eid = copy_graph._max_eid
        else:
            nx = nngt._config["library"]
            self._graph = nx.DiGraph() if directed else nx.Graph()

            if nodes:
                self._graph.add_nodes_from(range(nodes))

    #-------------------------------------------------------------------------#
    # Graph manipulation

    def edge_id(self, edge):
        '''
        Return the ID a given edge or a list of edges in the graph.

        Raises an error if the edge is not in the graph or if one of the
        vertices in the edge is nonexistent.

        Parameters
        ----------
        edge : 2-tuple or array of edges
            Edge descriptor (source, target).

        Returns
        -------
        index : int or array of ints
            Index of the given `edge`.
        '''
        g = self._graph

        if is_integer(edge[0]):
            return g[edge[0]][edge[1]]["eid"]
        elif nonstring_container(edge[0]):
            return [g[e[0]][e[1]]["eid"] for e in edge]

        raise AttributeError("`edge` must be either a 2-tuple of ints or "
                             "an array of 2-tuples of ints.")

    def has_edge(self, edge):
        '''
        Whether `edge` is present in the graph.

        .. versionadded:: 2.0
        '''
        return self._graph.has_edge(*edge)

    @property
    def edges_array(self):
        '''
        Edges of the graph, sorted by order of creation, as an array
        of 2-tuple.
        '''
        g = self._graph

        # rows left at -1 correspond to deleted eids and are filtered below
        edges = np.full((self._max_eid, 2), -1)

        # fast iteration using list comprehension
        # could also be done with deque and map (deque forces lazy map to run)
        # deque(map(lambda x: _gen_edges(edges, x), g.edges(data="eid")))
        [_gen_edges(edges, x) for x in g.edges(data="eid")]

        if self._max_eid > g.number_of_edges():
            # some edges were deleted: drop the unused rows
            return edges[edges[:, 0] > -1]

        return edges

    def _get_edges(self, source_node=None, target_node=None):
        '''
        Called by Graph.get_edges if source_node and target_node are not
        both integers.
        '''
        nx = nngt._config["library"]

        g = self._graph

        target_node = \
            [target_node] if is_integer(target_node) else target_node

        if source_node is not None:
            source_node = \
                [source_node] if is_integer(source_node) else source_node

            if target_node is None:
                if g.is_directed():
                    return list(g.out_edges(source_node))

                # for undirected graphs, normalize each edge to (min, max)
                return [
                    e if e[0] <= e[1] else e[::-1]
                    for e in g.edges(source_node)
                ]

            res_iter = nx.edge_boundary(g, source_node, target_node)

            if g.is_directed():
                return list(res_iter)

            return [e if e[0] <= e[1] else e[::-1] for e in res_iter]

        if target_node is None:
            # return all edges
            return list(g.edges)

        if g.is_directed():
            return list(g.in_edges(target_node))

        return [
            e if e[0] <= e[1] else e[::-1] for e in g.edges(target_node)
        ]

    def new_node(self, n=1, neuron_type=1, attributes=None, value_types=None,
                 positions=None, groups=None):
        '''
        Adding a node to the graph, with optional properties.

        Parameters
        ----------
        n : int, optional (default: 1)
            Number of nodes to add.
        neuron_type : int, optional (default: 1)
            Type of neuron (1 for excitatory, -1 for inhibitory)
        attributes : dict, optional (default: None)
            Dictionary containing the attributes of the nodes.
        value_types : dict, optional (default: None)
            Dict of the `attributes` types, necessary only if the `attributes`
            do not exist yet.
        positions : array of shape (n, 2), optional (default: None)
            Positions of the neurons. Valid only for
            :class:`~nngt.SpatialGraph` or :class:`~nngt.SpatialNetwork`.
        groups : str, int, or list, optional (default: None)
            :class:`~nngt.core.NeuralGroup` to which the neurons belong. Valid
            only for :class:`~nngt.Network` or :class:`~nngt.SpatialNetwork`.

        Returns
        -------
        The node or a list of the nodes created.
        '''
        g = self._graph

        num_nodes = g.number_of_nodes()
        new_nodes = list(range(num_nodes, num_nodes + n))

        for v in new_nodes:
            g.add_node(v)

        attributes = {} if attributes is None else deepcopy(attributes)

        if attributes:
            for k, v in attributes.items():
                if k not in self._nattr:
                    self._nattr.new_attribute(k, value_types[k], val=v)
                else:
                    v = v if nonstring_container(v) else [v]
                    self._nattr.set_attribute(k, v, nodes=new_nodes)

        # set default values for all attributes that were not set
        for k in self.node_attributes:
            if k not in attributes:
                dtype = self.get_attribute_type(k)
                filler = [None for _ in new_nodes]

                # change for strings, doubles and ints
                if dtype == "string":
                    filler = ["" for _ in new_nodes]
                elif dtype == "double":
                    filler = [np.NaN for _ in new_nodes]
                elif dtype == "int":
                    filler = [0 for _ in new_nodes]

                self._nattr.set_attribute(k, filler, nodes=new_nodes)

        if self.is_spatial():
            # grow the position array, keeping existing coordinates
            old_pos = self._pos
            self._pos = np.full((self.node_nb(), 2), np.NaN)

            num_existing = len(old_pos) if old_pos is not None else 0

            if num_existing != 0:
                self._pos[:num_existing] = old_pos

        if positions is not None and len(positions):
            assert self.is_spatial(), \
                "`positions` argument requires a SpatialGraph/SpatialNetwork."
            self._pos[new_nodes] = positions

        if groups is not None:
            # FIX: the assertion message previously said "`positions`
            # argument" although this check concerns `groups`
            assert self.is_network(), \
                "`groups` argument requires a Network/SpatialNetwork."

            if nonstring_container(groups):
                assert len(groups) == n, "One group per neuron required."

                for g, node in zip(groups, new_nodes):
                    self.population.add_to_group(g, node)
            else:
                self.population.add_to_group(groups, new_nodes)

        if len(new_nodes) == 1:
            return new_nodes[0]

        return new_nodes

    def delete_nodes(self, nodes):
        '''
        Remove nodes (and associated edges) from the graph.
        '''
        g = self._graph

        if nonstring_container(nodes):
            for n in nodes:
                g.remove_node(n)
        else:
            g.remove_node(nodes)

        # relabel nodes from zero
        nx = nngt._config["library"]

        nx.relabel_nodes(g, {n: i for i, n in enumerate(g.nodes)}, copy=False)

        # update attributes
        for key in self._nattr:
            self._nattr._num_values_set[key] = self.node_nb()

        for key in self._eattr:
            self._eattr._num_values_set[key] = self.edge_nb()

        # check spatial and structure properties
        _post_del_update(self, nodes)

    def new_edge(self, source, target, attributes=None, ignore=False,
                 self_loop=False):
        '''
        Adding a connection to the graph, with optional properties.

        .. versionchanged :: 2.0
            Added `self_loop` argument to enable adding self-loops.

        Parameters
        ----------
        source : :class:`int/node`
            Source node.
        target : :class:`int/node`
            Target node.
        attributes : :class:`dict`, optional (default: ``{}``)
            Dictionary containing optional edge properties. If the graph is
            weighted, defaults to ``{"weight": 1.}``, the unit weight for the
            connection (synaptic strength in NEST).
        ignore : bool, optional (default: False)
            If set to True, ignore attempts to add an existing edge and accept
            self-loops; otherwise an error is raised.
        self_loop : bool, optional (default: False)
            Whether to allow self-loops or not.

        Returns
        -------
        The new connection or None if nothing was added.
        '''
        g = self._graph

        attributes = {} if attributes is None else deepcopy(attributes)

        # check that nodes exist
        num_nodes = g.number_of_nodes()

        if source >= num_nodes or target >= num_nodes:
            raise InvalidArgument("`source` or `target` does not exist.")

        # set default values for attributes that were not passed
        _set_default_edge_attributes(self, attributes, num_edges=1)

        if g.has_edge(source, target):
            if not ignore:
                raise InvalidArgument("Trying to add existing edge.")

            _log_message(logger, "INFO",
                         "Existing edge {} ignored.".format((source, target)))
        else:
            if source == target:
                if not ignore and not self_loop:
                    raise InvalidArgument("Trying to add a self-loop.")
                elif ignore:
                    _log_message(logger, "INFO",
                                 "Self-loop on {} ignored.".format(source))

                    return None

            for attr in attributes:
                if "_corr" in attr:
                    raise NotImplementedError("Correlated attributes are not "
                                              "available with networkx.")

            if self.is_weighted() and "weight" not in attributes:
                attributes["weight"] = 1.

            # check distance
            _set_dist_new_edges(attributes, self, [(source, target)])

            g.add_edge(source, target)
            g[source][target]["eid"] = self._max_eid

            self._max_eid += 1

            # call parent function to set the attributes
            self._attr_new_edges([(source, target)], attributes=attributes)

        return (source, target)

    def new_edges(self, edge_list, attributes=None, check_duplicates=False,
                  check_self_loops=True, check_existing=True,
                  ignore_invalid=False):
        '''
        Add a list of edges to the graph.

        .. versionchanged:: 2.0
            Can perform all possible checks before adding new edges via the
            ``check_duplicates`` ``check_self_loops``, and ``check_existing``
            arguments.

        Parameters
        ----------
        edge_list : list of 2-tuples or np.array of shape (edge_nb, 2)
            List of the edges that should be added as tuples (source, target)
        attributes : :class:`dict`, optional (default: ``{}``)
            Dictionary containing optional edge properties. If the graph is
            weighted, defaults to ``{"weight": ones}``, where ``ones`` is an
            array the same length as the `edge_list` containing a unit weight
            for each connection (synaptic strength in NEST).
        check_duplicates : bool, optional (default: False)
            Check for duplicate edges within `edge_list`.
        check_self_loops : bool, optional (default: True)
            Check for self-loops.
        check_existing : bool, optional (default: True)
            Check whether some of the edges in `edge_list` already exist in
            the graph or exist multiple times in `edge_list` (also performs
            `check_duplicates`).
        ignore_invalid : bool, optional (default: False)
            Ignore invalid edges: they are not added to the graph and are
            silently dropped. Unless this is set to true, an error is raised
            whenever one of the three checks fails.

        .. warning::

            Setting `check_existing` to False will lead to undefined behavior
            if existing edges are provided! Only use it (for speedup) if you
            are sure that you are indeed only adding new edges.

        Returns
        -------
        Returns new edges only.
        '''
        g = self._graph

        attributes = {} if attributes is None else deepcopy(attributes)

        num_edges = len(edge_list)

        # check that all nodes exist
        if num_edges:
            if np.max(edge_list) >= g.number_of_nodes():
                # FIX: corrected error-message typo ("do no" -> "do not")
                raise InvalidArgument("Some nodes do not exist.")

            for k, v in attributes.items():
                assert nonstring_container(v) and len(v) == num_edges, \
                    "One attribute per edge is required."

                if "_corr" in k:
                    raise NotImplementedError("Correlated attributes are not "
                                              "available with networkx.")

        # set default values for attributes that were not passed
        _set_default_edge_attributes(self, attributes, num_edges)

        # check edges
        new_attr = None

        if check_duplicates or check_self_loops or check_existing:
            edge_list, new_attr = _cleanup_edges(
                self, edge_list, attributes, check_duplicates,
                check_self_loops, check_existing, ignore_invalid)
        else:
            new_attr = attributes

        # create the edges
        initial_eid = self._max_eid

        num_added = len(edge_list)

        if num_added:
            arr_edges = np.zeros((num_added, 3), dtype=int)
            arr_edges[:, :2] = edge_list
            arr_edges[:, 2] = np.arange(
                initial_eid, initial_eid + num_added)

            # create the edges with an eid attribute
            g.add_weighted_edges_from(arr_edges, weight="eid")

            # check distance
            _set_dist_new_edges(new_attr, self, edge_list)

            # call parent function to set the attributes
            self._attr_new_edges(edge_list, attributes=new_attr)

            self._max_eid += num_added

        return edge_list

    def delete_edges(self, edges):
        ''' Remove a list of edges '''
        if len(edges):
            if nonstring_container(edges[0]):
                self._graph.remove_edges_from(edges)
            else:
                self._graph.remove_edge(*edges)

            for key in self._eattr:
                self._eattr._num_values_set[key] = self.edge_nb()

    def clear_all_edges(self):
        ''' Remove all edges from the graph '''
        g = self._graph
        g.remove_edges_from(tuple(g.edges()))
        self._eattr.clear()

    #-------------------------------------------------------------------------#
    # Getters

    def node_nb(self):
        ''' Number of nodes in the graph '''
        return self._graph.number_of_nodes()

    def edge_nb(self):
        ''' Number of edges in the graph '''
        return self._graph.number_of_edges()

    def get_degrees(self, mode="total", nodes=None, weights=None):
        '''
        Return the degree ("total", "in", or "out") of `nodes`, optionally
        weighted by the edge attribute selected through `weights`.
        '''
        g = self._graph

        w = _get_nx_weights(self, weights)

        nodes = range(g.number_of_nodes()) if nodes is None else nodes

        dtype = int if weights in {False, None} else float

        di_deg = None

        if mode == 'total' or not self._graph.is_directed():
            di_deg = g.degree(nodes, weight=w)
        elif mode == 'in':
            di_deg = g.in_degree(nodes, weight=w)
        elif mode == 'out':
            di_deg = g.out_degree(nodes, weight=w)
        else:
            raise ValueError("Unknown `mode` '{}'".format(mode))

        if nonstring_container(nodes):
            return np.array([di_deg[i] for i in nodes], dtype=dtype)

        return di_deg

    def is_connected(self, mode="strong"):
        '''
        Return whether the graph is connected.

        Parameters
        ----------
        mode : str, optional (default: "strong")
            Whether to test connectedness with directed ("strong") or
            undirected ("weak") connections.
        '''
        g = self._graph

        if g.is_directed() and mode == "weak":
            g = g.to_undirected(as_view=True)

        # nx.diameter raises NetworkXError on a disconnected graph, which is
        # used here as the connectedness test
        try:
            import networkx as nx
            nx.diameter(g)
            return True
        except nx.exception.NetworkXError:
            return False

    def neighbours(self, node, mode="all"):
        '''
        Return the neighbours of `node`.

        Parameters
        ----------
        node : int
            Index of the node of interest.
        mode : string, optional (default: "all")
            Type of neighbours that will be returned: "all" returns all the
            neighbours regardless of directionality, "in" returns the
            in-neighbours (also called predecessors) and "out" retruns the
            out-neighbours (or successors).

        Returns
        -------
        neighbours : set
            The neighbours of `node`.
        '''
        g = self._graph

        # special case for undirected
        if not g.is_directed():
            return set(g.neighbors(node))

        if mode == "all":
            # for directed graphs, neighbors ~ successors
            return set(g.successors(node)).union(g.predecessors(node))
        elif mode == "in":
            return set(g.predecessors(node))
        elif mode == "out":
            return set(g.successors(node))

        # FIX: previously raised undefined `ArgumentError` (a NameError at
        # runtime); use the package's InvalidArgument like the rest of the file
        raise InvalidArgument('Invalid `mode` argument {}; possible values '
                              'are "all", "out" or "in".'.format(mode))

    def _from_library_graph(self, graph, copy=True):
        ''' Initialize `self._graph` from existing library object. '''
        import networkx as nx

        nodes = {n: i for i, n in enumerate(graph)}

        num_nodes = graph.number_of_nodes()
        num_edges = graph.number_of_edges()

        # check if nodes start from 0 and are continuous
        if set(nodes.keys()) != set(range(num_nodes)):
            # forced copy to restore nodes to [0, num_nodes[
            g = None

            if graph.is_directed():
                g = nx.DiGraph()
            else:
                g = nx.Graph()

            # add nodes (original labels are kept in the "id" attribute)
            for i, (n, attr) in enumerate(graph.nodes(data=True)):
                attr["id"] = n
                g.add_node(i, **attr)

            # add edges
            [g.add_edge(nodes[u], nodes[v], **attr)
             for u, v, attr in graph.edges(data=True)]

            # make edges ids
            def set_eid(e, eid):
                g.edges[e]["eid"] = eid

            [set_eid(e, i) for i, e in enumerate(g.edges)]

            self._max_eid = num_edges

            self._graph = graph = g
        else:
            # all good
            self._graph = graph.copy() if copy else graph

        # get attributes names and "types" and initialize them
        if num_nodes:
            for key, val in graph.nodes[0].items():
                super(type(self._nattr), self._nattr).__setitem__(
                    key, _get_dtype(val))

        if num_edges:
            e0 = next(iter(graph.edges))

            for key, val in graph.edges[e0].items():
                if key != "eid":
                    super(type(self._eattr), self._eattr).__setitem__(
                        key, _get_dtype(val))


# tool function to generate the edges_array

def _gen_edges(array, edata):
    ''' Write one (source, target) row into `array` at position `eid`. '''
    source, target, eid = edata
    array[eid] = (source, target)
PypiClean
/MyTact-1.0.1.tar.gz/MyTact-1.0.1/mytact/utils.py
from __future__ import print_function, unicode_literals import six import os import sys from pyfiglet import Figlet, figlet_format import re import json from PyInquirer import style_from_dict, Token, prompt, print_json, Separator from PyInquirer import Validator, ValidationError from pprint import pprint try: import colorama colorama.init() except ImportError: colorama = None try: from termcolor import colored except ImportError: colored = None style = style_from_dict({ Token.QuestionMark: '#673ab7 bold', Token.Answer: '#f44336 bold', Token.Instruction: '', # default Token.Separator: '#cc5454', Token.Selected: '#cc5454', # default Token.Pointer: '#673ab7 bold', Token.Question: '', }) def log(object, color="white", figlet=False, pretty=False, font='slant'): if not pretty: if colored: if not figlet: six.print_(colored(object, color)) else: six.print_(colored(figlet_format( object, font=font), color)) else: six.print_(object) else: pprint(object) class EmptyValidator(Validator): def validate(self, value): if len(value.text): return True else: raise ValidationError( message="You can't leave this blank", cursor_position=len(value.text)) class EmailValidator(Validator): pattern = r"\"?([-a-zA-Z0-9.`?{}]+@\w+\.\w+)\"?" 
def validate(self, email): if len(email.text): if re.match(self.pattern, email.text): return True else: raise ValidationError( message="Invalid email", cursor_position=len(email.text)) else: raise ValidationError( message="You can't leave this blank", cursor_position=len(email.text)) class PhoneNumberValidator(Validator): def validate(self, document): ok = re.match('^([01]{1})?[-.\s]?\(?(\d{3})\)?[-.\s]?(\d{3})[-.\s]?(\d{4})\s?((?:#|ext\.?\s?|x\.?\s?){1}(?:\d+)?)?$', document.text) if not ok: raise ValidationError( message='Please enter a valid phone number', cursor_position=len(document.text)) # Move cursor to end else: return True def askContactsInfo(kwargs): questions = [ { 'type': 'input', 'name': 'firstname', 'message': 'Enter firstname:', 'default': "" if kwargs.get("firstname") is None else kwargs["firstname"], 'validate': EmptyValidator, }, { 'type': 'input', 'name': 'lastname', 'message': 'Enter lastname:', 'default': "" if kwargs.get("lastname") is None else kwargs["lastname"], 'validate': EmptyValidator, }, { # 'qmark': u'📞 ', 'type': 'input', 'name': 'phone', 'message': 'Enter phone:', 'default': "" if kwargs.get("phone") is None else kwargs["phone"], 'validate': PhoneNumberValidator, }, { # 'qmark': u'📧 ', 'type': 'input', 'name': 'email', 'message': 'Enter email:', 'default': "" if kwargs.get("email") is None else kwargs["email"], 'validate': EmailValidator, } ] answers = prompt(questions, style=style) return answers def askField(data): questions = [] for key, value in data.items(): question = { 'type': 'input', 'name': key, 'message': 'Enter {}:'.format(key), 'default': value, 'validate': EmailValidator if key=="email" else PhoneNumberValidator if key == "phone" else EmptyValidator, } questions.append(question) answers = prompt(questions, style=style) return answers def pretty_format(data): contacts = [] for contact in data: _contact = "{} {} ({}) <{}> [{}]".format( contact["firstname"], contact["lastname"], contact["email"], contact["phone"], 
contact["id"] ) contacts.append(_contact) return contacts def selectContact(data): questions = [ { 'qmark': u'📝 ', 'type': 'list', 'name': 'contact', 'message': 'Select Contact', 'choices': pretty_format(data), 'filter': lambda val: val.lower() } ] answers = prompt(questions, style=style) return answers def getConfigDir(): if sys.platform == "win32": app_config_dir = os.getenv("LOCALAPPDATA") else: app_config_dir = os.getenv("HOME") if os.getenv("XDG_CONFIG_HOME"): app_config_dir = os.getenv("XDG_CONFIG_HOME") configDir = os.path.join(app_config_dir, ".config") return configDir def create_data(): path = os.path.join(getConfigDir(), "data.json") schema = [{"contacts": []}, {"todos": []}] if not os.path.exists(path) or len(open(path, 'r').read().strip()) == 0: with open(path, "w") as _data: json.dump(schema,_data)
PypiClean
/ISPIP-1.13.tar.gz/ISPIP-1.13/ispip/trainingsetstats.py
import pandas as pd
from sklearn.metrics import f1_score, matthews_corrcoef, precision_recall_curve, roc_curve, auc

# NOTE(review): delong_roc_test is imported but never used in this module.
from .compare_auc_delong_xu import delong_roc_test


def trainningsetstats():
    # NOTE(review): function name contains a typo ("trainning"); kept because
    # it is part of the module's public interface.
    #
    # One-off analysis script: computes per-protein and overall F-score, MCC,
    # ROC and precision/recall statistics for the "vorffip" predictor on the
    # test-set proteins, and writes the results to CSV files.
    #
    # WARNING: every input/output path below is a hardcoded absolute path on
    # the original author's machine — this will only run there as-is.

    # df = pd.read_csv("/Users/evanedelstein/Desktop/MetaDPIv2/metadpi/input/input_data_all.csv")

    # the test set defines which proteins are evaluated
    test_set = pd.read_csv(
        "/Users/evanedelstein/Desktop/MetaDPIv2/metadpi/input/test_all_prot.csv")
    proteins = test_set["protein"].tolist()
    proteins = [i.upper() for i in proteins]

    # df = pd.read_csv("/Users/evanedelstein/Desktop/Research_Evan/Raji_Summer2019_atom/Meta_DPI/Data/Test_data/meta-ppisp-results-comma-new.txt")
    df = pd.read_csv(
        "/Users/evanedelstein/Desktop/Research_Evan/Raji_Summer2019_atom/Meta_DPI/Data/vorffip_renumbered.txt")

    # residue ids look like "<res>_<protein>"; extract the protein part
    df["protein"] = [x.split('_')[1] for x in df['residue']]

    # NOTE(review): this result is discarded — presumably a leftover debug check
    df.isnull().any()
    df = df.fillna(method='ffill')

    # restrict to the test-set proteins
    df = df[df['protein'].isin(proteins)]

    # per-protein number of residues to label positive
    cutoff_dict = cutoff_file_parser(
        "/Users/evanedelstein/Desktop/MetaDPIv2/metadpi/input/cutoffs.csv")
    output = pd.DataFrame()
    for i in ['vorffip']:
        # for each protein, mark the top `cutoff` residues (by predictor
        # score) as predicted positives
        top = df.sort_values(by=[i], ascending=False).groupby((["protein"])).apply(
            lambda x: x.head(cutoff_dict[x.name])).index.get_level_values(1).tolist()
        # NOTE(review): the comprehension variable `i` shadows the loop
        # variable `i`; the f-string key is evaluated before the
        # comprehension, so behaviour is correct but confusing.
        df[f'{i}_bin'] = [
            1 if i in top else 0 for i in df.index.tolist()]
        output[f"{i}_fscore"] = df.groupby((["protein"])).apply(
            lambda x: fscore(x, 'annotated', i))
        output[f"{i}_mcc"] = df.groupby((["protein"])).apply(
            lambda x: mcc(x, 'annotated', i))
    # roc_and_pr_dic = roc_and_pr(df, "annotated", "meta-ppisp")
    roc_and_pr_dic = roc_and_pr(df, "annotated", "vorffip")
    # overall (pooled) F-score and MCC across all proteins
    f = fscore(df, "annotated", "vorffip")
    m = mcc(df, "annotated", "vorffip")
    print("fscore: ", f, "mcc: ", m)
    roc_df = pd.DataFrame()
    prdf = pd.DataFrame()
    roc_df["fpr"] = roc_and_pr_dic["fpr"]
    roc_df["tpr"] = roc_and_pr_dic["tpr"]
    prdf["precision"] = roc_and_pr_dic["precision"]
    prdf["recall"] = roc_and_pr_dic["recall"]
    roc_df.to_csv(
        "/Users/evanedelstein/Desktop/MetaDPIv2/metadpi/output/vorffip_roc.csv")
    prdf.to_csv(
        "/Users/evanedelstein/Desktop/MetaDPIv2/metadpi/output/vorffip_pr.csv")
    output.to_csv(
        "/Users/evanedelstein/Desktop/MetaDPIv2/metadpi/output/vorffip.csv")
    return


def fscore(x, annotated_col, pred) -> float:
    # F1 score of the binarised prediction column against the annotations.
    # (Annotation corrected: f1_score returns a float, not a tuple.)
    return f1_score(x[annotated_col], x[f'{pred}_bin'])


def mcc(x, annotated_col, pred) -> float:
    # Matthews correlation coefficient of the binarised prediction column.
    # (Annotation corrected: matthews_corrcoef returns a float, not a tuple.)
    return matthews_corrcoef(x[annotated_col], x[f'{pred}_bin'])


def cutoff_file_parser(cutoff_frame) -> dict:
    # Parse the cutoff CSV into {protein: number of residues to keep}.
    cutoff_frame_df: pd.DataFrame = pd.read_csv(cutoff_frame)
    cutoff_frame_df.columns = cutoff_frame_df.columns.str.lower()
    cutoff_dict = dict(
        zip(cutoff_frame_df['protein'], cutoff_frame_df['cutoff res']))
    return cutoff_dict


def roc_and_pr(test_frame: pd.DataFrame, annotated_col, pred) -> dict:
    # Compute ROC and precision/recall curves (plus their AUCs, rounded to
    # 3 decimals) for the raw prediction scores; also prints both AUCs.
    fpr, tpr, roc_thresholds = roc_curve(
        test_frame[annotated_col], test_frame[pred])
    roc_auc = round(auc(fpr, tpr), 3)
    precision, recall, pr_thresholds = precision_recall_curve(
        test_frame[annotated_col], test_frame[pred])
    pr_auc = round(auc(recall, precision), 3)
    roc_and_pr_dic: dict = {"fpr": fpr, "tpr": tpr,
                            "roc_thresholds": roc_thresholds,
                            "roc_auc": roc_auc, "precision": precision,
                            "recall": recall,
                            "pr_thresholds": pr_thresholds, "pr_auc": pr_auc}
    print("Roc: ", roc_auc, "pr: ", pr_auc)
    return roc_and_pr_dic


# NOTE(review): executed at import time — importing this module runs the full
# analysis (and fails off the author's machine because of the absolute paths).
trainningsetstats()
PypiClean
/EasyEquities-1.2.3.tar.gz/EasyEquities-1.2.3/README.md
# [EasyEquities](https://www.easyequities.co.za/) Python Package This is a Python package for interacting with the [EasyEquities](https://www.easyequities.co.za/) trading platform. It provides a simple way to log in, check account balances, and retrieve your holdings. ## Requirements - Python 3.x - Selenium - pandas - Beautiful Soup 4 - GeckoDriverManager - Firefox ## Installation To install the dependencies, run: ```bash pip install pandas selenium webdriver_manager beautifulsoup4 ``` Then, install the package from PyPI using: ```bash pip install easyequities ``` ## Usage To use the package, you will need to import the package and create an instance of the EasyEquities class. You will need to provide your username and password as arguments. You can then use the `balance` and `holdings` methods to obtain your account balance and holdings, respectively. Here's an example of how to use the package: ```python from easyequities import EasyEquities # Instantiate the EasyEquities class with your account credentials ee = EasyEquities('your_username', 'your_password') # Get your account balance balance = ee.balance() print(balance) # Get your holdings holdings = ee.holdings() print(holdings) # Sell your holdings ee.sell_holdings('holding') # Buy more holdings ee.buy_holdings('holding', 'amount') # Close the EasyEquities website ee.close() ``` Note that the package uses Firefox as the web driver, so you'll need to have Firefox installed on your machine for it to work. If you don't have Firefox installed, you can download it [here](https://www.mozilla.org/en-US/firefox/new/). ## License This code is licensed under the MIT License. See the [LICENSE](https://github.com/kloniphani/EasyEquities/blob/main/LICENSE). Feel free to use it for any purpose. # Disclaimers Before you start using the code, a few disclaimers: - This code does not come with any guarantee or warranty. - I am not a financial advisor. This work does not represent any financial advice. 
- I do not recommend the use of this code for any investment decisions. - This code is designed for personal use, and is not designed for high-volume extractions. - Use the code at your own risk.
PypiClean
/NESTML-5.3.0-py3-none-any.whl/pynestml/utils/messages.py
from enum import Enum from typing import Tuple class MessageCode(Enum): """ A mapping between codes and the corresponding messages. """ START_PROCESSING_FILE = 0 START_SYMBOL_TABLE_BUILDING = 1 FUNCTION_CALL_TYPE_ERROR = 2 TYPE_NOT_DERIVABLE = 3 IMPLICIT_CAST = 4 CAST_NOT_POSSIBLE = 5 TYPE_DIFFERENT_FROM_EXPECTED = 6 ADD_SUB_TYPE_MISMATCH = 7 BUFFER_SET_TO_CONDUCTANCE_BASED = 8 NO_VARIABLE_FOUND = 9 SPIKE_INPUT_PORT_TYPE_NOT_DEFINED = 10 MODEL_CONTAINS_ERRORS = 11 START_PROCESSING_MODEL = 12 CODE_SUCCESSFULLY_GENERATED = 13 MODULE_SUCCESSFULLY_GENERATED = 14 NO_CODE_GENERATED = 15 VARIABLE_USED_BEFORE_DECLARATION = 16 VARIABLE_DEFINED_RECURSIVELY = 17 VALUE_ASSIGNED_TO_BUFFER = 18 ARG_NOT_KERNEL_OR_EQUATION = 19 ARG_NOT_SPIKE_INPUT = 20 NUMERATOR_NOT_ONE = 21 ORDER_NOT_DECLARED = 22 CONTINUOUS_INPUT_PORT_WITH_QUALIFIERS = 23 BLOCK_NOT_CORRECT = 24 VARIABLE_NOT_IN_STATE_BLOCK = 25 WRONG_NUMBER_OF_ARGS = 26 NO_RHS = 27 SEVERAL_LHS = 28 FUNCTION_REDECLARED = 29 FUNCTION_NOT_DECLARED = 30 NO_ODE = 31 NO_INIT_VALUE = 32 MODEL_REDECLARED = 33 NEST_COLLISION = 34 KERNEL_OUTSIDE_CONVOLVE = 35 NAME_COLLISION = 36 TYPE_NOT_SPECIFIED = 37 NO_TYPE_ALLOWED = 38 NO_ASSIGNMENT_ALLOWED = 39 NOT_A_VARIABLE = 40 MULTIPLE_KEYWORDS = 41 VECTOR_IN_NON_VECTOR = 42 VARIABLE_REDECLARED = 43 SOFT_INCOMPATIBILITY = 44 HARD_INCOMPATIBILITY = 45 NO_RETURN = 46 NOT_LAST_STATEMENT = 47 SYMBOL_NOT_RESOLVED = 48 SYNAPSE_SOLVED_BY_GSL = 49 TYPE_MISMATCH = 50 NO_SEMANTICS = 51 NEURON_SOLVED_BY_GSL = 52 NO_UNIT = 53 NOT_NEUROSCIENCE_UNIT = 54 INTERNAL_WARNING = 55 OPERATION_NOT_DEFINED = 56 CONVOLVE_NEEDS_BUFFER_PARAMETER = 57 INPUT_PATH_NOT_FOUND = 58 LEXER_ERROR = 59 PARSER_ERROR = 60 UNKNOWN_TARGET = 61 VARIABLE_WITH_SAME_NAME_AS_UNIT = 62 ANALYSING_TRANSFORMING_NEURON = 63 ODE_NEEDS_CONSISTENT_UNITS = 64 TEMPLATED_ARG_TYPES_INCONSISTENT = 65 MODULE_NAME_INFO = 66 TARGET_PATH_INFO = 67 ODE_FUNCTION_NEEDS_CONSISTENT_UNITS = 68 DELTA_FUNCTION_CANNOT_BE_MIXED = 69 UNKNOWN_TYPE = 70 
ASTDATATYPE_TYPE_SYMBOL_COULD_NOT_BE_DERIVED = 71 KERNEL_WRONG_TYPE = 72 KERNEL_IV_WRONG_TYPE = 73 EMIT_SPIKE_FUNCTION_BUT_NO_OUTPUT_PORT = 74 NO_FILES_IN_INPUT_PATH = 75 STATE_VARIABLES_NOT_INITIALZED = 76 EQUATIONS_DEFINED_BUT_INTEGRATE_ODES_NOT_CALLED = 77 TEMPLATE_ROOT_PATH_CREATED = 78 VECTOR_PARAMETER_WRONG_BLOCK = 79 VECTOR_PARAMETER_WRONG_TYPE = 80 VECTOR_PARAMETER_WRONG_SIZE = 81 PRIORITY_DEFINED_FOR_ONLY_ONE_EVENT_HANDLER = 82 REPEATED_PRIORITY_VALUE = 83 DELAY_VARIABLE = 84 NEST_DELAY_DECORATOR_NOT_FOUND = 85 INPUT_PORT_SIZE_NOT_INTEGER = 86 INPUT_PORT_SIZE_NOT_GREATER_THAN_ZERO = 87 INSTALL_PATH_INFO = 88 CREATING_INSTALL_PATH = 89 CREATING_TARGET_PATH = 90 class Messages: """ This class contains a collection of error messages which enables a centralized maintaining and modifications of those. """ @classmethod def get_start_processing_file(cls, file_path): """ Returns a message indicating that processing of a file has started :param file_path: the path to the file :type file_path: str :return: message code tuple :rtype: (MessageCode,str) """ message = 'Start processing \'' + file_path + '\'!' return MessageCode.START_PROCESSING_FILE, message @classmethod def get_input_path_not_found(cls, path): message = 'Input path ("%s") not found!' % (path) return MessageCode.INPUT_PATH_NOT_FOUND, message @classmethod def get_unknown_target(cls, target): message = 'Unknown target ("%s")' % (target) return MessageCode.UNKNOWN_TARGET, message @classmethod def get_no_code_generated(cls): """ Returns a message indicating that no code will be generated on this run. 
:return: a message :rtype: (MessageCode,str) """ message = 'No target specified: no code will be generated' return MessageCode.NO_CODE_GENERATED, message @classmethod def get_lexer_error(cls): message = 'Error occurred during lexing: abort' return MessageCode.LEXER_ERROR, message @classmethod def get_could_not_determine_cond_based(cls, type_str, name): message = "Unable to determine based on type '" + type_str + "' of variable '" + name + "' whether conductance-based or current-based" return MessageCode.LEXER_ERROR, message @classmethod def get_parser_error(cls): message = 'Error occurred during parsing: abort' return MessageCode.PARSER_ERROR, message @classmethod def get_binary_operation_not_defined(cls, lhs, operator, rhs): message = 'Operation %s %s %s is not defined!' % (lhs, operator, rhs) return MessageCode.OPERATION_NOT_DEFINED, message @classmethod def get_binary_operation_type_could_not_be_derived(cls, lhs, operator, rhs, lhs_type, rhs_type): message = 'The type of the expression (left-hand side = \'%s\'; binary operator = \'%s\'; right-hand side = \'%s\') could not be derived: left-hand side has type \'%s\' whereas right-hand side has type \'%s\'!' % ( lhs, operator, rhs, lhs_type, rhs_type) return MessageCode.TYPE_MISMATCH, message @classmethod def get_unary_operation_not_defined(cls, operator, term): message = 'Operation %s%s is not defined!' % (operator, term) return MessageCode.OPERATION_NOT_DEFINED, message @classmethod def get_convolve_needs_buffer_parameter(cls): message = 'Convolve requires a buffer variable as second parameter!' 
return MessageCode.CONVOLVE_NEEDS_BUFFER_PARAMETER, message @classmethod def get_implicit_magnitude_conversion(cls, lhs, rhs, conversion_factor): message = 'Implicit magnitude conversion from %s to %s with factor %s ' % (lhs.print_symbol(), rhs.print_symbol(), conversion_factor) return MessageCode.IMPLICIT_CAST, message @classmethod def get_start_building_symbol_table(cls): """ Returns a message that the building for a neuron has been started. :return: a message :rtype: (MessageCode,str) """ return MessageCode.START_SYMBOL_TABLE_BUILDING, 'Start building symbol table!' @classmethod def get_function_call_implicit_cast(cls, arg_nr, function_call, expected_type, got_type, castable=False): """ Returns a message indicating that an implicit cast has been performed. :param arg_nr: the number of the argument which is cast :type arg_nr: int :param function_call: a single function call :type function_call: ast_function_call :param expected_type: the expected type :type expected_type: type_symbol :param got_type: the got-type :type got_type: TypeSymbol :param castable: is the type castable :type castable: bool :return: a message :rtype: (MessageCode,str) """ if not castable: message = str(arg_nr) + '. argument of function-call \'%s\' at is wrongly typed! Expected \'%s\',' \ ' found \'%s\'!' % (function_call.get_name(), got_type.print_symbol(), expected_type.print_symbol()) else: message = str(arg_nr) + '. argument of function-call \'%s\' is wrongly typed! ' \ 'Implicit cast from \'%s\' to \'%s\'.' % (function_call.get_name(), got_type.print_symbol(), expected_type.print_symbol()) return MessageCode.FUNCTION_CALL_TYPE_ERROR, message @classmethod def get_type_could_not_be_derived(cls, rhs): """ Returns a message indicating that the type of the rhs rhs could not be derived. :param rhs: an rhs :type rhs: ast_expression or ast_simple_expression :return: a message :rtype: (MessageCode,str) """ message = 'Type of \'%s\' could not be derived!' 
% rhs return MessageCode.TYPE_NOT_DERIVABLE, message @classmethod def get_implicit_cast_rhs_to_lhs(cls, rhs_type, lhs_type): """ Returns a message indicating that the type of the lhs does not correspond to the one of the rhs, but the rhs can be cast down to lhs type. :param rhs_type: the type of the rhs :type rhs_type: str :param lhs_type: the type of the lhs :type lhs_type: str :return: a message :rtype:(MessageCode,str) """ message = 'Implicit casting from (compatible) type \'%s\' to \'%s\'.' % (rhs_type, lhs_type) return MessageCode.IMPLICIT_CAST, message @classmethod def get_different_type_rhs_lhs(cls, rhs_expression, lhs_expression, rhs_type, lhs_type): """ Returns a message indicating that the type of the lhs does not correspond to the one of the rhs and can not be cast down to a common type. :param rhs_expression: the rhs rhs :type rhs_expression: ASTExpression or ASTSimpleExpression :param lhs_expression: the lhs rhs :type lhs_expression: ast_expression or ast_simple_expression :param rhs_type: the type of the rhs :type rhs_type: type_symbol :param lhs_type: the type of the lhs :type lhs_type: TypeSymbol :return: a message :rtype:(MessageCode,str) """ message = 'Type of lhs \'%s\' does not correspond to rhs \'%s\'! LHS: \'%s\', RHS: \'%s\'!' % ( lhs_expression, rhs_expression, lhs_type.print_symbol(), rhs_type.print_symbol()) return MessageCode.CAST_NOT_POSSIBLE, message @classmethod def get_type_different_from_expected(cls, expected_type, got_type): """ Returns a message indicating that the received type is different from the expected one. :param expected_type: the expected type :type expected_type: TypeSymbol :param got_type: the actual type :type got_type: type_symbol :return: a message :rtype: (MessageCode,str) """ from pynestml.symbols.type_symbol import TypeSymbol assert (expected_type is not None and isinstance(expected_type, TypeSymbol)), \ '(PyNestML.Utils.Message) Not a type symbol provided (%s)!' 
% type(expected_type) assert (got_type is not None and isinstance(got_type, TypeSymbol)), \ '(PyNestML.Utils.Message) Not a type symbol provided (%s)!' % type(got_type) message = 'Actual type different from expected. Expected: \'%s\', got: \'%s\'!' % ( expected_type.print_symbol(), got_type.print_symbol()) return MessageCode.TYPE_DIFFERENT_FROM_EXPECTED, message @classmethod def get_buffer_set_to_conductance_based(cls, buffer): """ Returns a message indicating that a buffer has been set to conductance based. :param buffer: the name of the buffer :type buffer: str :return: a message :rtype: (MessageCode,str) """ assert (buffer is not None and isinstance(buffer, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(buffer) message = 'Buffer \'%s\' set to conductance based!' % buffer return MessageCode.BUFFER_SET_TO_CONDUCTANCE_BASED, message @classmethod def get_no_variable_found(cls, variable_name): """ Returns a message indicating that a variable has not been found. :param variable_name: the name of the variable :type variable_name: str :return: a message :rtype: (MessageCode,str) """ assert (variable_name is not None and isinstance(variable_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(variable_name) message = 'No variable \'%s\' found!' % variable_name return MessageCode.NO_VARIABLE_FOUND, message @classmethod def get_input_port_type_not_defined(cls, input_port_name: str): """ Returns a message indicating that a input_port type has not been defined, thus nS is assumed. :param input_port_name: a input_port name :return: a message :rtype: (MessageCode,str) """ assert (input_port_name is not None and isinstance(input_port_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(input_port_name) message = 'No type declared for spiking input port \'%s\'!' 
% input_port_name return MessageCode.SPIKE_INPUT_PORT_TYPE_NOT_DEFINED, message @classmethod def get_model_contains_errors(cls, model_name: str) -> Tuple[MessageCode, str]: """ Returns a message indicating that a model contains errors thus no code is generated. :param model_name: the name of the model :return: a message """ assert (model_name is not None and isinstance(model_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(model_name) message = 'Model \'' + model_name + '\' contains errors. No code generated!' return MessageCode.MODEL_CONTAINS_ERRORS, message @classmethod def get_start_processing_model(cls, model_name: str) -> Tuple[MessageCode, str]: """ Returns a message indicating that the processing of a model is started. :param model_name: the name of the model :return: a message """ assert (model_name is not None and isinstance(model_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(model_name) message = 'Starts processing of the model \'' + model_name + '\'' return MessageCode.START_PROCESSING_MODEL, message @classmethod def get_code_generated(cls, model_name, path): """ Returns a message indicating that code has been successfully generated for a neuron in a certain path. :param model_name: the name of the neuron. :type model_name: str :param path: the path to the file :type path: str :return: a message :rtype: (MessageCode,str) """ assert (model_name is not None and isinstance(model_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(model_name) assert (path is not None and isinstance(path, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(path) message = 'Successfully generated code for the model: \'' + model_name + '\' in: \'' + path + '\' !' return MessageCode.CODE_SUCCESSFULLY_GENERATED, message @classmethod def get_module_generated(cls, path): """ Returns a message indicating that a module has been successfully generated. 
:param path: the path to the generated file :type path: str :return: a message :rtype: (MessageCode,str) """ assert (path is not None and isinstance(path, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(path) message = 'Successfully generated NEST module code in \'' + path + '\' !' return MessageCode.MODULE_SUCCESSFULLY_GENERATED, message @classmethod def get_variable_used_before_declaration(cls, variable_name): """ Returns a message indicating that a variable is used before declaration. :param variable_name: a variable name :type variable_name: str :return: a message :rtype: (MessageCode,str) """ assert (variable_name is not None and isinstance(variable_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(variable_name) message = 'Variable \'%s\' used before declaration!' % variable_name return MessageCode.VARIABLE_USED_BEFORE_DECLARATION, message @classmethod def get_variable_not_defined(cls, variable_name): """ Returns a message indicating that a variable is not defined . :param variable_name: a variable name :type variable_name: str :return: a message :rtype: (MessageCode,str) """ assert (variable_name is not None and isinstance(variable_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(variable_name) message = 'Variable \'%s\' not defined!' % variable_name return MessageCode.NO_VARIABLE_FOUND, message @classmethod def get_variable_defined_recursively(cls, variable_name): """ Returns a message indicating that a variable is defined recursively. :param variable_name: a variable name :type variable_name: str :return: a message :rtype: (MessageCode,str) """ assert (variable_name is not None and isinstance(variable_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(variable_name) message = 'Variable \'%s\' defined recursively!' 
% variable_name return MessageCode.VARIABLE_DEFINED_RECURSIVELY, message @classmethod def get_value_assigned_to_buffer(cls, buffer_name): """ Returns a message indicating that a value has been assigned to a buffer. :param buffer_name: a buffer name :type buffer_name: str :return: a message :rtype: (MessageCode,str) """ assert (buffer_name is not None and isinstance(buffer_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(buffer_name) message = 'Value assigned to buffer \'%s\'!' % buffer_name return MessageCode.VALUE_ASSIGNED_TO_BUFFER, message @classmethod def get_first_arg_not_kernel_or_equation(cls, func_name): """ Indicates that the first argument of an rhs is not an equation or kernel. :param func_name: the name of the function :type func_name: str :return: a message :rtype: (MessageCode,str) """ assert (func_name is not None and isinstance(func_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(func_name) message = 'First argument of \'%s\' not a kernel or equation!' % func_name return MessageCode.ARG_NOT_KERNEL_OR_EQUATION, message @classmethod def get_second_arg_not_a_spike_port(cls, func_name: str) -> Tuple[MessageCode, str]: """ Indicates that the second argument of the NESTML convolve() call is not a spiking input port. :param func_name: the name of the function :return: a message """ assert (func_name is not None and isinstance(func_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(func_name) message = 'Second argument of \'%s\' not a spiking input port!' % func_name return MessageCode.ARG_NOT_SPIKE_INPUT, message @classmethod def get_wrong_numerator(cls, unit): """ Indicates that the numerator of a unit is not 1. :param unit: the name of the unit :type unit: str :return: a message :rtype: (MessageCode,str) """ assert (unit is not None and isinstance(unit, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' 
% type(unit) message = 'Numeric numerator of unit \'%s\' not 1!' % unit return MessageCode.NUMERATOR_NOT_ONE, message @classmethod def get_order_not_declared(cls, lhs): """ Indicates that the order has not been declared. :param lhs: the name of the variable :type lhs: str :return: a message :rtype: (MessageCode,str) """ assert (lhs is not None and isinstance(lhs, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % lhs message = 'Order of differential equation for %s is not declared!' % lhs return MessageCode.ORDER_NOT_DECLARED, message @classmethod def get_continuous_input_port_specified(cls, name, keyword): """ Indicates that the continuous time input port has been specified with an `inputQualifier` keyword. :param name: the name of the buffer :type name: str :param keyword: the keyword :type keyword: list(str) :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % name message = 'Continuous time input port \'%s\' specified with type keywords (%s)!' % (name, keyword) return MessageCode.CONTINUOUS_INPUT_PORT_WITH_QUALIFIERS, message @classmethod def get_block_not_defined_correctly(cls, block, missing): """ Indicates that a given block has been defined several times or non. :param block: the name of the block which is not defined or defined multiple times. :type block: str :param missing: True if missing, False if multiple times. :type missing: bool :return: a message :rtype: (MessageCode,str) """ assert (block is not None and isinstance(block, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(block) assert (missing is not None and isinstance(missing, bool)), \ '(PyNestML.Utils.Message) Not a bool provided (%s)!' % type(missing) if missing: message = block + ' block not defined!' else: message = block + ' block defined more than once!' 
return MessageCode.BLOCK_NOT_CORRECT, message @classmethod def get_equation_var_not_in_state_block(cls, variable_name): """ Indicates that a variable in the equations block is not defined in the state block. :param variable_name: the name of the variable of an equation which is not defined in an equations block :type variable_name: str :return: a message :rtype: (MessageCode,str) """ assert (variable_name is not None and isinstance(variable_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(variable_name) message = 'Ode equation lhs-variable \'%s\' not defined in state block!' % variable_name return MessageCode.VARIABLE_NOT_IN_STATE_BLOCK, message @classmethod def get_wrong_number_of_args(cls, function_call, expected, got): """ Indicates that a wrong number of arguments has been provided to the function call. :param function_call: a function call name :type function_call: str :param expected: the expected number of arguments :type expected: int :param got: the given number of arguments :type got: int :return: a message :rtype: (MessageCode,str) """ assert (function_call is not None and isinstance(function_call, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(function_call) assert (expected is not None and isinstance(expected, int)), \ '(PyNestML.Utils.Message) Not a int provided (%s)!' % type(expected) assert (got is not None and isinstance(got, int)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(got) message = 'Wrong number of arguments in function-call \'%s\'! Expected \'%s\', found \'%s\'.' % ( function_call, expected, got) return MessageCode.WRONG_NUMBER_OF_ARGS, message @classmethod def get_no_rhs(cls, name): """ Indicates that no right-hand side has been declared for the given variable. 
:param name: the name of the rhs variable :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Function variable \'%s\' has no right-hand side!' % name return MessageCode.NO_RHS, message @classmethod def get_several_lhs(cls, names): """ Indicates that several left hand sides have been defined. :param names: a list of variables :type names: list(str) :return: a message :rtype: (MessageCode,str) """ assert (names is not None and isinstance(names, list)), \ '(PyNestML.Utils.Message) Not a list provided (%s)!' % type(names) message = 'Function declared with several variables (%s)!' % names return MessageCode.SEVERAL_LHS, message @classmethod def get_function_redeclared(cls, name, predefined): """ Indicates that a function has been redeclared. :param name: the name of the function which has been redeclared. :type name: str :param predefined: True if function is predefined, otherwise False. :type predefined: bool :return: a message :rtype:(MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) if predefined: message = 'Predefined function \'%s\' redeclared!' % name else: message = 'Function \'%s\' redeclared!' % name return MessageCode.FUNCTION_REDECLARED, message @classmethod def get_no_ode(cls, name): """ Indicates that no ODE has been defined for a variable inside the state block. :param name: the name of the variable which does not have a defined ode :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Variable \'%s\' not provided with an ODE!' 
% name return MessageCode.NO_ODE, message @classmethod def get_no_init_value(cls, name): """ Indicates that no initial value has been provided for a given variable. :param name: the name of the variable which does not have a initial value :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Initial value of ode variable \'%s\' not provided!' % name return MessageCode.NO_INIT_VALUE, message @classmethod def get_model_redeclared(cls, name: str) -> Tuple[MessageCode, str]: """ Indicates that a model has been redeclared. :param name: the name of the model which has been redeclared. :return: a message """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'model \'%s\' redeclared!' % name return MessageCode.MODEL_REDECLARED, message @classmethod def get_nest_collision(cls, name): """ Indicates that a collision between a user defined function and a nest function occurred. :param name: the name of the function which collides to nest :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Function \'%s\' collides with NEST namespace!' % name return MessageCode.NEST_COLLISION, message @classmethod def get_kernel_outside_convolve(cls, name): """ Indicates that a kernel variable has been used outside a convolve call. :param name: the name of the kernel :type name: str :return: message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Kernel \'%s\' used outside convolve!' 
% name return MessageCode.KERNEL_OUTSIDE_CONVOLVE, message @classmethod def get_compilation_unit_name_collision(cls, name, art1, art2): """ Indicates that a name collision with the same neuron inside two artifacts. :param name: the name of the neuron which leads to collision :type name: str :param art1: the first artifact name :type art1: str :param art2: the second artifact name :type art2: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) assert (art1 is not None and isinstance(art1, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(art1) assert (art2 is not None and isinstance(art2, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(art2) message = 'Name collision of \'%s\' in \'%s\' and \'%s\'!' % (name, art1, art2) return MessageCode.NAME_COLLISION, message @classmethod def get_data_type_not_specified(cls, name): """ Indicates that for a given element no type has been specified. :param name: the name of the variable for which a type has not been specified. :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Data type of \'%s\' at not specified!' % name return MessageCode.TYPE_NOT_SPECIFIED, message @classmethod def get_not_type_allowed(cls, name): """ Indicates that a type for the given element is not allowed. :param name: the name of the element for which a type is not allowed. :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'No data type allowed for \'%s\'!' 
% name return MessageCode.NO_TYPE_ALLOWED, message @classmethod def get_assignment_not_allowed(cls, name): """ Indicates that an assignment to the given element is not allowed. :param name: the name of variable to which an assignment is not allowed. :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Assignment to \'%s\' not allowed!' % name return MessageCode.NO_ASSIGNMENT_ALLOWED, message @classmethod def get_not_a_variable(cls, name): """ Indicates that a given name does not represent a variable. :param name: the name of the variable :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = '\'%s\' not a variable!' % name return MessageCode.NOT_A_VARIABLE, message @classmethod def get_multiple_keywords(cls, keyword): """ Indicates that a buffer has been declared with multiple keywords of the same type, e.g., inhibitory inhibitory :param keyword: the keyword which has been used multiple times :type keyword: str :return: a message :rtype: (MessageCode,str) """ assert (keyword is not None and isinstance(keyword, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(keyword) message = 'Buffer specified with multiple \'%s\' keywords!' % keyword return MessageCode.MULTIPLE_KEYWORDS, message @classmethod def get_vector_in_non_vector(cls, vector, non_vector): """ Indicates that a vector has been used in a non-vector declaration. :param vector: the vector variable :type vector: str :param non_vector: the non-vector lhs :type non_vector: list(str) :return: a message :rtype: (MessageCode,str) """ assert (vector is not None and isinstance(vector, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' 
% type(vector) assert (non_vector is not None and isinstance(non_vector, list)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(non_vector) message = 'Vector value \'%s\' used in a non-vector declaration of variables \'%s\'!' % (vector, non_vector) return MessageCode.VECTOR_IN_NON_VECTOR, message @classmethod def get_variable_redeclared(cls, name, predefined): """ Indicates that a given variable has been redeclared. A redeclaration can happen with user defined functions or with predefined functions (second parameter). :param name: the name of the variable :type name: str :param predefined: True if a pre-defined variable has been redeclared, otherwise False. :type predefined: bool :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) assert (predefined is not None and isinstance(predefined, bool)), \ '(PyNestML.Utils.Message) Not a bool provided (%s)!' % type(predefined) if predefined: message = 'Predefined variable \'%s\' redeclared!' % name else: message = 'Variable \'%s\' redeclared !' % name return MessageCode.VARIABLE_REDECLARED, message @classmethod def get_no_return(cls): """ Indicates that a given function has no return statement although required. :return: a message :rtype: (MessageCode,str) """ message = 'Return statement expected!' return MessageCode.NO_RETURN, message @classmethod def get_not_last_statement(cls, name): """ Indicates that given statement is not the last statement in a block, e.g., in the case that a return statement is not the last statement. :param name: the statement. :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = '\'%s\' not the last statement!' 
% name return MessageCode.NOT_LAST_STATEMENT, message @classmethod def get_function_not_declared(cls, name): """ Indicates that a function, which is not declared, has been used. :param name: the name of the function. :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Function \'%s\' is not declared!' % name return MessageCode.FUNCTION_NOT_DECLARED, message @classmethod def get_could_not_resolve(cls, name): """ Indicates that the handed over name could not be resolved to a symbol. :param name: the name which could not be resolved :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Could not resolve symbol \'%s\'!' % name return MessageCode.SYMBOL_NOT_RESOLVED, message @classmethod def get_neuron_solved_by_solver(cls, name): """ Indicates that a neuron will be solved by the GSL solver inside the model printing process without any modifications to the initial model. :param name: the name of the neuron :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'The neuron \'%s\' will be solved numerically with GSL solver without modification!' % name return MessageCode.NEURON_SOLVED_BY_GSL, message @classmethod def get_synapse_solved_by_solver(cls, name): """ Indicates that a synapse will be solved by the GSL solver inside the model printing process without any modifications to the initial model. :param name: the name of the synapse :type name: str :return: a message :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' 
% type(name) message = 'The synapse \'%s\' will be solved numerically with GSL solver without modification!' % name return MessageCode.SYNAPSE_SOLVED_BY_GSL, message @classmethod def get_could_not_be_solved(cls): """ Indicates that the set of equations could not be solved and will remain unchanged. :return: a message :rtype: (MessageCode,str) """ message = 'Equations or kernels could not be solved. The model remains unchanged!' return MessageCode.NEURON_ANALYZED, message @classmethod def get_equations_solved_exactly(cls): """ Indicates that all equations of the neuron are solved exactly by the solver script. :return: a message :rtype: (MessageCode,str) """ message = 'Equations are solved exactly!' return MessageCode.NEURON_ANALYZED, message @classmethod def get_equations_solved_by_gls(cls): """ Indicates that the set of ODEs as contained in the model will be solved by the gnu scientific library toolchain. :return: a message :rtype: (MessageCode,str) """ message = 'Kernels will be solved with GLS!' return MessageCode.NEURON_ANALYZED, message @classmethod def get_ode_solution_not_used(cls): """ Indicates that an ode has been defined in the model but is not used as part of the neurons solution. :return: a message :rtype: (MessageCode,str) """ message = 'The model has defined an ODE. But its solution is not used in the update state.' return MessageCode.NEURON_ANALYZED, message @classmethod def get_unit_does_not_exist(cls, name): """ Indicates that a unit does not exist. :param name: the name of the unit. :type name: str :return: a new code,message tuple :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Unit does not exist (%s).' % name return MessageCode.NO_UNIT, message @classmethod def get_not_neuroscience_unit_used(cls, name): """ Indicates that a non-neuroscientific unit, e.g., kg, has been used. 
Those units can not be converted to a corresponding representation in the simulation and are therefore represented by the factor 1. :param name: the name of the variable :type name: str :return: a nes code,message tuple :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Not convertible unit \'%s\' used, 1 assumed as factor!' % name return MessageCode.NOT_NEUROSCIENCE_UNIT, message @classmethod def get_ode_needs_consistent_units(cls, name, differential_order, lhs_type, rhs_type): assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'ODE definition for \'' if differential_order > 1: message += 'd^' + str(differential_order) + ' ' + name + ' / dt^' + str(differential_order) + '\'' if differential_order > 0: message += 'd ' + name + ' / dt\'' else: message += '\'' + str(name) + '\'' message += ' has inconsistent units: expected \'' + lhs_type.print_symbol() + '\', got \'' + \ rhs_type.print_symbol() + '\'' return MessageCode.ODE_NEEDS_CONSISTENT_UNITS, message @classmethod def get_ode_function_needs_consistent_units(cls, name, declared_type, expression_type): assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'ODE function definition for \'' + name + '\' has inconsistent units: expected \'' + \ declared_type.print_symbol() + '\', got \'' + expression_type.print_symbol() + '\'' return MessageCode.ODE_FUNCTION_NEEDS_CONSISTENT_UNITS, message @classmethod def get_variable_with_same_name_as_type(cls, name): """ Indicates that a variable has been declared with the same name as a physical unit, e.g. 
"V mV" :param name: the name of the variable :type name: str :return: a tuple containing message code and message text :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Variable \'%s\' has the same name as a physical unit!' % name return MessageCode.VARIABLE_WITH_SAME_NAME_AS_UNIT, message @classmethod def get_analysing_transforming_neuron(cls, name): """ Indicates start of code generation :param name: the name of the neuron model :type name: ASTNeuron :return: a nes code,message tuple :rtype: (MessageCode,str) """ assert (name is not None and isinstance(name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(name) message = 'Analysing/transforming neuron \'%s\'' % name return MessageCode.ANALYSING_TRANSFORMING_NEURON, message @classmethod def templated_arg_types_inconsistent(cls, function_name, failing_arg_idx, other_args_idx, failing_arg_type_str, other_type_str): """ For templated function arguments, indicates inconsistency between (formal) template argument types and actual derived types. :param name: the name of the neuron model :type name: ASTNeuron :return: a nes code,message tuple :rtype: (MessageCode,str) """ message = 'In function \'' + function_name + '\': actual derived type of templated parameter ' + \ str(failing_arg_idx + 1) + ' is \'' + failing_arg_type_str + '\', which is inconsistent with that of parameter(s) ' + \ ', '.join([str(_ + 1) for _ in other_args_idx]) + ', which have type \'' + other_type_str + '\'' return MessageCode.TEMPLATED_ARG_TYPES_INCONSISTENT, message @classmethod def delta_function_cannot_be_mixed(cls): """ Delta function cannot be mixed with expressions. 
""" message = "delta function cannot be mixed with expressions; please instead perform these operations on the convolve() function where this kernel is used" return MessageCode.DELTA_FUNCTION_CANNOT_BE_MIXED, message @classmethod def delta_function_one_arg(cls, deltafunc): """ Delta function takes exactly one argument. :param deltafunc: the delta function node :type name: ASTFunctionCall """ message = "delta function takes exactly one argument (time *t*); instead found " + ", ".join([ str(arg) for arg in deltafunc.get_args()]) return MessageCode.DELTA_FUNCTION_CANNOT_BE_MIXED, message @classmethod def unknown_type(cls, provided_type_str): """ Unknown type or unit literal. :param provided_type_str: the provided type as a string :type provided_type_str: str """ message = "Unknown type or unit literal: " + provided_type_str return MessageCode.UNKNOWN_TYPE, message @classmethod def astdatatype_type_symbol_could_not_be_derived(cls): """ Unknown type or unit literal. :param provided_type_str: the provided type as a string :type provided_type_str: str """ message = "ASTDataType type symbol could not be derived" return MessageCode.ASTDATATYPE_TYPE_SYMBOL_COULD_NOT_BE_DERIVED, message @classmethod def get_emit_spike_function_but_no_output_port(cls): """ Indicates that an emit_spike() function was called, but no spiking output port has been defined. :return: a (code, message) tuple :rtype: (MessageCode, str) """ message = 'emit_spike() function was called, but no spiking output port has been defined!' return MessageCode.EMIT_SPIKE_FUNCTION_BUT_NO_OUTPUT_PORT, message @classmethod def get_kernel_wrong_type(cls, kernel_name: str, differential_order: int, actual_type: str) -> Tuple[MessageCode, str]: """ Returns a message indicating that the type of a kernel is wrong. :param kernel_name: the name of the kernel :param differential_order: differential order of the kernel left-hand side, e.g. 
2 if the kernel is g'' :param actual_type: the name of the actual type that was found in the model """ assert (kernel_name is not None and isinstance(kernel_name, str)), \ '(PyNestML.Utils.Message) Not a string provided (%s)!' % type(kernel_name) if differential_order == 0: expected_type_str = "real or int" else: assert differential_order > 0 expected_type_str = "s**-%d" % differential_order message = 'Kernel \'%s\' was found to be of type \'%s\' (should be %s)!' % ( kernel_name, actual_type, expected_type_str) return MessageCode.KERNEL_WRONG_TYPE, message @classmethod def get_kernel_iv_wrong_type(cls, iv_name: str, actual_type: str, expected_type: str) -> Tuple[MessageCode, str]: """ Returns a message indicating that the type of a kernel initial value is wrong. :param iv_name: the name of the state variable with an initial value :param actual_type: the name of the actual type that was found in the model :param expected_type: the name of the type that was expected """ message = 'Initial value \'%s\' was found to be of type \'%s\' (should be %s)!' % (iv_name, actual_type, expected_type) return MessageCode.KERNEL_IV_WRONG_TYPE, message @classmethod def get_no_files_in_input_path(cls, path: str): message = "No files found matching '*.nestml' in provided input path '" + path + "'" return MessageCode.NO_FILES_IN_INPUT_PATH, message @classmethod def get_state_variables_not_initialized(cls, var_name: str): message = "The variable \'%s\' is not initialized." % var_name return MessageCode.STATE_VARIABLES_NOT_INITIALZED, message @classmethod def get_equations_defined_but_integrate_odes_not_called(cls): message = "Equations defined but integrate_odes() not called" return MessageCode.EQUATIONS_DEFINED_BUT_INTEGRATE_ODES_NOT_CALLED, message @classmethod def get_template_root_path_created(cls, templates_root_dir: str): message = "Given template root path is not an absolute path. 
" \ "Creating the absolute path with default templates directory '" + templates_root_dir + "'" return MessageCode.TEMPLATE_ROOT_PATH_CREATED, message @classmethod def get_vector_parameter_wrong_block(cls, var, block): message = "The vector parameter '" + var + "' is declared in the wrong block '" + block + "'. " \ "The vector parameter can only be declared in parameters or internals block." return MessageCode.VECTOR_PARAMETER_WRONG_BLOCK, message @classmethod def get_vector_parameter_wrong_type(cls, var): message = "The vector parameter '" + var + "' is of the wrong type. " \ "The vector parameter can be only of type integer." return MessageCode.VECTOR_PARAMETER_WRONG_TYPE, message @classmethod def get_vector_parameter_wrong_size(cls, var, value): message = "The vector parameter '" + var + "' has value '" + value + "' " \ "which is less than or equal to 0." return MessageCode.VECTOR_PARAMETER_WRONG_SIZE, message @classmethod def get_priority_defined_for_only_one_receive_block(cls, event_handler_port_name: str): message = "Priority defined for only one event handler (" + event_handler_port_name + ")" return MessageCode.PRIORITY_DEFINED_FOR_ONLY_ONE_EVENT_HANDLER, message @classmethod def get_repeated_priorty_value(cls): message = "Priority values for event handlers need to be unique" return MessageCode.REPEATED_PRIORITY_VALUE, message @classmethod def get_function_is_delay_variable(cls, func): message = "Function '" + func + "' is not a function but a delay variable." return MessageCode.DELAY_VARIABLE, message @classmethod def get_nest_delay_decorator_not_found(cls): message = "To generate code for NEST Simulator, at least one parameter in the model should be decorated with the ``@nest::delay`` keyword." return MessageCode.NEST_DELAY_DECORATOR_NOT_FOUND, message @classmethod def get_input_port_size_not_integer(cls, port_name: str): message = "The size of the input port " + port_name + " is not of integer type." 
return MessageCode.INPUT_PORT_SIZE_NOT_INTEGER, message @classmethod def get_input_port_size_not_greater_than_zero(cls, port_name: str): message = "The size of the input port " + port_name + " must be greater than zero." return MessageCode.INPUT_PORT_SIZE_NOT_GREATER_THAN_ZERO, message @classmethod def get_target_path_info(cls, target_dir: str): message = "Target platform code will be generated in directory: '" + target_dir + "'" return MessageCode.TARGET_PATH_INFO, message @classmethod def get_install_path_info(cls, install_path: str): message = "Target platform code will be installed in directory: '" + install_path + "'" return MessageCode.INSTALL_PATH_INFO, message @classmethod def get_creating_target_path(cls, target_path: str): message = "Creating target directory: '" + target_path + "'" return MessageCode.CREATING_TARGET_PATH, message @classmethod def get_creating_install_path(cls, install_path: str): message = "Creating installation directory: '" + install_path + "'" return MessageCode.CREATING_INSTALL_PATH, message
PypiClean
/MUSE_OS-1.0.2.tar.gz/MUSE_OS-1.0.2/src/muse/sectors/sector.py
from __future__ import annotations from typing import ( Any, Callable, Iterator, Mapping, Optional, Sequence, Text, Tuple, Union, cast, ) import pandas as pd import xarray as xr from muse.agents import AbstractAgent from muse.production import PRODUCTION_SIGNATURE from muse.sectors.abstract import AbstractSector from muse.sectors.register import register_sector from muse.sectors.subsector import Subsector @register_sector(name="default") class Sector(AbstractSector): # type: ignore """Base class for all sectors.""" @classmethod def factory(cls, name: Text, settings: Any) -> Sector: from muse.interactions import factory as interaction_factory from muse.outputs.sector import factory as ofactory from muse.production import factory as pfactory from muse.readers import read_timeslices from muse.readers.toml import read_technodata from muse.utilities import nametuple_to_dict sector_settings = getattr(settings.sectors, name)._asdict() for attribute in ("name", "type", "priority", "path"): sector_settings.pop(attribute, None) timeslices = read_timeslices( sector_settings.pop("timeslice_levels", None) ).get_index("timeslice") technologies = read_technodata(settings, name, settings.time_framework) if "subsectors" not in sector_settings: raise RuntimeError(f"Missing 'subsectors' section in sector {name}") if len(sector_settings["subsectors"]._asdict()) == 0: raise RuntimeError(f"Empty 'subsectors' section in sector {name}") subsectors = [ Subsector.factory( subsec_settings, technologies, regions=settings.regions, current_year=int(min(settings.time_framework)), name=subsec_name, ) for subsec_name, subsec_settings in sector_settings.pop("subsectors") ._asdict() .items() ] are_disjoint_commodities = sum((len(s.commodities) for s in subsectors)) == len( set().union(*(set(s.commodities) for s in subsectors)) # type: ignore ) if not are_disjoint_commodities: raise RuntimeError("Subsector commodities are not disjoint") outputs = ofactory(*sector_settings.pop("outputs", []), 
sector_name=name) supply_args = sector_settings.pop( "supply", sector_settings.pop("dispatch_production", {}) ) if isinstance(supply_args, Text): supply_args = {"name": supply_args} else: supply_args = nametuple_to_dict(supply_args) supply = pfactory(**supply_args) interactions = interaction_factory(sector_settings.pop("interactions", None)) for attr in ("technodata", "commodities_out", "commodities_in"): sector_settings.pop(attr, None) return cls( name, technologies, subsectors=subsectors, timeslices=timeslices, supply_prod=supply, outputs=outputs, interactions=interactions, **sector_settings, ) def __init__( self, name: Text, technologies: xr.Dataset, subsectors: Sequence[Subsector] = [], timeslices: Optional[pd.MultiIndex] = None, technodata_timeslices: xr.Dataset = None, interactions: Optional[Callable[[Sequence[AbstractAgent]], None]] = None, interpolation: Text = "linear", outputs: Optional[Callable] = None, supply_prod: Optional[PRODUCTION_SIGNATURE] = None, ): from muse.interactions import factory as interaction_factory from muse.outputs.sector import factory as ofactory from muse.production import maximum_production self.name: Text = name """Name of the sector.""" self.subsectors: Sequence[Subsector] = list(subsectors) """Subsectors controlled by this object.""" self.technologies: xr.Dataset = technologies """Parameters describing the sector's technologies.""" self.timeslices: Optional[pd.MultiIndex] = timeslices """Timeslice at which this sector operates. If None, it will operate using the timeslice of the input market. """ self.interpolation: Mapping[Text, Any] = { "method": interpolation, "kwargs": {"fill_value": "extrapolate"}, } """Interpolation method and arguments when computing years.""" if interactions is None: interactions = interaction_factory() self.interactions = interactions """Interactions between agents. Called right before computing new investments, this function should manage any interactions between agents, e.g. 
passing assets from *new* agents to *retro* agents, and maket make-up from *retro* to *new*. Defaults to doing nothing. The function takes the sequence of agents as input, and returns nothing. It is expected to modify the agents in-place. See Also -------- :py:mod:`muse.interactions` contains MUSE's base interactions """ self.outputs: Callable = ( cast(Callable, ofactory()) if outputs is None else outputs ) """A function for outputing data for post-mortem analysis.""" self.supply_prod = ( supply_prod if supply_prod is not None else maximum_production ) """ Computes production as used to return the supply to the MCA. It can be anything registered with :py:func:`@register_production<muse.production.register_production>`. """ @property def forecast(self): """Maximum forecast horizon across agents. If no agents with a "forecast" attribute are found, defaults to 5. It cannot be lower than 1 year. """ forecasts = [ getattr(agent, "forecast") for agent in self.agents if hasattr(agent, "forecast") ] if len(forecasts) == 0: return 5 return max(1, max(forecasts)) def next( self, mca_market: xr.Dataset, time_period: Optional[int] = None, current_year: Optional[int] = None, ) -> xr.Dataset: """Advance sector by one time period. Args: mca_market: Market with ``demand``, ``supply``, and ``prices``. time_period: Length of the time period in the framework. Defaults to the range of ``mca_market.year``. Returns: A market containing the ``supply`` offered by the sector, it's attendant ``consumption`` of fuels and materials and the associated ``costs``. 
""" from logging import getLogger if time_period is None: time_period = int(mca_market.year.max() - mca_market.year.min()) if current_year is None: current_year = int(mca_market.year.min()) getLogger(__name__).info(f"Running {self.name} for year {current_year}") # > to sector timeslice market = self.convert_market_timeslice( mca_market.sel( commodity=self.technologies.commodity, region=self.technologies.region ).interp( year=sorted( { current_year, current_year + time_period, current_year + self.forecast, } ), **self.interpolation, ), self.timeslices, ) # > agent interactions self.interactions(list(self.agents)) # > investment years = sorted( set( market.year.data.tolist() + self.capacity.installed.data.tolist() + self.technologies.year.data.tolist() ) ) technologies = self.technologies.interp(year=years, **self.interpolation) for subsector in self.subsectors: subsector.invest( technologies, market, time_period=time_period, current_year=current_year ) # > output to mca output_data = self.market_variables(market, technologies) # < output to mca self.outputs(output_data, self.capacity, technologies) # > to mca timeslices result = output_data.copy(deep=True) if "dst_region" in result: exclude = ["dst_region", "commodity", "year", "timeslice"] prices = market.prices.expand_dims(dst_region=market.prices.region.values) sup, prices = xr.broadcast(result.supply, prices) sup = sup.fillna(0.0) con, prices = xr.broadcast(result.consumption, prices) con = con.fillna(0.0) supply = result.supply.sum("region").rename(dst_region="region") consumption = con.sum("dst_region") assert len(supply.region) == len(consumption.region) # Need to reindex costs to avoid nans for non-producing regions costs0, prices = xr.broadcast(result.costs, prices, exclude=exclude) # Fulfil nans with price values costs0 = costs0.reindex_like(prices).fillna(prices) costs0 = costs0.where(costs0 > 0, prices) # Find where sup >0 (exporter) # Importers have nans and average over exporting price costs = ((costs0 
* sup) / sup.sum("dst_region")).fillna( costs0.mean("region") ) # Take average over dst regions costs = costs.where(costs > 0, prices).mean("dst_region") result = xr.Dataset( dict(supply=supply, consumption=consumption, costs=costs) ) result = self.convert_market_timeslice(result, mca_market.timeslice) result["comm_usage"] = technologies.comm_usage.sel(commodity=result.commodity) result.set_coords("comm_usage") # < to mca timeslices return result def market_variables( self, market: xr.Dataset, technologies: xr.Dataset ) -> xr.Dataset: """Computes resulting market: production, consumption, and costs.""" from muse.commodities import is_pollutant from muse.quantities import ( annual_levelized_cost_of_energy, consumption, supply_cost, ) from muse.timeslices import QuantityType, convert_timeslice from muse.utilities import broadcast_techs def group_assets(x: xr.DataArray) -> xr.DataArray: return xr.Dataset(dict(x=x)).groupby("region").sum("asset").x years = market.year.values capacity = self.capacity.interp(year=years, **self.interpolation) result = xr.Dataset() supply = self.supply_prod( market=market, capacity=capacity, technologies=technologies ) if "timeslice" in market.prices.dims and "timeslice" not in supply.dims: supply = convert_timeslice(supply, market.timeslice, QuantityType.EXTENSIVE) consume = consumption(technologies, supply, market.prices) technodata = cast(xr.Dataset, broadcast_techs(technologies, supply)) costs = supply_cost( supply.where(~is_pollutant(supply.comm_usage), 0), annual_levelized_cost_of_energy( market.prices.sel(region=supply.region), technodata ), asset_dim="asset", ) if len(supply.region.dims) == 0: result = xr.Dataset( dict( supply=supply, consumption=consume, costs=costs, ) ) result = result.sum("asset") result = result.expand_dims(region=[result.region.values]) else: result = xr.Dataset( dict( supply=group_assets(supply), consumption=group_assets(consume), costs=costs, ) ) return result @property def capacity(self) -> xr.DataArray: 
"""Aggregates capacity across agents. The capacities are aggregated leaving only two dimensions: asset (technology, installation date, region), year. """ from muse.utilities import filter_input, reduce_assets traded = [ u.assets.capacity for u in self.agents if "dst_region" in u.assets.capacity.dims ] nontraded = [ u.assets.capacity for u in self.agents if "dst_region" not in u.assets.capacity.dims ] if not traded: full_list = [ list(nontraded[i].year.values) for i in range(len(nontraded)) if "year" in nontraded[i].dims ] flat_list = [item for sublist in full_list for item in sublist] years = sorted(list(set(flat_list))) nontraded = [ filter_input(u.assets.capacity, year=years) for u in self.agents if "dst_region" not in u.assets.capacity.dims ] return reduce_assets(nontraded) if not nontraded: full_list = [ list(traded[i].year.values) for i in range(len(traded)) if "year" in traded[i].dims ] flat_list = [item for sublist in full_list for item in sublist] years = sorted(list(set(flat_list))) traded = [ filter_input(u.assets.capacity, year=years) for u in self.agents if "dst_region" in u.assets.capacity.dims ] return reduce_assets(traded) traded_results = reduce_assets(traded) nontraded_results = reduce_assets(nontraded) return reduce_assets( [ traded_results, nontraded_results * (nontraded_results.region == traded_results.dst_region), ] ) @property def agents(self) -> Iterator[AbstractAgent]: """Iterator over all agents in the sector.""" for subsector in self.subsectors: yield from subsector.agents @staticmethod def convert_market_timeslice( market: xr.Dataset, timeslice: pd.MultiIndex, intensive: Union[Text, Tuple[Text]] = "prices", ) -> xr.Dataset: """Converts market from one to another timeslice.""" from muse.timeslices import QuantityType, convert_timeslice if isinstance(intensive, Text): intensive = (intensive,) timesliced = {d for d in market.data_vars if "timeslice" in market[d].dims} intensives = convert_timeslice( 
market[list(timesliced.intersection(intensive))], timeslice, QuantityType.INTENSIVE, ) extensives = convert_timeslice( market[list(timesliced.difference(intensives.data_vars))], timeslice, QuantityType.EXTENSIVE, ) others = market[list(set(market.data_vars).difference(timesliced))] return xr.merge([intensives, extensives, others])
PypiClean
/NehorayRapid-0.0.1-py3-none-any.whl/RapidBase/import_all.py
import RapidBase.Basic_Import_Libs from RapidBase.Basic_Import_Libs import * from RapidBase.MISC_REPOS.pyloess_regression.pyloess.TorchLoess import * ############# My Scripts: ############# #(1). Utils: import RapidBase.Utils import RapidBase.Utils.Tensor_Manipulation.Pytorch_Numpy_Utils # import RapidBase.Utils.Adding_Noise_To_Image import RapidBase.Utils.IO.GPU_Utils import RapidBase.Utils.IO.Imshow_and_Plots import RapidBase.Utils.IO.Klepto import RapidBase.Utils.Tensor_Manipulation.linspace_arange import RapidBase.Utils.MISCELENEOUS import RapidBase.Utils.IO.Path_and_Reading_utils import RapidBase.Utils.IO.tic_toc import RapidBase.Utils.Tensor_Manipulation.Array_Tensor_Manipulation import RapidBase.Utils.Classical_DSP.Fitting_And_Optimization import RapidBase.Utils.Registration.Transforms_Grids import RapidBase.Utils.Registration.Warping_Shifting import RapidBase.Utils.Classical_DSP.FFT_utils import RapidBase.Utils.Registration.Optical_Flow_utils import RapidBase.Utils.Registration.Registration_Utils import RapidBase.Utils.Classical_DSP.Signal_Generation import RapidBase.Utils.Classical_DSP.Noise_Estimation import RapidBase.Utils.Classical_DSP.Add_Noise import RapidBase.Utils.Classical_DSP.Convolution_Utils import RapidBase.Utils.Classical_DSP.Classic_ISP import RapidBase.Utils.Classical_DSP.blob_detection from RapidBase.Utils.Tensor_Manipulation.Pytorch_Numpy_Utils import * # from RapidBase.Utils.Adding_Noise_To_Image import * from RapidBase.Utils.IO.GPU_Utils import * from RapidBase.Utils.IO.Imshow_and_Plots import * from RapidBase.Utils.IO.Klepto import * from RapidBase.Utils.Tensor_Manipulation.linspace_arange import * from RapidBase.Utils.MISCELENEOUS import * from RapidBase.Utils.IO.Path_and_Reading_utils import * from RapidBase.Utils.IO.tic_toc import * from RapidBase.Utils.Tensor_Manipulation.Array_Tensor_Manipulation import * from RapidBase.Utils.Classical_DSP.Fitting_And_Optimization import * from RapidBase.Utils.Classical_DSP.FFT_utils import * from 
RapidBase.Utils.Classical_DSP.Signal_Generation import * from RapidBase.Utils.Registration.Transforms_Grids import * from RapidBase.Utils.Registration.Registration_Utils import * from RapidBase.Utils.Registration.Optical_Flow_utils import * from RapidBase.Utils.Registration.Warping_Shifting import * from RapidBase.Utils.Classical_DSP.Noise_Estimation import * from RapidBase.Utils.Classical_DSP.Add_Noise import * from RapidBase.Utils.Classical_DSP.Convolution_Utils import * from RapidBase.Utils.Classical_DSP.Classic_ISP import * from RapidBase.Utils.Classical_DSP.blob_detection import * # import RapidBase.Utils_Import_Libs # from RapidBase.Utils_Import_Libs import * #(2). Transforms: # import RapidBase.Transforms.Albumentations_utils # import RapidBase.Transforms.IMGAUG_utils # from RapidBase.Transforms.Albumentations_utils import * # from RapidBase.Transforms.IMGAUG_utils import * # import RapidBase.Transforms_Import_Libs # from RapidBase.Transforms_Import_Libs import * #(3). Optimizers: import RapidBase.TrainingCore.Optimizers_and_LR_Schedulers from RapidBase.TrainingCore.Optimizers_and_LR_Schedulers import * #(4). Losses: import RapidBase.TrainingCore.losses from RapidBase.TrainingCore.losses import * #(6). DataSets: import RapidBase.TrainingCore.Basic_DataSets from RapidBase.TrainingCore.Basic_DataSets import * #(5). 
Layers: import RapidBase.MISC_REPOS.Layers.Activations import RapidBase.MISC_REPOS.Layers.Basic_Layers import RapidBase.MISC_REPOS.Layers.Conv_Blocks import RapidBase.MISC_REPOS.Layers.DownSample_UpSample import RapidBase.MISC_REPOS.Layers.Memory_and_RNN import RapidBase.MISC_REPOS.Layers.Refinement_Modules import RapidBase.MISC_REPOS.Layers.Special_Layers import RapidBase.MISC_REPOS.Layers.Unet_UpDown_Blocks import RapidBase.MISC_REPOS.Layers.Wrappers from RapidBase.MISC_REPOS.Layers.Activations import * from RapidBase.MISC_REPOS.Layers.Basic_Layers import * from RapidBase.MISC_REPOS.Layers.Conv_Blocks import * from RapidBase.MISC_REPOS.Layers.DownSample_UpSample import * from RapidBase.MISC_REPOS.Layers.Memory_and_RNN import * from RapidBase.MISC_REPOS.Layers.Refinement_Modules import * from RapidBase.MISC_REPOS.Layers.Special_Layers import * from RapidBase.MISC_REPOS.Layers.Unet_UpDown_Blocks import * from RapidBase.MISC_REPOS.Layers.Wrappers import * # import RapidBase.Basic_Import_Libs # from RapidBase.Basic_Import_Libs import * from RapidBase.Anvil.import_all import * # #(1). Utils: # import RapidBase.Utils.Pytorch_Utils # import RapidBase.Utils.Adding_Noise_To_Image # import RapidBase.Utils.GPU_Utils # import RapidBase.Utils.Imshow_and_Plots # import RapidBase.Utils.Klepto # import RapidBase.Utils.linspace_arange # import RapidBase.Utils.MISCELENEOUS # import RapidBase.Utils.Path_and_Reading_utils # import RapidBase.Utils.tic_toc # from RapidBase.Utils.Pytorch_Utils import * # from RapidBase.Utils.Adding_Noise_To_Image import * # from RapidBase.Utils.IO.GPU_Utils import * # from RapidBase.Utils.IO.Imshow_and_Plots import * # from RapidBase.Utils.IO.Klepto import * # from RapidBase.Utils.Tensor_Manipulation.linspace_arange import * # from RapidBase.Utils.MISCELENEOUS import * # from RapidBase.Utils.IO.Path_and_Reading_utils import * # from RapidBase.Utils.IO.tic_toc import *
PypiClean
/APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/gui/views/svwidget.py
from PySide import QtGui from PySide import QtCore import matplotlib matplotlib.rcParams['backend'] = 'qt4agg' matplotlib.rcParams['backend.qt4'] = 'PySide' matplotlib.rcParams['patch.antialiased'] = False matplotlib.rcParams['figure.dpi'] = 65 matplotlib.rcParams['agg.path.chunksize'] = 80000 from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.ticker import FuncFormatter import matplotlib.pyplot as plt import numpy as np import datetime from apasvo.gui.views import takanamidialog from apasvo.gui.views import settingsdialog from apasvo.picking import envelope as env from apasvo.picking import apasvotrace as rc from apasvo.utils import plotting from apasvo.utils import clt from apasvo._version import _application_name from apasvo._version import _organization class SpanSelector(QtCore.QObject): """Allows the user to manually select a piece of a seismic signal on a SignalViewerWidget object. Attributes: xleft: Current selection lower limit (measured in h-axis units). xright: Current selection upper limit (measured in h-axis units). xmin: Minimum selection lower limit allowed (in h-axis units). xmax: Maximum selection upper limit allowed (in h-axis units). active: Indicates whether the selector object is active or not. minstep: Minimun selection step allowed. Signals: toogled: 'active' state changes. valueChanged: 'xleft', 'xright' values changes. 
""" toggled = QtCore.Signal(bool) valueChanged = QtCore.Signal(float, float) right_clicked = QtCore.Signal() def __init__(self, fig, fs=50.0, xmin=0.0, xmax=0.0): super(SpanSelector, self).__init__() self.fig = fig self._xleft_in_samples = 0 self._xright_in_samples = 0 self.fs = fs self._xmin_in_samples = int(xmin * self.fs) self._xmax_in_samples = int(xmax * self.fs) self.active = False self.enabled = True self.selectors = [ax.axvspan(0, 1, fc='LightCoral', ec='r', alpha=0.7, picker=5) for ax in self.fig.axes] for s in self.selectors: s.set_visible(False) self.pick_threshold = None self.press_selector = None self.canvas = self.fig.canvas self.canvas.mpl_connect('pick_event', self.on_pick) self.canvas.mpl_connect('button_press_event', self.onpress) self.canvas.mpl_connect('button_release_event', self.onrelease) self.canvas.mpl_connect('motion_notify_event', self.onmove) self.background = None self.animated = False @property def xleft(self): return self._xleft_in_samples / self.fs @property def xright(self): return self._xright_in_samples / self.fs @property def xmin(self): return self._xmin_in_samples / self.fs @property def xmax(self): return self._xmax_in_samples / self.fs @xmin.setter def xmin(self, value): self._xmin_in_samples = int(value * self.fs) @xmax.setter def xmax(self, value): self._xmax_in_samples = int(value * self.fs) def on_pick(self, pick_event): if self.active: if pick_event.mouseevent.button == 3: # Right button clicked if pick_event.artist in self.selectors: if not self.canvas.widgetlock.locked(): self.right_clicked.emit() def onpress(self, event): if self.enabled: if event.button == 1: # Left button clicked if not self.canvas.widgetlock.locked(): self.canvas.widgetlock(self) if self.active: self.set_active(False) self.press_selector = event # Start animation self._set_animated(True) xpos = self.get_xdata(self.press_selector) self.set_selector_limits(xpos, xpos, adjust_to_viewport=True) def onrelease(self, event): if 
self.canvas.widgetlock.isowner(self): self.press_selector = None # End animation self._set_animated(False) self.canvas.widgetlock.release(self) def onmove(self, event): if self.press_selector is not None: xleft = self.get_xdata(self.press_selector) xright = self.get_xdata(event) if xright < xleft: xleft, xright = xright, xleft if not self.active: self.set_active(True) self.set_selector_limits(xleft, xright, adjust_to_viewport=True) def get_xdata(self, event): inv = self.fig.axes[0].transData.inverted() xdata, _ = inv.transform((event.x, event.y)) return xdata def set_selector_limits(self, xleft, xright, adjust_to_viewport=False): xleft = int(xleft * self.fs) xright = int(xright * self.fs) if (xleft, xright) != (self._xleft_in_samples, self._xright_in_samples): if adjust_to_viewport: xmin, xmax = self.fig.axes[0].get_xlim() xmin, xmax = int(xmin * self.fs), int(xmax * self.fs) if xleft < xmin: xleft = xmin elif xleft > xmax: xleft = xmax if xright > xmax: xright = xmax elif xright < xmin: xright = xmin if xleft < self._xmin_in_samples: xleft = self._xmin_in_samples if xright > self._xmax_in_samples: xright = self._xmax_in_samples self._xleft_in_samples, self._xright_in_samples = xleft, xright for s in self.selectors: s.xy[:2, 0] = self.xleft s.xy[2:4, 0] = self.xright self.valueChanged.emit(self.xleft, self.xright) self.draw() def get_selector_limits(self): return self.xleft, self.xright def set_selection_limits(self, xmin, xmax): self.xmin, self.xmax = xmin, xmax def get_selection_limits(self): return self.xmin, self.xmax def set_active(self, value): if value != self.active: self.active = value self.toggled.emit(value) for s in self.selectors: s.set_visible(value) self.draw() def set_enabled(self, value): if value != self.enabled: self.enabled = value for s in self.selectors: if value == True: s.set_edgecolor('Red') s.set_facecolor('LightCoral') else: s.set_edgecolor('DarkSlateGray') s.set_facecolor('Gray') def draw(self): if self.animated: self._draw_animate() 
else: self.canvas.draw_idle() def _draw_animate(self): self.canvas.restore_region(self.background) if self.active: for s in self.selectors: if s.get_axes().get_visible(): self.fig.draw_artist(s) self.canvas.blit(self.fig.bbox) def _set_animated(self, value): if self.animated != value: self.animated = value for s in self.selectors: s.set_animated(value) if self.animated == True: self.canvas.draw() self.background = self.canvas.copy_from_bbox(self.fig.bbox) class EventMarker(QtCore.QObject): """Plots a vertical line marker to indicate the arrival time of a detected event on a SignalViewerWidget object. Attributes: event: Marked event. Signals: valueChanged: 'event' arrival time changed. """ event_selected = QtCore.Signal(rc.ApasvoEvent) right_clicked = QtCore.Signal(rc.ApasvoEvent) def __init__(self, fig, minimap, document, event, color='b', selected_color='r'): super(EventMarker, self).__init__() self.fig = fig self.minimap = minimap self.event = event self.document = document self.position = self.event.stime self.selected = False self.color = color self.selected_color = selected_color self.markers = [] # draw markers pos = self.event.stime / self.event.trace.fs for ax in self.fig.axes: marker = ax.axvline(pos) marker.set(color=self.color, ls='--', lw=2, picker=5) self.markers.append(marker) # draw minimap marker self.minimap.create_marker(event.resource_id.uuid, pos, color=self.color, ls='-', lw=1) # draw label bbox = dict(boxstyle="round", fc="LightCoral", ec="r", alpha=0.8) self.position_label = self.fig.text(0, 0, "Time: 00:00:00.000 seconds\nCF value: 0.000", bbox=bbox) self.position_label.set_visible(False) self.canvas = self.fig.canvas self.canvas.mpl_connect('pick_event', self.onpick) self.canvas.mpl_connect('button_release_event', self.onrelease) self.canvas.mpl_connect('motion_notify_event', self.onmove) self.pick_event = None # Animation related attrs. 
self.background = None self.animated = False self.draw() def onpick(self, pick_event): if pick_event.artist in self.markers: if not self.canvas.widgetlock.locked(): if pick_event.mouseevent.button == 1: # left button clicked self.canvas.widgetlock(self) self.pick_event = pick_event xfig, yfig = self._event_to_fig_coords(pick_event.mouseevent) self.position_label.set_position((xfig, yfig)) self.event_selected.emit(self.event) self.draw() elif pick_event.mouseevent.button == 3: # Right button clicked self.event_selected.emit(self.event) self.draw() self.right_clicked.emit(self.event) def onrelease(self, mouse_event): if self.canvas.widgetlock.isowner(self): self.position_label.set_visible(False) self.pick_event = None # End animation self.draw() self._set_animated(False) self.canvas.widgetlock.release(self) if self.position != self.event.stime: self.document.editEvent(self.event, stime=self.position, evaluation_mode=rc.mode_manual, method=rc.method_other) def onmove(self, mouse_event): if self.pick_event is not None: xdata = self.get_xdata(mouse_event) self.set_position(xdata) xfig, yfig = self._event_to_fig_coords(mouse_event) self.position_label.set_position((xfig, yfig)) self.position_label.set_visible(True) self._set_animated(True) self.draw() def get_xdata(self, event): inv = self.fig.axes[0].transData.inverted() xdata, _ = inv.transform((event.x, event.y)) return xdata def _event_to_fig_coords(self, event): inv = self.fig.transFigure.inverted() return inv.transform((event.x, event.y)) def set_position(self, value): time_in_samples = int(value * self.event.trace.fs) if time_in_samples != self.position: if 0 <= time_in_samples <= len(self.event.trace.signal): self.position = time_in_samples time_in_seconds = time_in_samples / float(self.event.trace.fs) for marker in self.markers: marker.set_xdata(time_in_seconds) if 0 <= self.position < len(self.event.trace.cf): cf_value = self.event.trace.cf[self.position] else: cf_value = np.nan 
self.position_label.set_text("Time: %s seconds.\nCF value: %.6g" % (clt.float_secs_2_string_date(time_in_seconds, starttime=self.event.trace.starttime), cf_value)) self.minimap.set_marker_position(self.event.resource_id.uuid, time_in_seconds) def remove(self): for ax, marker in zip(self.fig.axes, self.markers): ax.lines.remove(marker) self.minimap.delete_marker(self.event.resource_id.uuid) self.draw() def set_selected(self, value): if self.selected != value: self.selected = value color = self.selected_color if self.selected else self.color for marker in self.markers: marker.set(color=color) self.minimap.set_marker(self.event.resource_id.uuid, color=color) def update(self): if self.event.stime != self.position: self.set_position(self.event.stime / float(self.event.trace.fs)) self.draw() def draw(self): if self.animated: self._draw_animate() else: self.canvas.draw_idle() self.minimap.draw() def _draw_animate(self): self.canvas.restore_region(self.background) for marker in self.markers: if marker.get_axes().get_visible() and marker.get_visible(): self.fig.draw_artist(marker) if self.position_label.get_visible(): self.fig.draw_artist(self.position_label) self.canvas.blit(self.fig.bbox) def _set_animated(self, value): if self.animated != value: self.animated = value for marker in self.markers: marker.set_animated(value) self.position_label.set_animated(value) if self.animated == True: self.canvas.draw() self.background = self.canvas.copy_from_bbox(self.fig.bbox) class ThresholdMarker(QtCore.QObject): """Plots an horizontal line marker on a SignalViewerWidget to indicate a selected threshold value for the computed characteristic function. Attributes: threshold: A threshold value. Default: 0.0. active: Indicates whether the marker is active or not. Signals: thresholdChanged: 'threshold' value changed. 
""" thresholdChanged = QtCore.Signal(float) def __init__(self, ax, threshold=0.0): super(ThresholdMarker, self).__init__() self.ax = ax self.threshold = threshold self.active = False # Set threshold line self.figThreshold = self.ax.axhline(self.threshold) self.figThreshold.set(color='b', ls='--', lw=2, alpha=0.8, picker=5) self.figThreshold.set_visible(False) # Set threshold label bbox = dict(boxstyle="round", fc="Lightblue", ec="b", alpha=0.8) self.figThresholdLabel = self.ax.text(0, 0, "0.00", bbox=bbox) self.figThresholdLabel.set_visible(False) self.pick_threshold = None self.canvas = self.ax.figure.canvas self.canvas.mpl_connect('pick_event', self.onpick) self.canvas.mpl_connect('button_release_event', self.onrelease) self.canvas.mpl_connect('motion_notify_event', self.onmove) # Animation related attrs. self.background = None self.animated = False def onpick(self, event): if self.active: if event.mouseevent.button == 1: # left button clicked if event.artist == self.figThreshold: if not self.canvas.widgetlock.locked(): self.canvas.widgetlock(self) self.pick_threshold = event xdata, ydata = self.get_data(event.mouseevent) # Draw legend self.figThresholdLabel.set_position((xdata, ydata)) self.figThresholdLabel.set_visible(True) self.draw() def onrelease(self, event): if self.canvas.widgetlock.isowner(self): self.figThresholdLabel.set_visible(False) self.pick_threshold = None # End animation self._set_animated(False) self.draw() self.canvas.widgetlock.release(self) def onmove(self, event): if self.pick_threshold is not None: xdata, ydata = self.get_data(event) self.set_threshold(round(ydata, 2)) # Draw legend self.figThresholdLabel.set_position((xdata, ydata)) self._set_animated(True) self.draw() def get_data(self, event): inv = self.ax.transData.inverted() xdata, ydata = inv.transform((event.x, event.y)) ymin, ymax = self.ax.get_ylim() xmin, xmax = self.ax.get_xlim() if ydata < ymin: ydata = ymin elif ydata > ymax: ydata = ymax if ydata < 0.0: ydata = 0.0 if xdata 
< xmin: xdata = xmin elif xdata > xmax: xdata = xmax return xdata, ydata def set_threshold(self, value): if self.threshold != value: if value >= 0: self.threshold = value self.thresholdChanged.emit(self.threshold) self.figThreshold.set_ydata(self.threshold) self.figThresholdLabel.set_text("Threshold: %.2f" % self.threshold) self.draw() def set_visible(self, value): if self.active != value: self.figThreshold.set_visible(value) self.active = value self.draw() def get_visible(self): return self.active def draw(self): if self.animated: self._draw_animate() else: self.canvas.draw_idle() def _draw_animate(self): self.canvas.restore_region(self.background) if self.ax.get_visible() and self.figThreshold.get_visible(): self.ax.draw_artist(self.figThreshold) if self.figThresholdLabel.get_visible(): self.ax.draw_artist(self.figThresholdLabel) self.canvas.blit(self.ax.bbox) def _set_animated(self, value): if self.animated != value: self.animated = value self.figThreshold.set_animated(value) self.figThresholdLabel.set_animated(value) if self.animated == True: self.canvas.draw() self.background = self.canvas.copy_from_bbox(self.ax.bbox) class PlayBackMarker(QtCore.QObject): """Plots a vertical line marker on a SignalViewerWidget when signal is played to indicate the current position. Attributes: position: Current position of the marker. active: Indicates whether the marker is active or not. 
""" def __init__(self, fig, parent, position=0.0, active=False): super(PlayBackMarker, self).__init__() self.fig = fig self.parent = parent self.position = position self.active = active # Set lines self.markers = [] for ax in self.fig.axes: marker = ax.axvline(self.position) marker.set(color='k', lw=1, alpha=0.6) marker.set_visible(self.active) self.markers.append(marker) self.canvas = self.fig.canvas if self.active: self.parent.draw() def set_position(self, value): if value != self.position: self.position = value for marker in self.markers: marker.set_xdata(self.position) if self.active: self.parent.draw() def set_visible(self, value): if value != self.active: self.active = value for marker in self.markers: marker.set_visible(self.active) self.parent.draw() def get_visible(self): return self.active class MiniMap(QtGui.QWidget): """Shows the entire signal and allows the user to navigate through it. Provides an scrollable selector over the entire signal. Attributes: xmin: Selector lower limit (measured in h-axis units). xmax: Selector upper limit (measured in h-axis units). step: Selector length (measured in h-axis units). 
""" def __init__(self, parent, ax, record=None): super(MiniMap, self).__init__(parent) self.ax = ax self.xmin = 0.0 self.xmax = 0.0 self.step = 10.0 self.xrange = np.array([]) self.minimapFig = plt.figure() self.minimapFig.set_figheight(0.75) self.minimapFig.add_axes((0, 0, 1, 1)) self.minimapCanvas = FigureCanvas(self.minimapFig) self.minimapCanvas.setFixedHeight(64) self.minimapSelector = self.minimapFig.axes[0].axvspan(0, self.step, color='gray', alpha=0.5, animated=True) self.minimapSelection = self.minimapFig.axes[0].axvspan(0, self.step, color='LightCoral', alpha = 0.5, animated=True) self.minimapSelection.set_visible(False) self.minimapBackground = [] self.minimapSize = (self.minimapFig.bbox.width, self.minimapFig.bbox.height) self.press_selector = None self.playback_marker = None self.minimapCanvas.mpl_connect('button_press_event', self.onpress) self.minimapCanvas.mpl_connect('button_release_event', self.onrelease) self.minimapCanvas.mpl_connect('motion_notify_event', self.onmove) # Animation related attrs. 
self.background = None self.animated = False # Set the layout self.layout = QtGui.QVBoxLayout(self) self.layout.addWidget(self.minimapCanvas) # Animation related attributes self.parentViewer = parent # Set Markers dict self.markers = {} self.record = None if record is not None: self.set_record(record) def set_record(self, record, step): self.record = record self.step = step self.xrange = np.linspace(0, len(self.record.signal) / self.record.fs, num=len(self.record.signal), endpoint=False) self.xmin = self.xrange[0] self.xmax = self.xrange[-1] self.markers = {} ax = self.minimapFig.axes[0] ax.lines = [] formatter = FuncFormatter(lambda x, pos: str(datetime.timedelta(seconds=x))) ax.xaxis.set_major_formatter(formatter) ax.grid(True, which='both') # Set dataseries to plot xmin = self.xmin * self.record.fs xmax = self.xmax * self.record.fs pixel_width = np.ceil(self.minimapFig.get_figwidth() * self.minimapFig.get_dpi()) x_data, y_data = plotting.reduce_data(self.xrange, self.record.signal, pixel_width, xmin, xmax) # self._plot_data.set_xdata(x_data) # self._plot_data.set_ydata(y_data) ax.plot(x_data, y_data, color='black', rasterized=True) ax.set_xlim(self.xmin, self.xmax) plotting.adjust_axes_height(ax) # Set the playback marker self.playback_marker = PlayBackMarker(self.minimapFig, self) self.playback_marker.markers[0].set_animated(True) # Draw canvas self.minimapCanvas.draw() self.minimapBackground = self.minimapCanvas.copy_from_bbox(self.minimapFig.bbox) self.draw_animate() def onpress(self, event): self.press_selector = event xdata = round(self.get_xdata(event), 2) xmin = round(xdata - (self.step / 2.0), 2) xmax = round(xdata + (self.step / 2.0), 2) self.parentViewer._set_animated(True) self.set_selector_limits(xmin, xmax) def onrelease(self, event): self.press_selector = None # Finish parent animation self.parentViewer._set_animated(False) def onmove(self, event): if self.press_selector is not None: xdata = round(self.get_xdata(event), 2) xmin = round(xdata - 
(self.step / 2.0), 2) xmax = round(xdata + (self.step / 2.0), 2) self.set_selector_limits(xmin, xmax) def get_xdata(self, event): inv = self.minimapFig.axes[0].transData.inverted() xdata, _ = inv.transform((event.x, event.y)) return xdata def set_selector_limits(self, xmin, xmax): step = xmax - xmin if step >= self.xmax - self.xmin: xleft = self.xmin xright = self.xmax if xmin < self.xmin: xleft = self.xmin xright = self.step elif xmax > self.xmax: xleft = self.xmax - step xright = self.xmax else: xleft = xmin xright = xmax if (xleft, xright) != (self.minimapSelector.xy[1, 0], self.minimapSelector.xy[2, 0]): self.step = step self.minimapSelector.xy[:2, 0] = xleft self.minimapSelector.xy[2:4, 0] = xright self.ax.set_xlim(xleft, xright) self.draw_animate() else: self.parentViewer.draw() def get_selector_limits(self): return self.minimapSelector.xy[0, 0], self.minimapSelector.xy[2, 0] def draw(self): self.draw_animate() def draw_animate(self): size = self.minimapFig.bbox.width, self.minimapFig.bbox.height if size != self.minimapSize: self.minimapSize = size self.minimapCanvas.draw() self.minimapBackground = self.minimapCanvas.copy_from_bbox(self.minimapFig.bbox) self.minimapCanvas.restore_region(self.minimapBackground) self.minimapFig.draw_artist(self.minimapSelection) self.minimapFig.draw_artist(self.minimapSelector) self.minimapFig.draw_artist(self.playback_marker.markers[0]) for marker in self.markers.values(): self.minimapFig.draw_artist(marker) self.minimapCanvas.blit(self.minimapFig.bbox) def set_visible(self, value): self.minimapCanvas.setVisible(value) def get_visible(self): return self.minimapCanvas.isVisible() def set_selection_limits(self, xleft, xright): self.minimapSelection.xy[:2, 0] = xleft self.minimapSelection.xy[2:4, 0] = xright self.draw_animate() def set_selection_visible(self, value): self.minimapSelection.set_visible(value) self.draw_animate() def create_marker(self, key, position, **kwargs): if self.xmin <= position <= self.xmax: marker = 
self.minimapFig.axes[0].axvline(position, animated=True) self.markers[key] = marker self.markers[key].set(**kwargs) def set_marker_position(self, key, value): marker = self.markers.get(key) if marker is not None: if self.xmin <= value <= self.xmax: marker.set_xdata(value) def set_marker(self, key, **kwargs): marker = self.markers.get(key) if marker is not None: kwargs.pop("animated", None) # marker's animated property must be always true to be drawn properly marker.set(**kwargs) def delete_marker(self, key): marker = self.markers.get(key) if marker is not None: self.minimapFig.axes[0].lines.remove(marker) self.markers.pop(key) class SignalViewerWidget(QtGui.QWidget): """Shows different visualizations of a seismic signal (magnitude, envelope, spectrogram, characteristic function). Allows the user to manipulate it (navigate through it, zoom in/out, edit detected events, select threshold value, etc...) """ CF_loaded = QtCore.Signal(bool) event_selected = QtCore.Signal(rc.ApasvoEvent) def __init__(self, parent, document=None): super(SignalViewerWidget, self).__init__(parent) self.document = document self.xmin = 0.0 self.xmax = 0.0 self.xleft = 0.0 self.xright = 0.0 self.time = np.array([]) self.fs = 0.0 self.signal = None self.envelope = None self.cf = None self.time = None self._signal_data = None self._envelope_data = None self._cf_data = None self.fig, _ = plt.subplots(3, 1) self.signal_ax = self.fig.axes[0] self.cf_ax = self.fig.axes[1] self.specgram_ax = self.fig.axes[2] self.canvas = FigureCanvas(self.fig) self.canvas.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Expanding, QtGui.QSizePolicy.Policy.Expanding)) self.canvas.setMinimumHeight(320) self.graphArea = QtGui.QScrollArea(self) self.graphArea.setWidget(self.canvas) self.graphArea.setWidgetResizable(True) self.eventMarkers = {} self.last_right_clicked_event = None self.thresholdMarker = None self.playback_marker = None self.selector = SpanSelector(self.fig) self.minimap = MiniMap(self, 
self.signal_ax, None) # Load Spectrogram settings self.update_specgram_settings() # Animation related attributes self.background = None self.animated = False # Create context menus self.event_context_menu = QtGui.QMenu(self) self.takanami_on_event_action = QtGui.QAction("Apply Takanami to Event", self) self.takanami_on_event_action.setStatusTip("Refine event position by using Takanami algorithm") self.event_context_menu.addAction(self.takanami_on_event_action) self.takanami_on_event_action.triggered.connect(self.apply_takanami_to_selected_event) self.selection_context_menu = QtGui.QMenu(self) self.create_event_action = QtGui.QAction("Create New Event on Selection", self) self.create_event_action.setStatusTip("Create a new event on selection") self.takanami_on_selection_action = QtGui.QAction("Apply Takanami to Selection", self) self.takanami_on_selection_action.setStatusTip("Apply Takanami algorithm to selection") self.selection_context_menu.addAction(self.create_event_action) self.selection_context_menu.addAction(self.takanami_on_selection_action) self.create_event_action.triggered.connect(self.create_event_on_selection) self.takanami_on_selection_action.triggered.connect(self.apply_takanami_to_selection) # format axes formatter = FuncFormatter(lambda x, pos: clt.float_secs_2_string_date(x, self.document.record.starttime)) for ax in self.fig.axes: ax.callbacks.connect('xlim_changed', self.on_xlim_change) ax.xaxis.set_major_formatter(formatter) plt.setp(ax.get_xticklabels(), visible=True) ax.grid(True, which='both') self.specgram_ax.callbacks.connect('ylim_changed', self.on_ylim_change) self.specgram_ax.set_xlabel('Time (seconds)') plt.setp(self.signal_ax.get_yticklabels(), visible=False) #self.signal_ax.set_ylabel('Signal Amp.') self.cf_ax.set_ylabel('CF Amp.') self.specgram_ax.set_ylabel('Frequency (Hz)') # Set the layout self.layout = QtGui.QVBoxLayout(self) self.layout.addWidget(self.graphArea) self.layout.addWidget(self.minimap) 
self.selector.toggled.connect(self.minimap.set_selection_visible) self.selector.valueChanged.connect(self.minimap.set_selection_limits) self.selector.right_clicked.connect(self.on_selector_right_clicked) if self.document is not None: self.set_record(document) @property def data_loaded(self): return self.document is not None def set_record(self, document, step=120.0): self.document = document self.fs = self.document.record.fs self.signal = self.document.record.signal self.envelope = env.envelope(self.signal) self.cf = self.document.record.cf self.time = np.linspace(0, len(self.signal) / self.fs, num=len(self.signal), endpoint=False) self.xmax = self.time[-1] # Draw minimap self.minimap.minimapSelector.set(visible=False) # Hide minimap selector while loading self.minimap.set_record(self.document.record, step) # Plot signal step_samples = step * self.fs self._signal_data = self.signal_ax.plot(self.time[:step_samples], self.signal[:step_samples], color='black', rasterized=True)[0] # Plot envelope self._envelope_data = self.signal_ax.plot(self.time[:step_samples], self.envelope[:step_samples], color='red', rasterized=True)[0] # Adjust y axis for signal plot signal_yaxis_max_value = max(np.max(self.signal), np.max(self.envelope)) signal_yaxis_min_value = np.min(self.signal) plotting.adjust_axes_height(self.signal_ax, max_value=signal_yaxis_max_value, min_value=signal_yaxis_min_value) # Plot CF cf_loaded = (self.cf.size != 0) self.set_cf_visible(cf_loaded) self.CF_loaded.emit(cf_loaded) cf_step_samples = min(step_samples,len(self.cf)) self._cf_data = self.cf_ax.plot(self.time[:cf_step_samples], self.cf[:cf_step_samples], color='black', rasterized=True)[0] # Adjust y axis for CF plot if cf_loaded: plotting.adjust_axes_height(self.cf_ax, max_value=np.max(self.cf), min_value=np.min(self.cf)) self.thresholdMarker = ThresholdMarker(self.cf_ax) # Plot espectrogram plotting.plot_specgram(self.specgram_ax, self.signal, self.fs, nfft=self.specgram_windowlen, 
noverlap=self.specgram_noverlap, window=self.specgram_window) # Set the span selector self.selector.fs = self.fs self.selector.set_active(False) self.selector.set_selection_limits(self.xmin, self.xmax) # Set the playback marker self.playback_marker = PlayBackMarker(self.fig, self) # Set the initial xlimits self.set_xlim(0, step) self.subplots_adjust() # Set event markers self.eventMarkers = {} for event in self.document.record.events: self.create_event(event) # Now activate selector again on minimap self.minimap.minimapSelector.set(visible=True) self.minimap.draw() def unset_record(self): self.document = None self.signal = None self.envelope = None self.cf = None self.time = None self._signal_data = None self._envelope_data = None self._cf_data = None self.xmin, self.xmax = 0.0, 0.0 self.eventMarkers = {} # Clear axes self.signal_ax.lines = [] self.cf_ax.lines = [] self.specgram_ax.lines = [] self.specgram_ax.images = [] self.CF_loaded.emit(False) def update_cf(self): if self.data_loaded: self.cf = self.document.record.cf self._cf_data.set_xdata(self.time[:len(self.cf)]) self._cf_data.set_ydata(self.cf) plotting.adjust_axes_height(self.cf_ax) cf_loaded = (self.cf.size != 0) self.CF_loaded.emit(cf_loaded) self.set_cf_visible(cf_loaded) self.draw() def create_events(self, new_events_set): for event in new_events_set.get(self.document.record.uuid, []): self.create_event(event) def create_event(self, event): event_id = event.resource_id.uuid if event_id not in self.eventMarkers: marker = EventMarker(self.fig, self.minimap, self.document, event) self.eventMarkers[event_id] = marker marker.event_selected.connect(self.event_selected.emit) marker.right_clicked.connect(self.on_event_right_clicked) def delete_events(self, new_events_set): for event in new_events_set.get(self.document.record.uuid, []): self.delete_event(event) def delete_event(self, event): event_id = event.resource_id.uuid self.eventMarkers[event_id].remove() self.eventMarkers.pop(event_id) def 
update_event(self, event): self.eventMarkers[event.resource_id.uuid].update() def set_xlim(self, l, r): xmin = max(0, l) xmax = min(self.xmax, r) self.signal_ax.set_xlim(xmin, xmax) def on_xlim_change(self, ax): xmin, xmax = ax.get_xlim() if (self.xleft, self.xright) != (xmin, xmax): self.xleft, self.xright = xmin, xmax if self.xmin <= xmin <= xmax <= self.xmax: # Update minimap selector if (xmin, xmax) != self.minimap.get_selector_limits(): self.minimap.set_selector_limits(xmin, xmax) # Update axes for axes in self.fig.axes: if ax != axes: axes.set_xlim(xmin, xmax) # Update data xmin = int(max(0, xmin) * self.fs) xmax = int(min(self.xmax, xmax) * self.fs) pixel_width = np.ceil(self.fig.get_figwidth() * self.fig.get_dpi()) if self._signal_data is not None: x_data, y_data = plotting.reduce_data(self.time, self.signal, pixel_width, xmin, xmax) self._signal_data.set_xdata(x_data) self._signal_data.set_ydata(y_data) if self._envelope_data is not None: x_data, y_data = plotting.reduce_data(self.time, self.envelope, pixel_width, xmin, xmax) self._envelope_data.set_xdata(x_data) self._envelope_data.set_ydata(y_data) if self._cf_data is not None and self.cf_ax.get_visible(): x_data, y_data = plotting.reduce_data(self.time[:len(self.cf)], self.cf, pixel_width, xmin, xmax) self._cf_data.set_xdata(x_data) self._cf_data.set_ydata(y_data) # Draw graph self.draw() else: xmin = max(self.xmin, xmin) xmax = min(self.xmax, xmax) ax.set_xlim(xmin, xmax) def on_ylim_change(self, ax): if self.data_loaded: if ax == self.specgram_ax: ymin, ymax = ax.get_ylim() nyquist_freq = (self.fs / 2.0) if ymin < 0.0: ax.set_ylim(0.0, ymax) elif ymax > nyquist_freq: ax.set_ylim(ymin, nyquist_freq) def set_event_selection(self, events): event_id_list = [event.resource_id.uuid for event in events] for event_id in self.eventMarkers: self.eventMarkers[event_id].set_selected(event_id in event_id_list) self.draw() self.minimap.draw() def set_position(self, pos): """""" xmin, xmax = 
self.signal_ax.get_xlim() mrange = xmax - xmin l, r = pos - mrange / 2.0, pos + mrange / 2.0 if l < self.xmin: l, r = self.xmin, mrange elif r > self.xmax: l, r = self.xmax - mrange, self.xmax self.set_xlim(l, r) def goto_event(self, event): if event.resource_id.uuid in self.eventMarkers: self.set_position(event.stime / self.fs) def showEvent(self, event): self.draw() self.minimap.draw_animate() def resizeEvent(self, event): self.draw() self.minimap.draw_animate() def set_signal_amplitude_visible(self, show_sa): if self._signal_data is not None and self._envelope_data is not None: if self._signal_data.get_visible() != show_sa: self._signal_data.set_visible(show_sa) show_axis = (self._signal_data.get_visible() + self._envelope_data.get_visible()) self.signal_ax.set_visible(show_axis) if self.data_loaded: self.subplots_adjust() self.draw() def set_signal_envelope_visible(self, show_se): if self._signal_data is not None and self._envelope_data is not None: if self._envelope_data.get_visible() != show_se: self._envelope_data.set_visible(show_se) show_axis = (self._signal_data.get_visible() + self._envelope_data.get_visible()) self.signal_ax.set_visible(show_axis) if self.data_loaded: self.subplots_adjust() self.draw() def set_cf_visible(self, show_cf): if self.cf_ax.get_visible() != show_cf: if self.data_loaded: if len(self.cf) <= 0: self.cf_ax.set_visible(False) else: self.cf_ax.set_visible(show_cf) self.subplots_adjust() self.draw() def set_espectrogram_visible(self, show_eg): if self.specgram_ax.get_visible() != show_eg: self.specgram_ax.set_visible(show_eg) if self.data_loaded: self.subplots_adjust() self.draw() def set_minimap_visible(self, show_mm): if self.minimap.get_visible() != show_mm: self.minimap.set_visible(show_mm) self.minimap.draw_animate() def set_threshold_visible(self, show_thr): if self.thresholdMarker: if self.thresholdMarker.get_visible() != show_thr: self.thresholdMarker.set_visible(show_thr) self.draw() def subplots_adjust(self): 
visible_subplots = [ax for ax in self.fig.get_axes() if ax.get_visible()] for i, ax in enumerate(visible_subplots): correct_geometry = (len(visible_subplots), 1, i + 1) if correct_geometry != ax.get_geometry(): ax.change_geometry(len(visible_subplots), 1, i + 1) # Adjust space between subplots self.fig.subplots_adjust(left=0.06, right=0.95, bottom=0.14, top=0.95, hspace=0.22) def get_selector_limits(self): return self.selector.get_selector_limits() def set_selector_limits(self, xleft, xright): self.selector.set_selector_limits(xleft, xright) def set_selection_enabled(self, value): self.selector.set_enabled(value) def set_playback_position(self, position): if self.playback_marker is not None: self.playback_marker.set_position(position) self.minimap.playback_marker.set_position(position) def set_playback_marker_visible(self, show_marker): if self.playback_marker is not None: self.playback_marker.set_visible(show_marker) self.minimap.playback_marker.set_visible(show_marker) def on_event_right_clicked(self, event): self.last_right_clicked_event = event self.event_context_menu.exec_(QtGui.QCursor.pos()) def apply_takanami_to_selected_event(self): takanamidialog.TakanamiDialog(self.document, seismic_event=self.last_right_clicked_event).exec_() def apply_takanami_to_selection(self): xleft, xright = self.get_selector_limits() takanamidialog.TakanamiDialog(self.document, xleft, xright).exec_() def create_event_on_selection(self): xleft, xright = self.get_selector_limits() xleft, xright = xleft * self.fs, xright * self.fs cf = self.cf[xleft:xright] if cf.size > 0: time = (xleft + np.argmax(cf)) else: time = (xleft + ((xright - xleft) / 2.0)) self.document.createEvent(time=time) def draw(self): if self.animated: self._draw_animate() else: self.canvas.draw_idle() def _draw_animate(self): self.canvas.restore_region(self.background) for artist in self._get_animated_artists(): if artist.get_visible(): ax = artist.get_axes() if ax is not None: if artist.get_axes().get_visible(): 
self.fig.draw_artist(artist) else: self.fig.draw_artist(artist) self.canvas.blit(self.fig.bbox) def _set_animated(self, value): if self.animated != value: self.animated = value for artist in self._get_animated_artists(): artist.set_animated(value) if self.animated == True: images = [] for ax in self.fig.axes: images.extend(ax.images) for image in images: image.set_visible(False) self.canvas.draw() self.background = self.canvas.copy_from_bbox(self.fig.bbox) for image in images: image.set_visible(True) def _get_animated_artists(self): artists = [] for ax in self.fig.axes: artists.extend(ax.images) artists.extend(ax.lines) artists.append(ax.xaxis) artists.append(ax.yaxis) artists.extend(ax.patches) artists.extend(ax.spines.values()) for artist in artists: yield artist def update_specgram_settings(self): # load specgram settings settings = QtCore.QSettings(_organization, _application_name) settings.beginGroup("specgram_settings") self.specgram_windowlen = int(settings.value('window_len', settingsdialog.SPECGRAM_WINDOW_LENGTHS[4])) self.specgram_noverlap = int(settings.value('noverlap', self.specgram_windowlen / 2)) self.specgram_window = settings.value('window', plotting.SPECGRAM_WINDOWS[2]) settings.endGroup() if self.data_loaded: # Plot espectrogram self.specgram_ax.images = [] # Save x-axis limits limits = self.signal_ax.get_xlim() # Draw spectrogram plotting.plot_specgram(self.specgram_ax, self.signal, self.fs, nfft=self.specgram_windowlen, noverlap=self.specgram_noverlap, window=self.specgram_window) # Restore x-axis limits self.signal_ax.set_xlim(*limits) def paintEvent(self, paintEvent): super(SignalViewerWidget, self).paintEvent(paintEvent) def on_selector_right_clicked(self): xleft, xright = self.get_selector_limits() self.takanami_on_selection_action.setEnabled((xright - xleft) >= (takanamidialog.MINIMUM_MARGIN_IN_SECS * 2)) self.selection_context_menu.exec_(QtGui.QCursor.pos())
PypiClean
/Office365_REST_with_timeout-0.1.1-py3-none-any.whl/office365/onenote/pages/page.py
from office365.onenote.entity_schema_object_model import OnenoteEntitySchemaObjectModel
from office365.onenote.notebooks.notebook import Notebook
from office365.onenote.pages.page_links import PageLinks
from office365.onenote.sections.section import OnenoteSection
from office365.runtime.client_result import ClientResult
from office365.runtime.client_value_collection import ClientValueCollection
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath


class OnenotePage(OnenoteEntitySchemaObjectModel):
    """A page in a OneNote notebook."""

    def get_content(self):
        """Download the page's HTML content.

        Queues a deferred ``GET .../content`` service-operation request; the
        returned :class:`ClientResult` is populated only after the client
        context executes its pending queries.

        :rtype: ClientResult
        """
        result = ClientResult(self.context)
        qry = ServiceOperationQuery(self, "content", None, None, None, result)

        def _construct_query(request):
            """Force the HTTP verb to GET (service-operation queries would
            otherwise be issued as POST).

            :type request: office365.runtime.http.request_options.RequestOptions
            """
            request.method = HttpMethod.Get

        # The hook must be registered before the query so it fires when the
        # queued request is eventually executed.
        self.context.before_execute(_construct_query)
        self.context.add_query(qry)
        return result

    @property
    def content_url(self):
        """The URL for the page's HTML content. Read-only.

        :rtype: str or None
        """
        return self.properties.get("contentUrl", None)

    @property
    def links(self):
        """Links for opening the page. The oneNoteClientURL link opens the page
        in the OneNote native client if it's installed. The oneNoteWebUrl link
        opens the page in OneNote on the web. Read-only.
        """
        return self.properties.get("links", PageLinks())

    @property
    def user_tags(self):
        """The collection of tags applied to the page, as plain strings.
        Read-only.

        (The previous docstring was copy-pasted from :attr:`links`.)
        """
        return self.properties.get("userTags", ClientValueCollection(str))

    @property
    def parent_notebook(self):
        """The notebook that contains the page. Read-only.

        :rtype: Notebook
        """
        return self.get_property('parentNotebook',
                                 Notebook(self.context,
                                          ResourcePath("parentNotebook", self.resource_path)))

    @property
    def parent_section(self):
        """The section that contains the page. Read-only.

        :rtype: OnenoteSection
        """
        return self.get_property('parentSection',
                                 OnenoteSection(self.context,
                                                ResourcePath("parentSection", self.resource_path)))

    def get_property(self, name, default_value=None):
        # When the caller supplies no default, resolve one lazily for the
        # navigation properties defined above, then defer to the base class.
        if default_value is None:
            property_mapping = {
                "userTags": self.user_tags,
                "parentSection": self.parent_section,
                "parentNotebook": self.parent_notebook
            }
            default_value = property_mapping.get(name, None)
        return super(OnenotePage, self).get_property(name, default_value)
PypiClean
/FiPy-3.4.4.tar.gz/FiPy-3.4.4/fipy/viewers/mayaviViewer/mayaviDaemon.py
from __future__ import division
from __future__ import unicode_literals

__docformat__ = 'restructuredtext'

# Standard imports.
import os
import signal
import sys

# Enthought library imports
try:
    from mayavi.plugins.app import Mayavi
    from mayavi.sources.vtk_file_reader import VTKFileReader
    from pyface.timer.api import Timer
    from mayavi import mlab
    from tvtk.api import tvtk
except ImportError as e:
    # Fall back to the legacy ``enthought.``-namespaced packages.
    # NOTE(review): this branch imports neither ``tvtk`` nor uses ``e``;
    # ``_examine_data`` references ``tvtk.DataSet`` — confirm the fallback is
    # still exercised on supported mayavi versions.
    from enthought.mayavi.plugins.app import Mayavi
    from enthought.mayavi.sources.vtk_file_reader import VTKFileReader
    from enthought.pyface.timer.api import Timer
    from enthought.mayavi import mlab

# FiPy library imports
from fipy.tools.numerix import array, concatenate, where, zeros

__all__ = ["MayaviDaemon"]

from future.utils import text_to_native_str
__all__ = [text_to_native_str(n) for n in __all__]

######################################################################
class MayaviDaemon(Mayavi):
    """Given a file name and a mayavi2 data reader object, this class
    polls the file for any changes and automatically updates the
    mayavi pipeline.
    """

    # All live daemon instances; walked by ``_sigint_handler`` for cleanup.
    _viewers = []

    def parse_command_line(self, argv):
        """Parse command line options.

        Parameters
        ----------
        argv : :obj:`list` of :obj:`str`
            The command line arguments
        """
        from optparse import OptionParser

        usage = "usage: %prog [options]"

        parser = OptionParser(usage)
        parser.add_option("-l", "--lock", action="store", dest="lock",
                          type="string", default=None,
                          help="path of lock file")
        parser.add_option("-c", "--cell", action="store", dest="cell",
                          type="string", default=None,
                          help="path of cell vtk file")
        parser.add_option("-f", "--face", action="store", dest="face",
                          type="string", default=None,
                          help="path of face vtk file")
        parser.add_option("--xmin", action="store", dest="xmin",
                          type="float", default=None,
                          help="minimum x value")
        parser.add_option("--xmax", action="store", dest="xmax",
                          type="float", default=None,
                          help="maximum x value")
        parser.add_option("--ymin", action="store", dest="ymin",
                          type="float", default=None,
                          help="minimum y value")
        parser.add_option("--ymax", action="store", dest="ymax",
                          type="float", default=None,
                          help="maximum y value")
        parser.add_option("--zmin", action="store", dest="zmin",
                          type="float", default=None,
                          help="minimum z value")
        parser.add_option("--zmax", action="store", dest="zmax",
                          type="float", default=None,
                          help="maximum z value")
        parser.add_option("--datamin", action="store", dest="datamin",
                          type="float", default=None,
                          help="minimum data value")
        parser.add_option("--datamax", action="store", dest="datamax",
                          type="float", default=None,
                          help="maximum data value")
        parser.add_option("--fps", action="store", dest="fps",
                          type="float", default=1.0,
                          help="frames per second to attempt to display")

        (options, args) = parser.parse_args(argv)

        self._lockfname = options.lock
        self._cellfname = options.cell
        self._facefname = options.face
        # Any bound left as None is filled in later (in ``run``) from the
        # actual data bounds.
        self._bounds = [options.xmin, options.xmax,
                        options.ymin, options.ymax,
                        options.zmin, options.zmax]
        self._datamin = options.datamin
        self._datamax = options.datamax
        self._fps = options.fps

    @staticmethod
    def _examine_data(source, datatype, bounds):
        """Determine contents of source

        Parameters
        ----------
        source : tvtk.DataSet
        datatype : str
            either "cell_data" or "point_data"
        bounds : array_like
            boundaries of existing data sets

        Returns
        -------
        has : dict
            whether each rank is present in data set
        bounds : array_like
            boundaries of data sets
        """
        ranks = ["scalars", "vectors", "tensors"]
        has = dict((rank, False) for rank in ranks)
        if source is not None:
            # Newer versions of mayavi (> 4.7?) store AssignAttribute objects
            # in outputs, so this clumsy bit is to extract the underlying
            # DataSet objects.
            # This is a clear sign that we're using this completely wrong,
            # but, eh, who cares?
            sourceoutputs = [out
                             if isinstance(out, tvtk.DataSet)
                             else out.trait_get()['output']
                             for out in source.outputs]

            for rank in ranks:
                tmp = [out.trait_get()[datatype].trait_get()[rank]
                       for out in sourceoutputs]
                tmp = [out for out in tmp if out is not None]
                has[rank] = (len(tmp) > 0)

            bounds = concatenate((bounds,
                                  [out.bounds for out in sourceoutputs]),
                                 axis=0)

        return has, bounds

    def run(self):
        MayaviDaemon._viewers.append(self)

        mlab.clf()

        self.cellsource = self.setup_source(self._cellfname)
        self.has_cell, bounds = self._examine_data(source=self.cellsource,
                                                   datatype="cell_data",
                                                   bounds=zeros((0, 6), 'l'))

        self.facesource = self.setup_source(self._facefname)
        self.has_face, bounds = self._examine_data(source=self.facesource,
                                                   datatype="point_data",
                                                   bounds=bounds)

        # Collapse the per-source bounds into one overall bounding box.
        boundsmin = bounds.min(axis=0)
        boundsmax = bounds.max(axis=0)

        bounds = (boundsmin[0], boundsmax[1],
                  boundsmin[2], boundsmax[3],
                  boundsmin[4], boundsmax[5])

        # Only bounds left unset on the command line are taken from the data.
        self._bounds = where(self._bounds == array((None,)),
                             bounds,
                             self._bounds).astype(float)

        self.view_data()

        # Poll the lock file.
        self.timer = Timer(1000 / self._fps, self.poll_file)

    def __del__(self):
        # Remove the temporary exchange files and, once empty, the directory
        # that held them (all three live in the same temporary directory).
        dir = None
        for fname in [self._cellfname, self._facefname, self._lockfname]:
            if fname and os.path.isfile(fname):
                os.unlink(fname)
                if not dir:
                    dir = os.path.dirname(fname)
        if dir:
            os.rmdir(dir)

    @staticmethod
    def _sigint_handler(signum, frame):
        # Explicitly tear down every registered viewer before exiting so the
        # temporary files are removed even on SIGINT/SIGTERM.
        for viewer in MayaviDaemon._viewers:
            viewer.__del__()
        raise SystemExit("MayaviDaemon cleaned up")

    def poll_file(self):
        # The presence of the lock file signals that new data is available;
        # its (optional) contents name a screenshot destination.
        if os.path.isfile(self._lockfname):
            self.update_pipeline(self.cellsource)
            self.update_pipeline(self.facesource)
            with open(self._lockfname, 'r') as lock:
                filename = lock.read()
            if len(filename) > 0:
                mlab.savefig(filename)
            os.unlink(self._lockfname)

    def update_pipeline(self, source):
        """Override this to do something else if needed.
        """
        if source is not None:
            source.scene.disable_render = True
            source.scene.anti_aliasing_frames = 0
            # Force the reader to re-read the file.
            source.reader.modified()
            source.update()
            # Propagate the changes in the pipeline.
            source.data_changed = True
            source.scene.disable_render = False

    def setup_source(self, fname):
        """Given a VTK file name `fname`, this creates a mayavi2 reader
        for it and adds it to the pipeline.  It returns the reader
        created, or `None` if no file name was given.
        """
        if fname is None:
            return None

        source = VTKFileReader()
        source.initialize(fname)
        mlab.pipeline.add_dataset(source)

        return source

    def clip_data(self, src):
        # Clip the data set to ``self._bounds`` using an (invisible) box
        # widget; on mayavi builds without data_set_clipper, warn and return
        # the source unclipped.
        if hasattr(mlab.pipeline, "data_set_clipper"):
            clip = mlab.pipeline.data_set_clipper(src)

            clip.filter.inside_out = True

            clip.widget.widget_mode = 'Box'
            clip.widget.widget.place_factor = 1.
            clip.widget.widget.place_widget(self._bounds)
            clip.widget.update_implicit_function()

            clip.widget.visible = False
        else:
            import warnings
            warnings.warn("Mayavi r24017 or newer needed for data_set_clipper()",
                          UserWarning, stacklevel=2)
            clip = src

        return clip

    def _view_data(self, source, has, has_scale_bar, cell_data=False):
        """Add visualization modules for `source` to the pipeline.

        Parameters
        ----------
        source : tvtk.DataSet
        has : dict
            whether each rank is present in data set
        has_scale_bar : bool
            whether a scale bar has already been created
        cell_data : bool
            whether source contains cell_data that may need conversion
            to point_data

        Returns
        -------
        has_scale_bar : bool
            whether a scale bar has been created
        """
        if source is not None:
            clip = self.clip_data(source)
            if has["scalars"]:
                s = mlab.pipeline.surface(clip,
                                          vmin=self._datamin,
                                          vmax=self._datamax)
                # Only the first module created gets a scale bar.
                if not has_scale_bar:
                    s.module_manager.scalar_lut_manager.show_scalar_bar = True
                    has_scale_bar = True
            if cell_data:
                # Vector glyphs need point data, so convert cell data first.
                clip = mlab.pipeline.cell_to_point_data(clip)
            if has["vectors"]:
                v = mlab.pipeline.vectors(clip,
                                          vmin=self._datamin,
                                          vmax=self._datamax)
                if not has_scale_bar:
                    v.module_manager.scalar_lut_manager.show_scalar_bar = True
                    has_scale_bar = True
        return has_scale_bar

    def view_data(self):
        """Sets up the mayavi pipeline for the visualization.
        """
        has_scale_bar = self._view_data(source=self.cellsource,
                                        has=self.has_cell,
                                        has_scale_bar=False,
                                        cell_data=True)
        has_scale_bar = self._view_data(source=self.facesource,
                                        has=self.has_face,
                                        has_scale_bar=has_scale_bar)

# Install cleanup handlers so temporary files are removed on termination.
signal.signal(signal.SIGINT, MayaviDaemon._sigint_handler)
try:
    signal.signal(signal.SIGHUP, MayaviDaemon._sigint_handler)
except AttributeError:
    # not available on Windows
    pass
signal.signal(signal.SIGTERM, MayaviDaemon._sigint_handler)

def main(argv=None):
    """Simple helper to start up the mayavi application.  This returns
    the running application."""
    m = MayaviDaemon()
    m.main(argv)
    return m

if __name__ == '__main__':
    main(sys.argv[1:])
PypiClean
/NeodroidVision-0.3.0-py36-none-any.whl/neodroidvision/mixed/architectures/self_attention_network/self_attention_modules/functions/subtraction2_zeropad.py
__author__ = "heider"
__doc__ = r"""
           Created on 26/01/2022
           """

import torch
from torch.autograd import Function
from torch.nn.modules.utils import _pair

from .self_attention_utilities import (
    CUDA_NUM_THREADS,
    Stream,
    get_blocks_,
    get_dtype_str,
    kernel_loop,
    load_kernel,
)

# CUDA source for the forward pass: for every output location, subtract each
# kernel-window neighbor of input2 from the window-center value of input1,
# writing zeros-padded neighbors as the bare center value.
_subtraction2_zeropad_forward_kernel = (
    kernel_loop
    + r"""
extern "C"
__global__ void subtraction2_zeropad_forward_kernel(
const ${Dtype}* bottom1_data, const ${Dtype}* bottom2_data, ${Dtype}* top_data) {
  CUDA_KERNEL_LOOP(index, ${nthreads}) {
    const int n = index / ${input_channels} / ${top_height} / ${top_width};
    const int c = (index / ${top_height} / ${top_width}) % ${input_channels};
    const int h = (index / ${top_width}) % ${top_height};
    const int w = index % ${top_width};
    const int h_in_center = -${pad_h} + h * ${stride_h} + (${kernel_h} - 1) / 2 * ${dilation_h};
    const int w_in_center = -${pad_w} + w * ${stride_w} + (${kernel_w} - 1) / 2 * ${dilation_w};
    const int offset_center = ((n * ${input_channels} + c) * ${bottom_height} + h_in_center) * ${bottom_width} + w_in_center;
    for (int kh = 0; kh < ${kernel_h}; ++kh) {
      for (int kw = 0; kw < ${kernel_w}; ++kw) {
        const int h_in = -${pad_h} + h * ${stride_h} + kh * ${dilation_h};
        const int w_in = -${pad_w} + w * ${stride_w} + kw * ${dilation_w};
        const int offset_top = ((n * ${input_channels} + c) * ${kernel_h} * ${kernel_w} + (kh * ${kernel_w} + kw)) * ${top_height} * ${top_width} + h * ${top_width} + w;
        if ((h_in >= 0) && (h_in < ${bottom_height}) && (w_in >= 0) && (w_in < ${bottom_width})) {
          const int offset_bottom = ((n * ${input_channels} + c) * ${bottom_height} + h_in) * ${bottom_width} + w_in;
          top_data[offset_top] = bottom1_data[offset_center] - bottom2_data[offset_bottom];
        }
        else
          top_data[offset_top] = bottom1_data[offset_center];
      }
    }
  }
}
"""
)

# CUDA source for d(output)/d(input1): each input1 location that is a window
# center (stride-aligned) accumulates the incoming gradient over all kernel
# positions of its window.
_subtraction2_zeropad_input1_backward_kernel = (
    kernel_loop
    + r"""
extern "C"
__global__ void subtraction2_zeropad_input1_backward_kernel(
    const ${Dtype}* const top_diff, ${Dtype}* bottom_diff) {
  CUDA_KERNEL_LOOP(index, ${nthreads}) {
    const int n = index / ${input_channels} / ${bottom_height} / ${bottom_width};
    const int c = (index / ${bottom_height} / ${bottom_width}) % ${input_channels};
    const int h = (index / ${bottom_width}) % ${bottom_height};
    const int w = index % ${bottom_width};
    ${Dtype} value = 0;
    if (((h % ${stride_h}) == 0) && ((w % ${stride_w}) == 0)) {
      const int h_out = h / ${stride_h};
      const int w_out = w / ${stride_w};
      for (int kh = 0; kh < ${kernel_h}; ++kh) {
        for (int kw = 0; kw < ${kernel_w}; ++kw) {
          const int offset_top = ((n * ${input_channels} + c) * ${kernel_h} * ${kernel_w} + (kh * ${kernel_w} + kw)) * ${top_height} * ${top_width} + h_out * ${top_width} + w_out;
          value += top_diff[offset_top];
        }
      }
    }
    bottom_diff[index] = value;
  }
}
"""
)

# CUDA source for d(output)/d(input2): each input2 location accumulates the
# negated gradient from every (kh, kw, output position) combination that
# sampled it in the forward pass.
_subtraction2_zeropad_input2_backward_kernel = (
    kernel_loop
    + r"""
extern "C"
__global__ void subtraction2_zeropad_input2_backward_kernel(
    const ${Dtype}* const top_diff, ${Dtype}* bottom_diff) {
  CUDA_KERNEL_LOOP(index, ${nthreads}) {
    const int n = index / ${input_channels} / ${bottom_height} / ${bottom_width};
    const int c = (index / ${bottom_height} / ${bottom_width}) % ${input_channels};
    const int h = (index / ${bottom_width}) % ${bottom_height};
    const int w = index % ${bottom_width};
    ${Dtype} value = 0;
    for (int kh = 0; kh < ${kernel_h}; ++kh) {
      for (int kw = 0; kw < ${kernel_w}; ++kw) {
        const int h_out_s = h + ${pad_h} - kh * ${dilation_h};
        const int w_out_s = w + ${pad_w} - kw * ${dilation_w};
        if (((h_out_s % ${stride_h}) == 0) && ((w_out_s % ${stride_w}) == 0)) {
          const int h_out = h_out_s / ${stride_h};
          const int w_out = w_out_s / ${stride_w};
          if ((h_out >= 0) && (h_out < ${top_height}) && (w_out >= 0) && (w_out < ${top_width})) {
            const int offset_top = ((n * ${input_channels} + c) * ${kernel_h} * ${kernel_w} + (kh * ${kernel_w} + kw)) * ${top_height} * ${top_width} + h_out * ${top_width} + w_out;
            value += -top_diff[offset_top];
          }
        }
      }
    }
    bottom_diff[index] = value;
  }
}
"""
)

__all__ = ["Subtraction2Zeropad", "subtraction2_zeropad"]


class Subtraction2Zeropad(Function):
    """Autograd wrapper around the zero-padded pairwise-subtraction CUDA
    kernels above (pairwise term of self-attention networks)."""

    @staticmethod
    def forward(ctx, input1, input2, kernel_size, stride, padding, dilation):
        """Compute ``input1[center] - input2[neighbor]`` for every kernel
        neighbor of every output position, with zero padding.

        Args:
          ctx: autograd context; geometry is stashed on it for backward.
          input1: (N, C, H, W) CUDA tensor providing window-center values.
          input2: (N, C, H, W) CUDA tensor providing neighbor values.
          kernel_size: int or pair — neighborhood size.
          stride: int or pair — window stride.
          padding: int or pair — zero padding.
          dilation: int or pair — neighbor dilation.

        Returns:
          (N, C, kernel_h * kernel_w, out_h * out_w) tensor of differences.
        """
        kernel_size, stride, padding, dilation = (
            _pair(kernel_size),
            _pair(stride),
            _pair(padding),
            _pair(dilation),
        )
        ctx.kernel_size, ctx.stride, ctx.padding, ctx.dilation = (
            kernel_size,
            stride,
            padding,
            dilation,
        )
        assert input1.dim() == 4 and input1.is_cuda
        batch_size, input_channels, input_height, input_width = input1.size()
        # Standard convolution output-size arithmetic.
        output_height = int(
            (input_height + 2 * padding[0] - (dilation[0] * (kernel_size[0] - 1) + 1))
            / stride[0]
            + 1
        )
        output_width = int(
            (input_width + 2 * padding[1] - (dilation[1] * (kernel_size[1] - 1) + 1))
            / stride[1]
            + 1
        )
        output = input1.new(
            batch_size,
            input_channels,
            kernel_size[0] * kernel_size[1],
            output_height * output_width,
        )
        # One CUDA thread per (n, c, output position); each thread loops over
        # the kernel window itself.
        n = output.numel() // output.shape[2]
        with torch.cuda.device_of(input1):
            f = load_kernel(
                "subtraction2_zeropad_forward_kernel",
                _subtraction2_zeropad_forward_kernel,
                Dtype=get_dtype_str(input1),
                nthreads=n,
                num=batch_size,
                input_channels=input_channels,
                bottom_height=input_height,
                bottom_width=input_width,
                top_height=output_height,
                top_width=output_width,
                kernel_h=kernel_size[0],
                kernel_w=kernel_size[1],
                stride_h=stride[0],
                stride_w=stride[1],
                dilation_h=dilation[0],
                dilation_w=dilation[1],
                pad_h=padding[0],
                pad_w=padding[1],
            )
            f(
                block=(CUDA_NUM_THREADS, 1, 1),
                grid=(get_blocks_(n), 1, 1),
                args=[input1.data_ptr(), input2.data_ptr(), output.data_ptr()],
                stream=Stream(ptr=torch.cuda.current_stream().cuda_stream),
            )
        ctx.save_for_backward(input1, input2)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Propagate gradients to both inputs via the backward kernels.

        Args:
          ctx: autograd context populated by :meth:`forward`.
          grad_output: gradient w.r.t. the forward output (made contiguous
            before its data pointer is handed to CUDA).

        Returns:
          (grad_input1, grad_input2, None, None, None, None) — one entry per
          forward argument; geometry arguments receive no gradient.
        """
        kernel_size, stride, padding, dilation = (
            ctx.kernel_size,
            ctx.stride,
            ctx.padding,
            ctx.dilation,
        )
        input1, input2 = ctx.saved_tensors
        assert grad_output.is_cuda
        if not grad_output.is_contiguous():
            grad_output = grad_output.contiguous()
        batch_size, input_channels, input_height, input_width = input1.size()
        output_height = int(
            (input_height + 2 * padding[0] - (dilation[0] * (kernel_size[0] - 1) + 1))
            / stride[0]
            + 1
        )
        output_width = int(
            (input_width + 2 * padding[1] - (dilation[1] * (kernel_size[1] - 1) + 1))
            / stride[1]
            + 1
        )
        grad_input1, grad_input2 = None, None
        # Shared template parameters for both backward kernels; "nthreads" is
        # filled in per kernel below.
        opt = dict(
            Dtype=get_dtype_str(grad_output),
            num=batch_size,
            input_channels=input_channels,
            bottom_height=input_height,
            bottom_width=input_width,
            top_height=output_height,
            top_width=output_width,
            kernel_h=kernel_size[0],
            kernel_w=kernel_size[1],
            stride_h=stride[0],
            stride_w=stride[1],
            dilation_h=dilation[0],
            dilation_w=dilation[1],
            pad_h=padding[0],
            pad_w=padding[1],
        )
        with torch.cuda.device_of(input1):
            if ctx.needs_input_grad[0]:
                grad_input1 = input1.new(input1.size())
                n = grad_input1.numel()
                opt["nthreads"] = n
                f = load_kernel(
                    "subtraction2_zeropad_input1_backward_kernel",
                    _subtraction2_zeropad_input1_backward_kernel,
                    **opt
                )
                f(
                    block=(CUDA_NUM_THREADS, 1, 1),
                    grid=(get_blocks_(n), 1, 1),
                    args=[grad_output.data_ptr(), grad_input1.data_ptr()],
                    stream=Stream(ptr=torch.cuda.current_stream().cuda_stream),
                )
        with torch.cuda.device_of(input2):
            if ctx.needs_input_grad[1]:
                grad_input2 = input2.new(input2.size())
                n = grad_input2.numel()
                opt["nthreads"] = n
                f = load_kernel(
                    "subtraction2_zeropad_input2_backward_kernel",
                    _subtraction2_zeropad_input2_backward_kernel,
                    **opt
                )
                f(
                    block=(CUDA_NUM_THREADS, 1, 1),
                    grid=(get_blocks_(n), 1, 1),
                    args=[grad_output.data_ptr(), grad_input2.data_ptr()],
                    stream=Stream(ptr=torch.cuda.current_stream().cuda_stream),
                )
        return grad_input1, grad_input2, None, None, None, None


def subtraction2_zeropad(
    input1, input2, kernel_size=3, stride=1, padding=0, dilation=1
):
    """Functional entry point for :class:`Subtraction2Zeropad`.

    Args:
      input1: (N, C, H, W) CUDA tensor providing window-center values.
      input2: (N, C, H, W) CUDA tensor providing neighbor values.
      kernel_size: neighborhood size (int or pair).
      stride: window stride (int or pair).
      padding: zero padding (int or pair).
      dilation: neighbor dilation (int or pair).

    Returns:
      (N, C, kernel_h * kernel_w, out_h * out_w) tensor of differences.

    Raises:
      NotImplementedError: if the inputs are not CUDA tensors (no CPU path).
    """
    assert input1.dim() == 4
    if input1.is_cuda:
        out = Subtraction2Zeropad.apply(
            input1, input2, kernel_size, stride, padding, dilation
        )
    else:
        raise NotImplementedError
    return out


if __name__ == "__main__":

    def test_subtraction2_zeropad():
        # Self-test: compare the CUDA kernels against an Unfold-based
        # reference implementation, then run a full numerical gradcheck.
        import os

        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
        kernel_size, stride, dilation = 5, 4, 2
        padding = (dilation * (kernel_size - 1) + 1) // 2
        n, c, in_height, in_width = 2, 8, 9, 9
        out_height = int(
            (in_height + 2 * padding - (dilation * (kernel_size - 1) + 1)) / stride + 1
        )
        out_width = int(
            (in_width + 2 * padding - (dilation * (kernel_size - 1) + 1)) / stride + 1
        )
        x1 = torch.randn(n, c, in_height, in_width, requires_grad=True).double().cuda()
        x2 = torch.randn(n, c, in_height, in_width, requires_grad=True).double().cuda()
        y1 = subtraction2_zeropad(
            x1,
            x2,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
        )

        # Reference: centers of x1 via 1x1 unfold minus neighborhoods of x2.
        unfold_i = torch.nn.Unfold(
            kernel_size=1, dilation=dilation, padding=0, stride=stride
        )
        unfold_j = torch.nn.Unfold(
            kernel_size=kernel_size, dilation=dilation, padding=padding, stride=stride
        )
        y2 = unfold_i(x1).view(n, c, 1, out_height * out_width) - unfold_j(x2).view(
            n, c, kernel_size**2, out_height * out_width
        )
        assert (y1 - y2).abs().max() < 1e-9

        # Gradients of both implementations must agree for each input.
        gx11 = torch.autograd.grad(y1.mean(), x1, retain_graph=True)[0]
        gx12 = torch.autograd.grad(y1.mean(), x2, retain_graph=True)[0]
        gx21 = torch.autograd.grad(y2.mean(), x1, retain_graph=True)[0]
        gx22 = torch.autograd.grad(y2.mean(), x2, retain_graph=True)[0]
        assert (gx11 - gx21).abs().max() < 1e-9
        assert (gx12 - gx22).abs().max() < 1e-9

        from functools import partial

        assert torch.autograd.gradcheck(
            partial(
                subtraction2_zeropad,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
            ),
            (x1, x2),
        )
        print("test case passed")

    test_subtraction2_zeropad()
PypiClean
/BloomSky-API-0.3.1.tar.gz/BloomSky-API-0.3.1/docs/index.rst
.. bloomsky_api documentation master file, created by
   sphinx-quickstart on Tue Jul 9 22:26:36 2013.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to BloomSky API's documentation!
========================================

Contents:

.. toctree::
   :maxdepth: 2

   readme
   installation
   usage
   contributing
   authors
   history

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
PypiClean
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/HTML-CSS/fonts/Latin-Modern/fontdata.js
(function(r,f,E){var B="2.7.9";var c="LatinModernMathJax_Alphabets",x="LatinModernMathJax_Arrows",z="LatinModernMathJax_DoubleStruck",C="LatinModernMathJax_Fraktur",i="LatinModernMathJax_Latin",w="LatinModernMathJax_Main",o="LatinModernMathJax_Marks",y="LatinModernMathJax_Misc",F="LatinModernMathJax_Monospace",A="LatinModernMathJax_NonUnicode",t="LatinModernMathJax_Normal",D="LatinModernMathJax_Operators",a="LatinModernMathJax_SansSerif",q="LatinModernMathJax_Script",b="LatinModernMathJax_Shapes",n="LatinModernMathJax_Size1",m="LatinModernMathJax_Size2",l="LatinModernMathJax_Size3",j="LatinModernMathJax_Size4",h="LatinModernMathJax_Size5",g="LatinModernMathJax_Size6",e="LatinModernMathJax_Size7",v="LatinModernMathJax_Symbols",p="LatinModernMathJax_Variants";var s="H",d="V",u={load:"extra",dir:s},k={load:"extra",dir:d};r.Augment({FONTDATA:{version:B,TeX_factor:1.091,baselineskip:1.2,lineH:0.8,lineD:0.2,hasStyleChar:true,FONTS:{LatinModernMathJax_Alphabets:"Alphabets/Regular/Main.js",LatinModernMathJax_Arrows:"Arrows/Regular/Main.js",LatinModernMathJax_DoubleStruck:"DoubleStruck/Regular/Main.js",LatinModernMathJax_Fraktur:"Fraktur/Regular/Main.js",LatinModernMathJax_Latin:"Latin/Regular/Main.js",LatinModernMathJax_Main:"Main/Regular/Main.js",LatinModernMathJax_Marks:"Marks/Regular/Main.js",LatinModernMathJax_Misc:"Misc/Regular/Main.js",LatinModernMathJax_Monospace:"Monospace/Regular/Main.js",LatinModernMathJax_NonUnicode:"NonUnicode/Regular/Main.js",LatinModernMathJax_Normal:"Normal/Regular/Main.js",LatinModernMathJax_Operators:"Operators/Regular/Main.js",LatinModernMathJax_SansSerif:"SansSerif/Regular/Main.js",LatinModernMathJax_Script:"Script/Regular/Main.js",LatinModernMathJax_Shapes:"Shapes/Regular/Main.js",LatinModernMathJax_Size1:"Size1/Regular/Main.js",LatinModernMathJax_Size2:"Size2/Regular/Main.js",LatinModernMathJax_Size3:"Size3/Regular/Main.js",LatinModernMathJax_Size4:"Size4/Regular/Main.js",LatinModernMathJax_Size5:"Size5/Regular/Main.js",LatinModernMathJ
ax_Size6:"Size6/Regular/Main.js",LatinModernMathJax_Size7:"Size7/Regular/Main.js",LatinModernMathJax_Symbols:"Symbols/Regular/Main.js",LatinModernMathJax_Variants:"Variants/Regular/Main.js"},VARIANT:{normal:{fonts:[w,t,F,i,c,o,x,D,v,b,y,p,A,n]},bold:{fonts:[w,t,F,i,c,o,x,D,v,b,y,p,A,n],bold:true,offsetA:119808,offsetG:120488,offsetN:120782},italic:{fonts:[w,t,F,i,c,o,x,D,v,b,y,p,A,n],italic:true,offsetA:119860,offsetG:120546,remap:{119893:8462}},"bold-italic":{fonts:[w,t,F,i,c,o,x,D,v,b,y,p,A,n],bold:true,italic:true,offsetA:119912,offsetG:120604},"double-struck":{fonts:[z],offsetA:120120,offsetN:120792,remap:{120122:8450,120127:8461,120133:8469,120135:8473,120136:8474,120137:8477,120145:8484}},fraktur:{fonts:[C],offsetA:120068,remap:{120070:8493,120075:8460,120076:8465,120085:8476,120093:8488}},"bold-fraktur":{fonts:[C],bold:true,offsetA:120172},script:{fonts:[q],italic:true,offsetA:119964,remap:{119965:8492,119968:8496,119969:8497,119971:8459,119972:8464,119975:8466,119976:8499,119981:8475,119994:8495,119996:8458,120004:8500}},"bold-script":{fonts:[q],bold:true,italic:true,offsetA:120016},"sans-serif":{fonts:[a],offsetA:120224,offsetN:120802},"bold-sans-serif":{fonts:[a],bold:true,offsetA:120276,offsetN:120812,offsetG:120662},"sans-serif-italic":{fonts:[a],italic:true,offsetA:120328},"sans-serif-bold-italic":{fonts:[a],bold:true,italic:true,offsetA:120380,offsetG:120720},monospace:{fonts:[F],offsetA:120432,offsetN:120822},"-Latin-Modern-variant":{fonts:[p,w,t,F,i,c,o,x,D,v,b,y,A,n]},"-tex-caligraphic":{fonts:[w,t,F,i,c,o,x,D,v,b,y,p,A,n],italic:true},"-tex-oldstyle":{fonts:[w,t,F,i,c,o,x,D,v,b,y,p,A,n]},"-tex-caligraphic-bold":{fonts:[w,t,F,i,c,o,x,D,v,b,y,p,A,n],italic:true,bold:true},"-tex-oldstyle-bold":{fonts:[w,t,F,i,c,o,x,D,v,b,y,p,A,n],bold:true},"-tex-mathit":{fonts:[w,t,F,i,c,o,x,D,v,b,y,p,A,n],italic:true,noIC:true},"-largeOp":{fonts:[n,w]},"-smallOp":{}},RANGES:[{name:"alpha",low:97,high:122,offset:"A",add:26},{name:"Alpha",low:65,high:90,offset:"A"},{n
ame:"number",low:48,high:57,offset:"N"},{name:"greek",low:945,high:969,offset:"G",add:26},{name:"Greek",low:913,high:1014,offset:"G",remap:{1013:52,977:53,1008:54,981:55,1009:56,982:57,1012:17}}],RULECHAR:8722,REMAP:{10:32,9666:9664,12296:10216,12297:10217,10072:8739,9656:9654,978:933,9652:9650,9653:9651,65079:9182,65080:9183,697:8242,9723:9633,9724:9632,9662:9660,8254:773,9663:9661},REMAPACCENT:{"\u007E":"\u0303","\u2192":"\u20D7","\u0060":"\u0300","\u005E":"\u0302","\u00B4":"\u0301","\u2032":"\u0301","\u2035":"\u0300"},REMAPACCENTUNDER:{},DELIMITERS:{40:{dir:d,HW:[[0.996,w],[1.094,n],[1.194,m],[1.444,l],[1.792,j],[2.092,h],[2.392,g],[2.99,e]],stretch:{bot:[9117,v],ext:[9116,v],top:[9115,v]}},41:{dir:d,HW:[[0.996,w],[1.094,n],[1.194,m],[1.444,l],[1.792,j],[2.092,h],[2.392,g],[2.99,e]],stretch:{bot:[9120,v],ext:[9119,v],top:[9118,v]}},45:{alias:8722,dir:s},47:{dir:d,HW:[[1,w],[1.31,n],[1.716,m],[1.771,m,1.032],[2.248,l],[2.944,j],[3.858,h],[5.054,g],[6.62,e]]},61:{dir:s,HW:[[0.666,w]],stretch:{left:[57344,e],rep:[57345,e],right:[57346,e]}},91:{dir:d,HW:[[1,w],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]],stretch:{bot:[9123,v],ext:[9122,v],top:[9121,v]}},92:{dir:d,HW:[[1,w],[1.31,n],[1.716,m],[1.771,m,1.032],[2.248,l],[2.944,j],[3.858,h],[5.054,g],[6.62,e]]},93:{dir:d,HW:[[1,w],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]],stretch:{bot:[9126,v],ext:[9125,v],top:[9124,v]}},94:{alias:770,dir:s},95:{alias:818,dir:s},123:{dir:d,HW:[[1,w],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]],stretch:{bot:[9129,v],ext:[57347,e],mid:[9128,v],top:[9127,v]}},124:{dir:d,HW:[[1,w],[1.202,n],[1.444,m],[1.734,l],[2.084,j],[2.502,h],[3.004,g],[3.606,e]],stretch:{bot:[57348,e],ext:[57349,e],top:[57350,e]}},125:{dir:d,HW:[[1,w],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]],stretch:{bot:[9133,v],ext:[57351,e],mid:[9132,v],top:[9131,v]}},126:{alias:771,dir:s},175:{alias:818,dir:s},710:{alias:770,dir:s},713:{alias:8722,dir:s},732:{alias:771,dir:s},770:{d
ir:s,HW:[[0.364,w],[0.644,n],[0.768,m],[0.919,l],[1.1,j],[1.32,h],[1.581,g],[1.896,e]]},771:{dir:s,HW:[[0.37,w],[0.652,n],[0.778,m],[0.931,l],[1.115,j],[1.335,h],[1.599,g],[1.915,e]]},773:{dir:s,HW:[[0.392,o],[0.568,n]],stretch:{left:[57595,e],rep:[57596,e],right:[57597,e]}},774:u,780:{dir:s,HW:[[0.364,w],[0.644,n],[0.768,m],[0.919,l],[1.1,j],[1.32,h],[1.581,g],[1.896,e]]},785:u,812:u,813:u,814:u,815:u,816:u,818:{dir:s,HW:[[0.392,o],[0.568,n]],stretch:{left:[57589,e],rep:[57590,e],right:[57591,e]}},819:u,831:u,8213:{alias:8722,dir:s},8214:{dir:d,HW:[[1,w],[1.202,n],[1.444,m],[1.734,l],[2.084,j],[2.502,h],[3.004,g],[3.606,e]],stretch:{bot:[57642,e],ext:[57643,e],top:[57644,e]}},8215:{alias:8722,dir:s},8254:{alias:8722,dir:s},8260:{dir:d,HW:[[1,w],[1.31,n],[1.716,m],[2.248,l],[2.944,j],[3.858,h],[5.054,g],[6.62,e]]},8400:u,8401:u,8406:u,8407:u,8417:u,8425:u,8428:u,8429:u,8430:u,8431:u,8592:{dir:s,HW:[[0.885,w],[1.351,n]],stretch:{left:[57379,e],rep:[57380,e],right:[57381,e]}},8593:{dir:d,HW:[[0.882,w],[1.348,n]],stretch:{bot:[57385,e],ext:[57386,e],top:[57387,e]}},8594:{dir:s,HW:[[0.885,w],[1.351,n]],stretch:{left:[57382,e],rep:[57383,e],right:[57384,e]}},8595:{dir:d,HW:[[0.882,w],[1.348,n]],stretch:{bot:[57388,e],ext:[57389,e],top:[57390,e]}},8596:{dir:s,HW:[[0.884,w],[1.33,n]],stretch:{left:[57399,e],rep:[57400,e],right:[57401,e]}},8597:{dir:d,HW:[[1.014,w],[1.014,n]],stretch:{bot:[57402,e],ext:[57403,e],top:[57404,e]}},8598:k,8599:k,8600:k,8601:k,8602:u,8603:u,8606:u,8607:k,8608:u,8609:k,8610:u,8611:u,8612:{dir:s,HW:[[0.865,x],[1.331,n]],stretch:{left:[57427,e],rep:[57428,e],right:[57429,e]}},8613:k,8614:{dir:s,HW:[[0.865,w],[1.331,n]],stretch:{left:[57430,e],rep:[57431,e],right:[57432,e]}},8615:k,8617:u,8618:u,8619:u,8620:u,8621:u,8622:u,8624:k,8625:k,8626:k,8627:k,8630:u,8631:u,8636:u,8637:u,8638:k,8639:k,8640:u,8641:u,8642:k,8643:k,8644:u,8645:k,8646:u,8647:u,8648:k,8649:u,8650:k,8651:u,8652:u,8653:u,8654:u,8655:u,8656:{dir:s,HW:[[0.879,w],[1.345,n]],stretch:{le
ft:[57511,e],rep:[57512,e],right:[57513,e]}},8657:{dir:d,HW:[[0.879,w],[1.345,n]],stretch:{bot:[57517,e],ext:[57518,e],top:[57519,e]}},8658:{dir:s,HW:[[0.879,w],[1.345,n]],stretch:{left:[57514,e],rep:[57515,e],right:[57516,e]}},8659:{dir:d,HW:[[0.879,w],[1.345,n]],stretch:{bot:[57520,e],ext:[57521,e],top:[57522,e]}},8660:{dir:s,HW:[[0.956,w],[1.422,n]],stretch:{left:[57523,e],rep:[57524,e],right:[57525,e]}},8661:{dir:d,HW:[[0.956,w],[1.422,n]],stretch:{bot:[57526,e],ext:[57527,e],top:[57528,e]}},8662:k,8663:k,8664:k,8665:k,8666:u,8667:u,8668:u,8669:u,8678:u,8679:k,8680:u,8681:k,8691:k,8693:k,8694:u,8719:k,8720:k,8721:k,8722:{dir:s,HW:[],stretch:{rep:[8722,w,0,0,0,-0.31,-0.31]}},8725:{alias:8260,dir:d},8730:{dir:d,HW:[[1,w],[1.2,n],[1.8,m],[2.4,l],[3,j]],stretch:{bot:[9143,v],ext:[57651,e],top:[57652,e]}},8739:{dir:d,HW:[[1,w],[1.202,n],[1.444,m],[1.734,l],[2.084,j],[2.502,h],[3.004,g],[3.606,e]],stretch:{bot:[57348,e],ext:[57349,e],top:[57350,e]}},8741:{dir:d,HW:[[1,w],[1.202,n],[1.444,m],[1.734,l],[2.084,j],[2.502,h],[3.004,g],[3.606,e]],stretch:{bot:[57642,e],ext:[57643,e],top:[57644,e]}},8747:k,8748:k,8749:k,8750:k,8751:k,8752:k,8753:k,8754:k,8755:k,8801:u,8803:u,8866:k,8867:k,8868:k,8869:k,8896:k,8897:k,8898:k,8899:k,8968:{dir:d,HW:[[1,w],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]],stretch:{ext:[9122,v],top:[9121,v]}},8969:{dir:d,HW:[[1,w],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]],stretch:{ext:[9125,v],top:[9124,v]}},8970:{dir:d,HW:[[1,w],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]],stretch:{bot:[9123,v],ext:[9122,v]}},8971:{dir:d,HW:[[1,w],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]],stretch:{bot:[9126,v],ext:[9125,v]}},8978:{alias:9180,dir:s},8994:{alias:9180,dir:s},8995:{alias:9181,dir:s},9001:{dir:d,HW:[[1,v],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]]},9002:{dir:d,HW:[[1,v],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]]},9130:{dir:d,HW:[[0.748,v]],stretch:{ext:[9130,v]}},9135:{alias:8722,d
ir:s},9136:{dir:d,HW:[[0.75,v,null,9127]],stretch:{top:[9127,v],ext:[9130,v],bot:[9133,v]}},9137:{dir:d,HW:[[0.75,v,null,9131]],stretch:{top:[9131,v],ext:[9130,v],bot:[9129,v]}},9140:u,9141:u,9168:{dir:d,HW:[[1,w,null,124],[1.309,w,1.309,124],[1.771,w,1.771,124],[2.233,w,2.233,124],[2.695,w,2.695,124]],stretch:{ext:[124,w]}},9180:u,9181:u,9182:{dir:s,HW:[[0.492,w],[0.993,n],[1.494,m],[1.996,l],[2.498,j],[3,h],[3.502,g],[4.006,e]],stretch:{left:[57613,e],rep:[57614,e],mid:[57615,e],right:[57616,e]}},9183:{dir:s,HW:[[0.492,w],[0.993,n],[1.494,m],[1.996,l],[2.498,j],[3,h],[3.502,g],[4.006,e]],stretch:{left:[57617,e],rep:[57618,e],mid:[57619,e],right:[57620,e]}},9184:u,9185:u,9472:{alias:8722,dir:s},10145:u,10214:k,10215:k,10216:{dir:d,HW:[[1,w],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]]},10217:{dir:d,HW:[[1,w],[1.1,n],[1.2,m],[1.45,l],[1.8,j],[2.1,h],[2.4,g],[3,e]]},10218:k,10219:k,10222:{dir:d,HW:[[1.024,w],[1.126,n],[1.228,m],[1.482,l],[1.836,j],[2.14,h],[2.444,g],[3.052,e]],stretch:{bot:[57601,e],ext:[57602,e],top:[57603,e]}},10223:{dir:d,HW:[[1.024,w],[1.126,n],[1.228,m],[1.482,l],[1.836,j],[2.14,h],[2.444,g],[3.052,e]],stretch:{bot:[57604,e],ext:[57605,e],top:[57606,e]}},10229:{alias:8592,dir:s},10230:{alias:8594,dir:s},10231:{alias:8596,dir:s},10232:{alias:8656,dir:s},10233:{alias:8658,dir:s},10234:{alias:8660,dir:s},10235:{alias:8612,dir:s},10236:{alias:8614,dir:s},10237:{alias:10502,dir:s},10238:{alias:10503,dir:s},10502:{dir:s,HW:[[0.879,x],[1.325,n]],stretch:{left:[57541,e],rep:[57542,e],right:[57543,e]}},10503:{dir:s,HW:[[0.879,x],[1.325,n]],stretch:{left:[57544,e],rep:[57545,e],right:[57546,e]}},10752:k,10753:k,10754:k,10755:k,10756:k,10757:k,10758:k,10761:k,10764:k,10769:k,11012:u,11013:u,11014:k,11015:k,11020:u,11021:k,11057:u,12296:{alias:10216,dir:d},12297:{alias:10217,dir:d},65079:{alias:9182,dir:s},65080:{alias:9183,dir:s}}}});MathJax.Hub.Register.LoadHook(r.fontDir+"/Main/Regular/Main.js",function(){r.FONTDATA.FONTS[w][8722][0]=r.FONTDAT
A.FONTS[w][43][0];r.FONTDATA.FONTS[w][8722][1]=r.FONTDATA.FONTS[w][43][1]});MathJax.Hub.Register.LoadHook(r.fontDir+"/Size7/Regular/Main.js",function(){var G;G=r.FONTDATA.DELIMITERS[9182].stretch.rep[0];r.FONTDATA.FONTS[e][G][0]+=200;r.FONTDATA.FONTS[e][G][1]+=200;G=r.FONTDATA.DELIMITERS[9183].stretch.rep[0];r.FONTDATA.FONTS[e][G][0]+=200;r.FONTDATA.FONTS[e][G][1]+=200});MathJax.Hub.Register.LoadHook(r.fontDir+"/Size1/Regular/Main.js",function(){r.FONTDATA.FONTS[n][8747][2]-=425;r.FONTDATA.FONTS[n][8747][5]={rfix:-425};r.FONTDATA.FONTS[n][8748][2]-=425;r.FONTDATA.FONTS[n][8748][5]={rfix:-425};r.FONTDATA.FONTS[n][8749][2]-=425;r.FONTDATA.FONTS[n][8749][5]={rfix:-425};r.FONTDATA.FONTS[n][8750][2]-=425;r.FONTDATA.FONTS[n][8750][5]={rfix:-425};r.FONTDATA.FONTS[n][8751][2]-=425;r.FONTDATA.FONTS[n][8751][5]={rfix:-425};r.FONTDATA.FONTS[n][8752][2]-=425;r.FONTDATA.FONTS[n][8752][5]={rfix:-425};r.FONTDATA.FONTS[n][8753][2]-=425;r.FONTDATA.FONTS[n][8753][5]={rfix:-425};r.FONTDATA.FONTS[n][8754][2]-=425;r.FONTDATA.FONTS[n][8754][5]={rfix:-425};r.FONTDATA.FONTS[n][8755][2]-=425;r.FONTDATA.FONTS[n][8755][5]={rfix:-425};r.FONTDATA.FONTS[n][10764][2]-=425;r.FONTDATA.FONTS[n][10764][5]={rfix:-425};r.FONTDATA.FONTS[n][10769][2]-=425;r.FONTDATA.FONTS[n][10769][5]={rfix:-425}});E.loadComplete(r.fontDir+"/fontdata.js")})(MathJax.OutputJax["HTML-CSS"],MathJax.ElementJax.mml,MathJax.Ajax);
PypiClean
/Auxjad-1.0.0.tar.gz/Auxjad-1.0.0/auxjad/core/TenneySelector.py
import random
from typing import Any, Optional, Union


class TenneySelector():
    r"""An implementation of the Dissonant Counterpoint Algorithm by James
    Tenney.

    This class randomly selects elements from an input :obj:`list`, giving
    more weight to elements which have not been selected in recent
    iterations. After each call, the previously selected element's
    probability is set to ``0.0`` (so an element is never selected twice in
    a row) and all other probabilities grow according to a growth function.

    This implementation is based on the paper: Polansky, L., A. Barnett,
    and M. Winter (2011). 'A Few More Words About James Tenney: Dissonant
    Counterpoint and Statistical Feedback'. In: Journal of Mathematics and
    Music 5(2). pp. 63--82.

    Basic usage:

        >>> selector = auxjad.TenneySelector(['A', 'B', 'C', 'D', 'E', 'F'])
        >>> len(selector)
        6
        >>> selector.probabilities
        [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
        >>> selector()
        'C'
        >>> next(selector)
        'A'

    The effective probability of each element is::

        weight * count ** curvature

    where ``count`` is the number of calls since the element was last
    selected (all counts start at ``1``), ``weight`` is the element's fixed
    weight (default ``1.0``), and ``curvature`` is the exponent of the
    growth function (default ``1.0``, i.e. linear growth).

    Curvature values between ``0.0`` and ``1.0`` make the growth concave:
    non-selected elements grow at ever smaller rates, the non-zero
    probabilities stay close together, and repetitions of a same element at
    short distances are common. Values above ``1.0`` make the growth
    convex: the longer an element goes unselected, the faster its
    probability grows, which spreads selections of a same element far
    apart. A curvature of exactly ``0.0`` produces uniformly random
    selection without immediate repetition.

    Instances support :func:`len`, indexing, slicing, item assignment and
    deletion, and membership testing with ``in``. Replacing elements by
    assignment keeps their current probabilities; deleting elements also
    deletes their weights, probabilities, and counts. Assigning a new
    :obj:`list` to :attr:`contents` resets both :attr:`weights` and
    :attr:`probabilities`.

    The read-only properties :attr:`previous_index` and
    :attr:`previous_result` return the index and value of the last output
    (both ``None`` before the first call). :meth:`reset_probabilities`
    restores the initial uniform distribution.
    """

    ### CLASS VARIABLES ###

    __slots__ = ('_contents',
                 '_weights',
                 '_curvature',
                 '_counter',
                 '_previous_index',
                 '_probabilities',
                 )

    ### INITIALISER ###

    def __init__(self,
                 contents: list[Any],
                 *,
                 weights: Optional[list] = None,
                 curvature: float = 1.0,
                 ) -> None:
        r"""Initialises self.

        Raises:
            TypeError: if ``contents`` or ``weights`` is not a list, if any
                weight is not an int or float, or if ``curvature`` is not a
                float.
            ValueError: if ``weights`` and ``contents`` differ in length,
                or if ``curvature`` is negative.
        """
        if not isinstance(contents, list):
            raise TypeError("'contents' must be 'list'")
        if weights is not None:
            if not isinstance(weights, list):
                raise TypeError("'weights' must be 'list'")
            if not len(contents) == len(weights):
                raise ValueError("'weights' must have the same length "
                                 "as 'contents'")
            if not all(isinstance(weight, (int, float))
                       for weight in weights):
                raise TypeError("'weights' elements must be "
                                "'int' or 'float'")
        if not isinstance(curvature, float):
            raise TypeError("'curvature' must be 'float'")
        if curvature < 0.0:
            # A curvature of exactly 0.0 is valid (uniform selection
            # without immediate repetition); only negative values are
            # rejected, so the message must not claim "larger than 0.0".
            raise ValueError("'curvature' must not be negative")
        self._contents = contents[:]
        if weights is not None:
            self._weights = weights[:]
        else:
            self._weights = [1.0 for _ in range(self.__len__())]
        self._curvature = curvature
        # Calls since each element was last selected; starting at 1 makes
        # every initial probability non-zero.
        self._counter = [1 for _ in range(self.__len__())]
        self._generate_probabilities()
        self._previous_index = None

    ### SPECIAL METHODS ###

    def __repr__(self) -> str:
        r'Returns interpreter representation of :attr:`contents`.'
        return str(self._contents)

    def __len__(self) -> int:
        r'Returns the length of :attr:`contents`.'
        return len(self._contents)

    def __call__(self) -> Any:
        r"""Calls the selection process and outputs one element of
        :attr:`contents`, then updates all counts and probabilities.
        """
        self._previous_index = random.choices(
            [n for n in range(self.__len__())],
            weights=self._probabilities,
        )[0]
        self._regenerate_counts()
        self._generate_probabilities()
        return self._contents[self._previous_index]

    def __next__(self) -> Any:
        r"""Calls the selection process and outputs one element of
        :attr:`contents`.
        """
        return self.__call__()

    def __getitem__(self,
                    key: int,
                    ) -> Any:
        r"""Returns one or more elements of :attr:`contents` through
        indexing or slicing.
        """
        return self._contents[key]

    def __setitem__(self,
                    key: int,
                    value: Any,
                    ) -> None:
        r"""Assigns values to one or more elements of :attr:`contents`
        through indexing or slicing. Probabilities are unaffected.
        """
        self._contents[key] = value

    def __delitem__(self,
                    key: int,
                    ) -> None:
        r"""Deletes one or more elements of :attr:`contents` through
        indexing or slicing, together with their weights, probabilities,
        and counts.
        """
        del self._contents[key]
        del self._weights[key]
        del self._probabilities[key]
        del self._counter[key]

    ### PUBLIC METHODS ###

    def reset_probabilities(self) -> None:
        r"""Resets the probability distribution of all elements to an
        uniform distribution.
        """
        self._counter = [1 for _ in range(self.__len__())]
        self._generate_probabilities()

    ### PRIVATE METHODS ###

    def _regenerate_counts(self) -> None:
        r"""Increases the count of all elements except for the previously
        selected one, whose count is reset to zero.
        """
        for i in range(self.__len__()):
            if i == self._previous_index:
                self._counter[i] = 0
            else:
                self._counter[i] += 1

    def _generate_probabilities(self,
                                *,
                                reset: bool = False,
                                ) -> None:
        r"""Generates the probabilities given the weights of the elements
        as well as their count numbers (which are fed into the growth
        function). When ``reset`` is ``True``, all counts are first reset
        to their initial value of ``1``.
        """
        if not isinstance(reset, bool):
            raise TypeError("'reset' must be 'bool'")
        if reset:
            self._counter = [1 for _ in range(self.__len__())]
        self._probabilities = []
        for weight, count in zip(self._weights, self._counter):
            self._probabilities.append(weight * self._growth_function(count))

    def _growth_function(self,
                         count: int,
                         ) -> float:
        r'Applies the growth exponent given a number of counts.'
        if count == 0:
            # Python evaluates 0 ** 0.0 as 1.0, which, with a curvature of
            # 0.0, would allow the previously selected element to be picked
            # again immediately. The previous element must always have zero
            # probability, so handle a zero count explicitly.
            return 0.0
        return count ** self._curvature

    ### PUBLIC PROPERTIES ###

    @property
    def contents(self) -> list[Any]:
        r'The :obj:`list` from which the selector picks elements.'
        return self._contents

    @contents.setter
    def contents(self,
                 contents: list[Any],
                 ) -> None:
        if not isinstance(contents, list):
            raise TypeError("'contents' must be 'list'")
        self._contents = contents[:]
        # New contents reset weights and probabilities to uniform values.
        self._weights = [1.0 for _ in range(self.__len__())]
        self._generate_probabilities(reset=True)

    @property
    def weights(self) -> list[Union[float, int]]:
        r'The :obj:`list` with weights for each element of :attr:`contents`.'
        return self._weights

    @weights.setter
    def weights(self,
                weights: Optional[list[Union[float, int]]],
                ) -> None:
        if weights is not None:
            if not isinstance(weights, list):
                raise TypeError("'weights' must be 'list'")
            if not self.__len__() == len(weights):
                raise ValueError("'weights' must have the same length as the "
                                 "contents of the object")
            if not all(isinstance(weight, (int, float))
                       for weight in weights):
                raise TypeError("'weights' elements must be 'int' or 'float'")
            self._weights = weights[:]
        else:
            # Setting weights to None restores the uniform distribution.
            self._weights = [1.0 for _ in range(self.__len__())]
        self._generate_probabilities(reset=True)

    @property
    def curvature(self) -> float:
        r'The exponent of the growth function.'
        return self._curvature

    @curvature.setter
    def curvature(self,
                  curvature: float,
                  ) -> None:
        if not isinstance(curvature, float):
            raise TypeError("'curvature' must be 'float'")
        if curvature < 0.0:
            raise ValueError("'curvature' must not be negative")
        self._curvature = curvature
        self._generate_probabilities()

    @property
    def previous_index(self) -> Union[int, None]:
        r"""Read-only property, returns the index of the previously output
        element.
        """
        return self._previous_index

    @property
    def previous_result(self) -> Any:
        r'Read-only property, returns the previously output element.'
        if self._previous_index is not None:
            return self._contents[self._previous_index]
        else:
            return self._previous_index

    @property
    def probabilities(self) -> list[float]:
        r'Read-only property, returns the probabilities vector.'
        return self._probabilities
PypiClean
/Aruna-Python-API-1.1.0rc6.tar.gz/Aruna-Python-API-1.1.0rc6/aruna/api/storage/services/v1/info_service_pb2_grpc.py
"""Client and server classes corresponding to protobuf-defined services.""" import grpc from aruna.api.storage.services.v1 import info_service_pb2 as aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2 class ResourceInfoServiceStub(object): """ResourceInfoService This is a generic service that contains utility functions these functions are used to query additional meta-information about resources """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.GetResourceHierarchy = channel.unary_unary( '/aruna.api.storage.services.v1.ResourceInfoService/GetResourceHierarchy', request_serializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetResourceHierarchyRequest.SerializeToString, response_deserializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetResourceHierarchyResponse.FromString, ) class ResourceInfoServiceServicer(object): """ResourceInfoService This is a generic service that contains utility functions these functions are used to query additional meta-information about resources """ def GetResourceHierarchy(self, request, context): """GetResourceHierarchy This requests a hierarchy based on one resource (object / objectgroup or collection) and returns a hierarchy with all associated higherlevel objects up to projects. Needs projects level read access. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ResourceInfoServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'GetResourceHierarchy': grpc.unary_unary_rpc_method_handler( servicer.GetResourceHierarchy, request_deserializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetResourceHierarchyRequest.FromString, response_serializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetResourceHierarchyResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'aruna.api.storage.services.v1.ResourceInfoService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class ResourceInfoService(object): """ResourceInfoService This is a generic service that contains utility functions these functions are used to query additional meta-information about resources """ @staticmethod def GetResourceHierarchy(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/aruna.api.storage.services.v1.ResourceInfoService/GetResourceHierarchy', aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetResourceHierarchyRequest.SerializeToString, aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetResourceHierarchyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) class StorageInfoServiceStub(object): """StorageInfoService This is a generic service that contains utility functions these functions are used to query additional meta-information about the status of the overall storage architecture """ def __init__(self, channel): """Constructor. 
Args: channel: A grpc.Channel. """ self.GetStorageVersion = channel.unary_unary( '/aruna.api.storage.services.v1.StorageInfoService/GetStorageVersion', request_serializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageVersionRequest.SerializeToString, response_deserializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageVersionResponse.FromString, ) self.GetStorageStatus = channel.unary_unary( '/aruna.api.storage.services.v1.StorageInfoService/GetStorageStatus', request_serializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageStatusRequest.SerializeToString, response_deserializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageStatusResponse.FromString, ) class StorageInfoServiceServicer(object): """StorageInfoService This is a generic service that contains utility functions these functions are used to query additional meta-information about the status of the overall storage architecture """ def GetStorageVersion(self, request, context): """GetStorageVersion Status: ALPHA A request to get the current version of the server application String representation and https://semver.org/ """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetStorageStatus(self, request, context): """GetStorageStatus Status: ALPHA A request to get the current status of the storage components by location(s) """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_StorageInfoServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'GetStorageVersion': grpc.unary_unary_rpc_method_handler( servicer.GetStorageVersion, request_deserializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageVersionRequest.FromString, 
response_serializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageVersionResponse.SerializeToString, ), 'GetStorageStatus': grpc.unary_unary_rpc_method_handler( servicer.GetStorageStatus, request_deserializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageStatusRequest.FromString, response_serializer=aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageStatusResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'aruna.api.storage.services.v1.StorageInfoService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class StorageInfoService(object): """StorageInfoService This is a generic service that contains utility functions these functions are used to query additional meta-information about the status of the overall storage architecture """ @staticmethod def GetStorageVersion(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/aruna.api.storage.services.v1.StorageInfoService/GetStorageVersion', aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageVersionRequest.SerializeToString, aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageVersionResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetStorageStatus(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/aruna.api.storage.services.v1.StorageInfoService/GetStorageStatus', 
aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageStatusRequest.SerializeToString, aruna_dot_api_dot_storage_dot_services_dot_v1_dot_info__service__pb2.GetStorageStatusResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
PypiClean
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/builders/preprocessor_builder.py
import tensorflow.compat.v1 as tf

from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2


def _get_step_config_from_proto(preprocessor_step_config, step_name):
  """Returns the value of the field named `step_name` from the proto.

  Args:
    preprocessor_step_config: A preprocessor_pb2.PreprocessingStep message.
    step_name: Name of the field to read.

  Returns:
    The sub-message stored under `step_name`, which callers typically convert
    to a plain dictionary afterwards.

  Raises:
    ValueError: If no set field with that name exists in the proto.
  """
  # ListFields() yields only fields that are actually set, so an unset field
  # is reported the same way as a nonexistent one.
  for descriptor, field_value in preprocessor_step_config.ListFields():
    if descriptor.name != step_name:
      continue
    return field_value
  raise ValueError('Could not get field %s from proto!' % step_name)


def _get_dict_from_proto(config):
  """Puts every set field of a proto into a dictionary keyed by field name.

  For many preprocessing steps there is a trivial 1-1 mapping from proto
  fields to function arguments, and this helper materializes that mapping.

  Protos that CANNOT be trivially converted include:
  * nested messages.
  * steps that check whether an optional field is set (i.e. None != 0).
  * protos that don't map 1-1 to arguments (e.g. lists needing reshaping).
  * fields requiring additional validation (e.g. repeated field length).

  Args:
    config: A protobuf message that does not violate the conditions above.

  Returns:
    `config` converted into a python dictionary.
  """
  return {descriptor.name: field_value
          for descriptor, field_value in config.ListFields()}


# A map from a PreprocessingStep proto config field name to the preprocessing
# function that should be used. The PreprocessingStep proto should be parsable
# with _get_dict_from_proto.
# Steps listed here take exactly the arguments their proto carries, so they
# are dispatched generically via _get_dict_from_proto in build() below.
PREPROCESSING_FUNCTION_MAP = {
    'normalize_image':
        preprocessor.normalize_image,
    'random_pixel_value_scale':
        preprocessor.random_pixel_value_scale,
    'random_image_scale':
        preprocessor.random_image_scale,
    'random_rgb_to_gray':
        preprocessor.random_rgb_to_gray,
    'random_adjust_brightness':
        preprocessor.random_adjust_brightness,
    'random_adjust_contrast':
        preprocessor.random_adjust_contrast,
    'random_adjust_hue':
        preprocessor.random_adjust_hue,
    'random_adjust_saturation':
        preprocessor.random_adjust_saturation,
    'random_distort_color':
        preprocessor.random_distort_color,
    'random_crop_to_aspect_ratio':
        preprocessor.random_crop_to_aspect_ratio,
    'random_black_patches':
        preprocessor.random_black_patches,
    'random_jpeg_quality':
        preprocessor.random_jpeg_quality,
    'random_downscale_to_target_pixels':
        preprocessor.random_downscale_to_target_pixels,
    'random_patch_gaussian':
        preprocessor.random_patch_gaussian,
    'rgb_to_gray':
        preprocessor.rgb_to_gray,
    'scale_boxes_to_pixel_coordinates': (
        preprocessor.scale_boxes_to_pixel_coordinates),
    'subtract_channel_mean':
        preprocessor.subtract_channel_mean,
    'convert_class_logits_to_softmax':
        preprocessor.convert_class_logits_to_softmax,
    'adjust_gamma':
        preprocessor.adjust_gamma,
}

# A map to convert from preprocessor_pb2.ResizeImage.Method enum to
# tf.image.ResizeMethod.
RESIZE_METHOD_MAP = {
    preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA,
    preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC,
    preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR,
    preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: (
        tf.image.ResizeMethod.NEAREST_NEIGHBOR),
}


def get_random_jitter_kwargs(proto):
  """Converts a RandomJitterBoxes proto into kwargs for random_jitter_boxes."""
  return {
      'ratio':
          proto.ratio,
      # Enum value -> lowercase enum name string expected by the preprocessor.
      'jitter_mode':
          preprocessor_pb2.RandomJitterBoxes.JitterMode.Name(proto.jitter_mode
                                                            ).lower()
  }


def build(preprocessor_step_config):
  """Builds preprocessing step based on the configuration.

  Args:
    preprocessor_step_config: PreprocessingStep configuration proto.

  Returns:
    function, argmap: A callable function and an argument map to call function
                      with.

  Raises:
    ValueError: On invalid configuration.
  """
  # The oneof field name doubles as the dispatch key.
  step_type = preprocessor_step_config.WhichOneof('preprocessing_step')

  # Trivial steps: proto fields map 1-1 onto function kwargs.
  if step_type in PREPROCESSING_FUNCTION_MAP:
    preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type]
    step_config = _get_step_config_from_proto(preprocessor_step_config,
                                              step_type)
    function_args = _get_dict_from_proto(step_config)
    return (preprocessing_function, function_args)

  # Everything below needs custom conversion (tuples, None-vs-unset handling,
  # repeated-field validation, ...), handled one step type at a time.

  if step_type == 'random_horizontal_flip':
    config = preprocessor_step_config.random_horizontal_flip
    return (preprocessor.random_horizontal_flip,
            {
                # Empty repeated field -> empty tuple -> None (use default).
                'keypoint_flip_permutation': tuple(
                    config.keypoint_flip_permutation) or None,
                # NOTE(review): `or None` also coerces an explicit 0.0, so a
                # configured zero probability falls back to the preprocessor
                # default — confirm this is intended.
                'probability': config.probability or None,
            })

  if step_type == 'random_vertical_flip':
    config = preprocessor_step_config.random_vertical_flip
    return (preprocessor.random_vertical_flip,
            {
                'keypoint_flip_permutation': tuple(
                    config.keypoint_flip_permutation) or None,
                'probability': config.probability or None,
            })

  if step_type == 'random_rotation90':
    config = preprocessor_step_config.random_rotation90
    return (preprocessor.random_rotation90,
            {
                'keypoint_rot_permutation': tuple(
                    config.keypoint_rot_permutation) or None,
                'probability': config.probability or None,
            })

  if step_type == 'random_crop_image':
    config = preprocessor_step_config.random_crop_image
    return (preprocessor.random_crop_image,
            {
                'min_object_covered': config.min_object_covered,
                # min/max pairs are packed into the range tuples the
                # preprocessor expects.
                'aspect_ratio_range': (config.min_aspect_ratio,
                                       config.max_aspect_ratio),
                'area_range': (config.min_area, config.max_area),
                'overlap_thresh': config.overlap_thresh,
                'clip_boxes': config.clip_boxes,
                'random_coef': config.random_coef,
            })

  if step_type == 'random_pad_image':
    config = preprocessor_step_config.random_pad_image
    min_image_size = None
    # Height/width must be set together; HasField distinguishes unset from 0.
    if (config.HasField('min_image_height') !=
        config.HasField('min_image_width')):
      raise ValueError('min_image_height and min_image_width should be either '
                       'both set or both unset.')
    if config.HasField('min_image_height'):
      min_image_size = (config.min_image_height, config.min_image_width)

    max_image_size = None
    if (config.HasField('max_image_height') !=
        config.HasField('max_image_width')):
      raise ValueError('max_image_height and max_image_width should be either '
                       'both set or both unset.')
    if config.HasField('max_image_height'):
      max_image_size = (config.max_image_height, config.max_image_width)

    pad_color = config.pad_color or None
    if pad_color:
      if len(pad_color) != 3:
        # Only warns (does not raise); tf.cast below still runs.
        tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
      pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)
    return (preprocessor.random_pad_image,
            {
                'min_image_size': min_image_size,
                'max_image_size': max_image_size,
                'pad_color': pad_color,
            })

  if step_type == 'random_absolute_pad_image':
    config = preprocessor_step_config.random_absolute_pad_image

    # Zero/unset padding is bumped to 1 pixel.
    max_height_padding = config.max_height_padding or 1
    max_width_padding = config.max_width_padding or 1

    pad_color = config.pad_color or None
    if pad_color:
      if len(pad_color) != 3:
        tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
      pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)

    return (preprocessor.random_absolute_pad_image,
            {
                'max_height_padding': max_height_padding,
                'max_width_padding': max_width_padding,
                'pad_color': pad_color,
            })

  if step_type == 'random_crop_pad_image':
    config = preprocessor_step_config.random_crop_pad_image
    min_padded_size_ratio = config.min_padded_size_ratio
    if min_padded_size_ratio and len(min_padded_size_ratio) != 2:
      raise ValueError('min_padded_size_ratio should have 2 elements if set!')
    max_padded_size_ratio = config.max_padded_size_ratio
    if max_padded_size_ratio and len(max_padded_size_ratio) != 2:
      raise ValueError('max_padded_size_ratio should have 2 elements if set!')
    pad_color = config.pad_color or None
    if pad_color:
      if len(pad_color) != 3:
        tf.logging.warn('pad_color should have 3 elements (RGB) if set!')
      pad_color = tf.cast([x for x in config.pad_color], dtype=tf.float32)
    kwargs = {
        'min_object_covered': config.min_object_covered,
        'aspect_ratio_range': (config.min_aspect_ratio,
                               config.max_aspect_ratio),
        'area_range': (config.min_area, config.max_area),
        'overlap_thresh': config.overlap_thresh,
        'clip_boxes': config.clip_boxes,
        'random_coef': config.random_coef,
        'pad_color': pad_color,
    }
    # Ratio kwargs are only forwarded when set so the preprocessor defaults
    # apply otherwise.
    if min_padded_size_ratio:
      kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
    if max_padded_size_ratio:
      kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
    return (preprocessor.random_crop_pad_image, kwargs)

  if step_type == 'random_resize_method':
    config = preprocessor_step_config.random_resize_method
    return (preprocessor.random_resize_method,
            {
                'target_size': [config.target_height, config.target_width],
            })

  if step_type == 'resize_image':
    config = preprocessor_step_config.resize_image
    # Proto enum -> tf.image.ResizeMethod; raises KeyError on unknown values.
    method = RESIZE_METHOD_MAP[config.method]
    return (preprocessor.resize_image,
            {
                'new_height': config.new_height,
                'new_width': config.new_width,
                'method': method
            })

  if step_type == 'random_self_concat_image':
    config = preprocessor_step_config.random_self_concat_image
    return (preprocessor.random_self_concat_image, {
        'concat_vertical_probability': config.concat_vertical_probability,
        'concat_horizontal_probability': config.concat_horizontal_probability
    })

  if step_type == 'ssd_random_crop':
    config = preprocessor_step_config.ssd_random_crop
    # With operations set, each kwarg becomes a per-operation list; with no
    # operations the preprocessor's own defaults are used.
    if config.operations:
      min_object_covered = [op.min_object_covered for op in config.operations]
      aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
                            for op in config.operations]
      area_range = [(op.min_area, op.max_area) for op in config.operations]
      overlap_thresh = [op.overlap_thresh for op in config.operations]
      clip_boxes = [op.clip_boxes for op in config.operations]
      random_coef = [op.random_coef for op in config.operations]
      return (preprocessor.ssd_random_crop,
              {
                  'min_object_covered': min_object_covered,
                  'aspect_ratio_range': aspect_ratio_range,
                  'area_range': area_range,
                  'overlap_thresh': overlap_thresh,
                  'clip_boxes': clip_boxes,
                  'random_coef': random_coef,
              })
    return (preprocessor.ssd_random_crop, {})

  if step_type == 'autoaugment_image':
    config = preprocessor_step_config.autoaugment_image
    return (preprocessor.autoaugment_image, {
        'policy_name': config.policy_name,
    })

  if step_type == 'drop_label_probabilistically':
    config = preprocessor_step_config.drop_label_probabilistically
    return (preprocessor.drop_label_probabilistically, {
        'dropped_label': config.label,
        'drop_probability': config.drop_probability,
    })

  if step_type == 'remap_labels':
    config = preprocessor_step_config.remap_labels
    return (preprocessor.remap_labels, {
        'original_labels': config.original_labels,
        'new_label': config.new_label
    })

  if step_type == 'ssd_random_crop_pad':
    config = preprocessor_step_config.ssd_random_crop_pad
    if config.operations:
      min_object_covered = [op.min_object_covered for op in config.operations]
      aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
                            for op in config.operations]
      area_range = [(op.min_area, op.max_area) for op in config.operations]
      overlap_thresh = [op.overlap_thresh for op in config.operations]
      clip_boxes = [op.clip_boxes for op in config.operations]
      random_coef = [op.random_coef for op in config.operations]
      min_padded_size_ratio = [tuple(op.min_padded_size_ratio)
                               for op in config.operations]
      max_padded_size_ratio = [tuple(op.max_padded_size_ratio)
                               for op in config.operations]
      pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b)
                   for op in config.operations]
      return (preprocessor.ssd_random_crop_pad,
              {
                  'min_object_covered': min_object_covered,
                  'aspect_ratio_range': aspect_ratio_range,
                  'area_range': area_range,
                  'overlap_thresh': overlap_thresh,
                  'clip_boxes': clip_boxes,
                  'random_coef': random_coef,
                  'min_padded_size_ratio': min_padded_size_ratio,
                  'max_padded_size_ratio': max_padded_size_ratio,
                  'pad_color': pad_color,
              })
    return (preprocessor.ssd_random_crop_pad, {})

  if step_type == 'ssd_random_crop_fixed_aspect_ratio':
    config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio
    if config.operations:
      min_object_covered = [op.min_object_covered for op in config.operations]
      area_range = [(op.min_area, op.max_area) for op in config.operations]
      overlap_thresh = [op.overlap_thresh for op in config.operations]
      clip_boxes = [op.clip_boxes for op in config.operations]
      random_coef = [op.random_coef for op in config.operations]
      return (preprocessor.ssd_random_crop_fixed_aspect_ratio,
              {
                  'min_object_covered': min_object_covered,
                  # aspect_ratio is a single scalar shared by all operations.
                  'aspect_ratio': config.aspect_ratio,
                  'area_range': area_range,
                  'overlap_thresh': overlap_thresh,
                  'clip_boxes': clip_boxes,
                  'random_coef': random_coef,
              })
    return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})

  if step_type == 'ssd_random_crop_pad_fixed_aspect_ratio':
    config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio
    kwargs = {}
    aspect_ratio = config.aspect_ratio
    if aspect_ratio:
      kwargs['aspect_ratio'] = aspect_ratio
    min_padded_size_ratio = config.min_padded_size_ratio
    if min_padded_size_ratio:
      if len(min_padded_size_ratio) != 2:
        raise ValueError('min_padded_size_ratio should have 2 elements if set!')
      kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
    max_padded_size_ratio = config.max_padded_size_ratio
    if max_padded_size_ratio:
      if len(max_padded_size_ratio) != 2:
        raise ValueError('max_padded_size_ratio should have 2 elements if set!')
      kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
    if config.operations:
      kwargs['min_object_covered'] = [op.min_object_covered
                                      for op in config.operations]
      kwargs['aspect_ratio_range'] = [(op.min_aspect_ratio,
                                       op.max_aspect_ratio)
                                      for op in config.operations]
      kwargs['area_range'] = [(op.min_area, op.max_area)
                              for op in config.operations]
      kwargs['overlap_thresh'] = [op.overlap_thresh
                                  for op in config.operations]
      kwargs['clip_boxes'] = [op.clip_boxes for op in config.operations]
      kwargs['random_coef'] = [op.random_coef for op in config.operations]
    return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, kwargs)

  if step_type == 'random_square_crop_by_scale':
    config = preprocessor_step_config.random_square_crop_by_scale
    return preprocessor.random_square_crop_by_scale, {
        'scale_min': config.scale_min,
        'scale_max': config.scale_max,
        'max_border': config.max_border,
        'num_scales': config.num_scales
    }

  if step_type == 'random_scale_crop_and_pad_to_square':
    config = preprocessor_step_config.random_scale_crop_and_pad_to_square
    return preprocessor.random_scale_crop_and_pad_to_square, {
        'scale_min': config.scale_min,
        'scale_max': config.scale_max,
        'output_size': config.output_size,
    }

  if step_type == 'random_jitter_boxes':
    config = preprocessor_step_config.random_jitter_boxes
    kwargs = get_random_jitter_kwargs(config)
    return preprocessor.random_jitter_boxes, kwargs

  raise ValueError('Unknown preprocessing step.')
PypiClean
/NlpToolkit-MorphologicalAnalysis-1.0.47.tar.gz/NlpToolkit-MorphologicalAnalysis-1.0.47/MorphologicalAnalysis/MorphologicalTag.py
from __future__ import annotations from enum import Enum, auto class MorphologicalTag(Enum): """ Noun : Alengir """ NOUN = auto() """ Adverb : Alelacele """ ADVERB = auto() """ Adjective : Alengirli """ ADJECTIVE = auto() """ Verb : Alıkoy """ VERB = auto() """ 1st person singular agreement : Ben gelirim """ A1SG = auto() """ 2nd person singular agreement : Sen gelirsin """ A2SG = auto() """ 3rd person singular agreement : O gelir """ A3SG = auto() """ 1st person plural agreement : Biz geliriz """ A1PL = auto() """ 2nd person plural agreement : Siz gelirsiniz """ A2PL = auto() """ 3rd person plural agreement : Onlar gelirler """ A3PL = auto() """ 1st person singular possessive : Benim """ P1SG = auto() """ 2nd person singular possessive :Senin """ P2SG = auto() """ 3rd person singular possessive : Onun """ P3SG = auto() """ 1st person plural possessive : Bizim """ P1PL = auto() """ 2nd person plural possessive : Sizin """ P2PL = auto() """ 3rd person plural possessive : Onların """ P3PL = auto() """ Proper noun : John """ PROPERNOUN = auto() """ None possessive : Ev """ PNON = auto() """ Nominative Case : Kedi uyuyor. 
""" NOMINATIVE = auto() """ With : Kalemle """ WITH = auto() """ Without : Dikişsiz """ WITHOUT = auto() """ Accusatıve : Beni """ ACCUSATIVE = auto() """ Dative case : Bana """ DATIVE = auto() """ Genitive : Benim """ GENITIVE = auto() """ Ablative : Okuldan """ ABLATIVE = auto() """ Perosnal pronoun : O """ PERSONALPRONOUN = auto() """ Zero Derivation : Kırmızıydı """ ZERO = auto() """ Ability = auto() possibility : Olabilir """ ABLE = auto() """ Negative : Yapama """ NEGATIVE = auto() """ Past tense : Gitti """ PASTTENSE = auto() """ Conjunction or disjunction : Ama = auto() ise """ CONJUNCTION = auto() """ Determiner : Birtakım """ DETERMINER = auto() """ Duplication : Çıtır çıtır """ DUPLICATION = auto() """ Interjection : Agucuk """ INTERJECTION = auto() """ Number : bir """ NUMBER = auto() """ Post posıtıon : Atfen """ POSTPOSITION = auto() """ Punctuation : + """ PUNCTUATION = auto() """ Question : Mı """ QUESTION = auto() """ Agent : Toplayıcı """ AGENT = auto() """ By doing so : Zıplayarak """ BYDOINGSO = auto() """ Cardinal : yüz = auto() bin """ CARDINAL = auto() """ Causative Form : Pişirmek """ CAUSATIVE = auto() """ Demonstrative pronoun : Bu = auto() şu """ DEMONSTRATIVEPRONOUN = auto() """ Distributive : altışar """ DISTRIBUTIVE = auto() """ Fit for : Ahmetlik """ FITFOR = auto() """ Future participle : Gülecek """ FUTUREPARTICIPLE = auto() """ Infinitive : Biri """ INFINITIVE = auto() """ Ness : Ağırbaşlılık """ NESS = auto() """ Ordinal Number : Altıncı """ ORDINAL = auto() """ Passive : Açıldı """ PASSIVE = auto() """ Past participle : Kırılmış """ PASTPARTICIPLE = auto() """ Present partıcıple : Sarılan """ PRESENTPARTICIPLE = auto() """ Question pronoun : Kim """ QUESTIONPRONOUN = auto() """ Quantitative pronoun : Each """ QUANTITATIVEPRONOUN = auto() """ Range : 1 - 3 """ RANGE = auto() """ Ratio : 1/2 """ RATIO = auto() """ Real : 1.0 """ REAL = auto() """ Reciprocal verb : Görüşmek """ RECIPROCAL = auto() """ Reflexive : Kendi """ REFLEXIVE 
= auto() """ Reflexive pronoun : Kendim """ REFLEXIVEPRONOUN = auto() """ Time : 14:28 """ TIME = auto() """ When : Okuyunca """ WHEN = auto() """ While : Gelirken """ WHILE = auto() """ Without having done so : Çaktırmadan """ WITHOUTHAVINGDONESO = auto() """ PC ablative : Başka """ PCABLATIVE = auto() """* PC accusative : Takiben """ PCACCUSATIVE = auto() """ PC dative : İlişkin """ PCDATIVE = auto() """ PC genitive : Yanısıra """ PCGENITIVE = auto() """ PC instrumental : Birlikte """ PCINSTRUMENTAL = auto() """ PC nominative """ PCNOMINATIVE = auto() """ Acquire : Kazanılan """ ACQUIRE = auto() """ Act of : Aldatmaca """ ACTOF = auto() """ After doing so : Yapıp """ AFTERDOINGSO = auto() """ Almost : Dikensi """ ALMOST = auto() """ As : gibi """ AS = auto() """ As if : Yaşarmışcasına """ ASIF = auto() """ Become : Abideleş """ BECOME = auto() """ Ever since : Çıkagel """ EVERSINCE = auto() """ Projection : Öpülesi """ FEELLIKE = auto() """ Hastility : Yapıver """ HASTILY = auto() """ In between : Arasında """ INBETWEEN = auto() """ Just like : Destansı """ JUSTLIKE = auto() """ -LY : Akıllıca """ LY = auto() """ Related to : Davranışsal """ RELATED = auto() """ Continuous : Yapadur """ REPEAT = auto() """ Since doing so : Amasyalı """ SINCE = auto() """ Since doing so : Amasyalı """ SINCEDOINGSO = auto() """ Start : Alıkoy """ START = auto() """ Stay : Bakakal """ STAY = auto() """ Equative : Öylece """ EQUATIVE = auto() """ Instrumental : Kışın = auto() arabayla """ INSTRUMENTAL = auto() """ Aorist Tense : Her hafta sonunda futbol oynarlar. """ AORIST = auto() """ Desire/Past Auxiliary : Çıkarsa """ DESIRE = auto() """ Future : Yağacak """ FUTURE = auto() """ Imperative : Otur! 
""" IMPERATIVE = auto() """ Narrative Past Tense : Oluşmuş """ NARRATIVE = auto() """ Necessity : Yapmalı """ NECESSITY = auto() """ Optative : Doğanaya """ OPTATIVE = auto() """ Past tense : Gitti """ PAST = auto() """ Present partıcıple : Sarılan """ PRESENT = auto() """ Progressive : Görüyorum """ PROGRESSIVE1 = auto() """ Progressive : Görmekteyim """ PROGRESSIVE2 = auto() """ Conditional : Gelirse """ CONDITIONAL = auto() """ Copula : Mavidir """ COPULA = auto() """ Positive : Gittim """ POSITIVE = auto() """ Pronoun : Ben """ PRONOUN = auto() """ Locative : Aşağıda """ LOCATIVE = auto() """ Relative : Gelenin """ RELATIVE = auto() """ Demonstrative : Bu """ DEMONSTRATIVE = auto() """ Infinitive2 : Gitme """ INFINITIVE2 = auto() """ Infinitive3 : Gidiş """ INFINITIVE3 = auto() """ Sentence beginning header """ BEGINNINGOFSENTENCE = auto() """ Sentence ending header """ ENDOFSENTENCE = auto() """ Title beginning header """ BEGINNINGOFTITLE = auto() """ Title ending header """ ENDOFTITLE = auto() """ Document beginning header """ BEGINNINGOFDOCUMENT = auto() """ Document ending header """ ENDOFDOCUMENT = auto() """ As long as : Yaşadıkça """ ASLONGAS = auto() """ Adamantly """ ADAMANTLY = auto() """ Percent : 15% """ PERCENT = auto() """ Without being able to have done so: kararlamadan """ WITHOUTBEINGABLETOHAVEDONESO = auto() """ Dimension : Küçücük """ DIMENSION = auto() """ Notable state : Anlaşılmazlık """ NOTABLESTATE = auto() """ Fraction : 1/2 """ FRACTION = auto() """ Hash tag : # """ HASHTAG = auto() """ E-mail : @ """ EMAIL = auto() """ Date : 11/06/2018 """ DATE = auto() """ Code : i7-9700K """ CODE = auto() """ Metric : 6cmx7cmx8cm """ METRIC = auto
PypiClean
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/intl-messageformat/src/compiler.js
export default Compiler; function Compiler(locales, formats, pluralFn) { this.locales = locales; this.formats = formats; this.pluralFn = pluralFn; } Compiler.prototype.compile = function (ast) { this.pluralStack = []; this.currentPlural = null; this.pluralNumberFormat = null; return this.compileMessage(ast); }; Compiler.prototype.compileMessage = function (ast) { if (!(ast && ast.type === 'messageFormatPattern')) { throw new Error('Message AST is not of type: "messageFormatPattern"'); } var elements = ast.elements, pattern = []; var i, len, element; for (i = 0, len = elements.length; i < len; i += 1) { element = elements[i]; switch (element.type) { case 'messageTextElement': pattern.push(this.compileMessageText(element)); break; case 'argumentElement': pattern.push(this.compileArgument(element)); break; default: throw new Error('Message element does not have a valid type'); } } return pattern; }; Compiler.prototype.compileMessageText = function (element) { // When this `element` is part of plural sub-pattern and its value contains // an unescaped '#', use a `PluralOffsetString` helper to properly output // the number with the correct offset in the string. if (this.currentPlural && /(^|[^\\])#/g.test(element.value)) { // Create a cache a NumberFormat instance that can be reused for any // PluralOffsetString instance in this message. if (!this.pluralNumberFormat) { this.pluralNumberFormat = new Intl.NumberFormat(this.locales); } return new PluralOffsetString( this.currentPlural.id, this.currentPlural.format.offset, this.pluralNumberFormat, element.value); } // Unescape the escaped '#'s in the message text. 
return element.value.replace(/\\#/g, '#'); }; Compiler.prototype.compileArgument = function (element) { var format = element.format; if (!format) { return new StringFormat(element.id); } var formats = this.formats, locales = this.locales, pluralFn = this.pluralFn, options; switch (format.type) { case 'numberFormat': options = formats.number[format.style]; return { id : element.id, format: new Intl.NumberFormat(locales, options).format }; case 'dateFormat': options = formats.date[format.style]; return { id : element.id, format: new Intl.DateTimeFormat(locales, options).format }; case 'timeFormat': options = formats.time[format.style]; return { id : element.id, format: new Intl.DateTimeFormat(locales, options).format }; case 'pluralFormat': options = this.compileOptions(element); return new PluralFormat( element.id, format.ordinal, format.offset, options, pluralFn ); case 'selectFormat': options = this.compileOptions(element); return new SelectFormat(element.id, options); default: throw new Error('Message element does not have a valid format type'); } }; Compiler.prototype.compileOptions = function (element) { var format = element.format, options = format.options, optionsHash = {}; // Save the current plural element, if any, then set it to a new value when // compiling the options sub-patterns. This conforms the spec's algorithm // for handling `"#"` syntax in message text. this.pluralStack.push(this.currentPlural); this.currentPlural = format.type === 'pluralFormat' ? element : null; var i, len, option; for (i = 0, len = options.length; i < len; i += 1) { option = options[i]; // Compile the sub-pattern and save it under the options's selector. optionsHash[option.selector] = this.compileMessage(option.value); } // Pop the plural stack to put back the original current plural value. 
this.currentPlural = this.pluralStack.pop(); return optionsHash; }; // -- Compiler Helper Classes -------------------------------------------------- function StringFormat(id) { this.id = id; } StringFormat.prototype.format = function (value) { if (!value) { return ''; } return typeof value === 'string' ? value : String(value); }; function PluralFormat(id, useOrdinal, offset, options, pluralFn) { this.id = id; this.useOrdinal = useOrdinal; this.offset = offset; this.options = options; this.pluralFn = pluralFn; } PluralFormat.prototype.getOption = function (value) { var options = this.options; var option = options['=' + value] || options[this.pluralFn(value - this.offset, this.useOrdinal)]; return option || options.other; }; function PluralOffsetString(id, offset, numberFormat, string) { this.id = id; this.offset = offset; this.numberFormat = numberFormat; this.string = string; } PluralOffsetString.prototype.format = function (value) { var number = this.numberFormat.format(value - this.offset); return this.string .replace(/(^|[^\\])#/g, '$1' + number) .replace(/\\#/g, '#'); }; function SelectFormat(id, options) { this.id = id; this.options = options; } SelectFormat.prototype.getOption = function (value) { var options = this.options; return options[value] || options.other; };
PypiClean
/MorningstarAutoTestingFramework-0.1.2.zip/MorningstarAutoTestingFramework-0.1.2/Src/UnConfirmCode/Demo2/main.py
import copy

import diff
from lxml import etree


# Heuristic function for the diffing algorithm: estimates the distance between
# two documents as the number of tags appearing in one but not the other.
def heuristic(d1, d2):
    tags1 = [e.tag for e in d1.xpath("//*")]
    tags2 = [e.tag for e in d2.xpath("//*")]
    commontags = set(tags1).intersection(set(tags2))
    # Total tags minus twice the overlap = tags unique to either document.
    return len(tags1) + len(tags2) - 2 * len(commontags)


# Domain sets.
# UNIT: single-valued domain for transformations that take no meaningful
# value argument (e.g. deltag).
UNIT = set([None])
# T: the tags that addtag is allowed to insert.
T = set(["b", "img"])

# Input documents.
doc1 = etree.fromstring("<root>hello</root>")
# NOTE(review): doc2 is assigned twice; the first assignment is dead code and
# only the <img/><b>hello</b> document is actually used — confirm intended.
doc2 = etree.fromstring("<root>hi</root>")
doc2 = etree.fromstring("<root><img/><b>hello</b></root>")


# Transformation functions.
# Each transformation is a pair (domain_fn, apply_fn): domain_fn yields the
# candidate values, apply_fn applies (value, position) to a document copy.
def addtag_domain(document):
    return T


def addtag(tag, position, document):
    # Work on a deep copy so the search never mutates the input document.
    doc = copy.deepcopy(document)
    # XPath positions are 1-based; select the position-th element in the doc.
    selector = "(//*)[%d]" % position
    pos = doc.xpath(selector)
    # NOTE(review): xpath() returns a list (possibly empty), never None, so
    # this branch is always taken and the trailing raise is unreachable; an
    # out-of-range position raises IndexError from pos[0] instead — confirm.
    if pos is not None:
        etree.SubElement(pos[0], tag)
        return doc
    raise Exception


def deltag_domain(document):
    return UNIT


def deltag(unit, position, document):
    doc = copy.deepcopy(document)
    selector = "(//*)[%d]" % position
    pos = doc.xpath(selector)
    if pos is not None:
        # NOTE(review): clear() empties the element (children/attrs/text) but
        # leaves the element node itself in the tree — confirm that is the
        # intended meaning of "deltag".
        pos[0].clear()
        return doc
    raise Exception


def updatetext_domain(document):
    # NOTE(review): the `document` parameter is unused; candidate texts are
    # drawn from the global goal document doc2. Presumably intentional (the
    # search only needs texts reachable in the goal) — confirm.
    return set([e.text for e in doc2.xpath("//*")])


def updatetext(text, position, document):
    doc = copy.deepcopy(document)
    selector = "(//*)[%d]" % position
    pos = doc.xpath(selector)
    if pos is not None:
        pos[0].text = text
        return doc
    raise Exception


# Transformation functions set.
F = [
    (addtag_domain, addtag),
    (deltag_domain, deltag),
    (updatetext_domain, updatetext),
]

# The delta between the input documents is calculated.
delta = diff.delta(doc1, doc2, F, heuristic, debug=False)

# If a delta is retrieved, it is printed out: replay each transformation from
# doc1 and print it as f(value, position).
if delta is not None:
    current = doc1
    for f, value, position in delta:
        result = f(value, position, current)
        row = "{0}({1},{2})".format(f.__name__, value, position)
        print(row)
        current = result
else:
    print("Goal document is unreachable from given source.")
PypiClean
/JSTools-1.0.tar.gz/JSTools-1.0/jstools/yuicompressor.py
from jstools import utils
from paver.easy import path, info
import tempfile
import subprocess
import os


def compress(input, args, cfg):
    """Runs the YUI compressor over `input` and returns the compressed JS.

    Writes `input` to a temporary .js file, invokes
    ``java -jar <yuicompressor.jar> --type=js <tmpfile>`` and captures stdout.

    Args:
        input: JavaScript source as a string. (Name shadows the builtin but
            is kept for interface compatibility.)
        args: jarpath/classpath argument string, see find_paths().
        cfg: build ConfigParser, consulted by find_paths().

    Returns:
        The compressed JavaScript emitted by yuicompressor.

    Raises:
        OSError: If yuicompressor wrote anything to stderr.
    """
    paths = list(find_paths(args, cfg))
    fd, tpath = tempfile.mkstemp(suffix=".js")
    try:
        # os.fdopen takes ownership of the OS-level fd and closes it; the
        # original code leaked the fd returned by mkstemp.
        with os.fdopen(fd, "w") as tmp:
            tmp.write(input)

        arg_string = "java -jar %s --type=js %s" % (paths.pop(0), tpath)
        new_env = dict(os.environ)
        classpath = paths.pop() if paths else None
        if classpath:
            new_env['CLASSPATH'] = classpath
        elif "CLASSPATH" not in new_env:
            # Best-effort: java may still find the jar without a classpath.
            info("No CLASSPATH found in environment or configuration")

        proc = subprocess.Popen(arg_string.split(" "),
                                stdout=subprocess.PIPE,
                                env=new_env)
        out, err = proc.communicate()
        if err:
            raise OSError(err)
        return out
    finally:
        # Always remove the temp file, even when yuicompressor fails.
        path(tpath).unlink()


def find_paths(args, cfg, limit=False):
    """Resolves the yuicompressor jar path and Java classpath.

    Cascading lookup; the first non-null match wins for each of the two
    values, in this order:

    1. the ``args`` string itself,
    2. the build config's ``[meta]`` section (``jarpath`` / ``classpath``),
    3. the global ``yui_compressor`` config written by paver's yui install.

    ``args`` is a colon-separated string:

    * ``"yui"``                         -> nothing specified
    * ``"yui:/my/yui.jar"``             -> jarpath only
    * ``"yui:/my/yui.jar:/cp1"``        -> jarpath and classpath
    * ``"yui:/my/yui.jar:/cp1:/cp2"``   -> jarpath and classpath "/cp1:/cp2"

    Args:
        args: the colon-separated specification described above.
        cfg: build ConfigParser; its [meta] section may supply fallbacks.
        limit: if True, skip the global-config fallback and return the
            intermediate ``{'jarpath': ..., 'classpath': ...}`` dict
            (used by the tests).

    Returns:
        ``(jarpath, classpath)`` tuple, either element possibly None —
        or the intermediate dict when ``limit`` is True.
    """
    segments = args.split(":")
    paths = dict(jarpath=None, classpath=None)
    if len(segments) == 2:
        paths['jarpath'] = segments[1]
    elif len(segments) >= 3:
        # BUG FIX: was `== 3`, which silently ignored specs with a
        # multi-element classpath such as "yui:jar:/cp1:/cp2" even though
        # the documented behavior is to join the tail back with ':'.
        del segments[0]
        jarpath = segments.pop(0)
        classpath = ":".join(segments)
        paths.update(dict(jarpath=jarpath, classpath=classpath))

    # Fall back to the build config's [meta] section for anything missing.
    if not all(paths.values()) and cfg.has_section("meta"):
        paths = nondestructive_populate(utils.SectionMap(cfg, "meta"), paths)

    if limit:
        # mainly for testing purposes
        return paths

    # Final fallback: the implicit global yui_compressor configuration.
    if not all(paths.values()):
        gc = utils.retrieve_config("yui_compressor")
        if gc is not None:
            paths = nondestructive_populate(gc, paths)

    return paths['jarpath'], paths['classpath'],


def nondestructive_populate(valmap, path_map):
    """Fills only the empty slots of `path_map` from `valmap`, in place.

    Existing (truthy) values are never overwritten, which is what makes the
    lookup in find_paths() cascade correctly.
    """
    for key in list(path_map.keys()):
        if not path_map.get(key):
            path_map[key] = valmap.get(key)
    return path_map
PypiClean
/Gitissius-0.1.6.tar.gz/Gitissius-0.1.6/gitissius/database.py
import os.path
import json
import pickle
# reduce is no longer a builtin on Python 3; IssueManager.filter needs it.
from functools import reduce

import common
import properties


class DbObject(object):
    """Base class for everything stored in the gitissius database.

    Subclasses must set ``self._properties`` (a list of ``properties.*``
    instances) *before* delegating to this initializer.
    """

    def __init__(self, *args, **kwargs):
        """Append the auto 'id' property and absorb keyword values."""
        self._properties += [properties.Id(name='id')]
        for item in self._properties:
            if item.name in kwargs.keys():
                item.set_value(kwargs[item.name])

        # random print order. override in children
        self._print_order = []
        for item in self._properties:
            self._print_order.append(item.name)

    def printme(self):
        """Print every property, honoring ``self._print_order``."""
        for name in self._print_order:
            prop = self.get_property(name)
            prop.printme()

    def printmedict(self):
        """ Return a dictionary with all properties after self.repr """
        dic = {}
        for prop in self._properties:
            dic[prop.name] = prop.repr('value')
        return dic

    @property
    def path(self):
        # Abstract: subclasses define where the object lives in the git
        # repo.  Was "assert False", which vanishes under ``python -O``
        # and then silently returned None.
        raise NotImplementedError("subclasses must define 'path'")

    def get_property(self, name):
        """Return the property object called *name*; raise if absent."""
        for prop in self._properties:
            if prop.name == name:
                return prop
        raise Exception("Property not found")

    def interactive_edit(self):
        """ Interactive edit of issue properties. """
        for name in self._print_order:
            prop = self.get_property(name)
            prop.interactive_edit()

    def serialize(self, indent=0):
        """ Return a json string containing all issue information """
        data = {}
        for item in self._properties:
            item_data = item.serialize()
            data[item_data['name']] = item_data['value']
        return json.dumps(data, indent=indent)

    @property
    def properties(self):
        """Map of property name -> property object."""
        data = {}
        for item in self._properties:
            data[item.name] = item
        return data

    def __str__(self):
        # NOTE(review): returns the property *object*, not a plain string;
        # presumably properties.Text is str-like — confirm upstream.
        return self.get_property('title')


class Issue(DbObject):
    """A single tracked issue, persisted as ``<id>/issue`` in the repo."""

    def __init__(self, *args, **kwargs):
        self._properties = [
            properties.Text(name='title', allow_empty=False),
            properties.Option(name='status',
                              options={'new': {'shortcut': 'n',
                                               'color': common.get_fore_color('YELLOW')},
                                       'assigned': {'shortcut': 'a',
                                                    'color': common.get_fore_color('GREEN')},
                                       'invalid': {'shortcut': 'i',
                                                   'color': common.get_fore_color('WHITE')},
                                       'closed': {'shortcut': 'c',
                                                  'color': common.get_fore_color('WHITE')}
                                       },
                              default='new'),
            properties.Option(name='type',
                              options={'bug': {'shortcut': 'b',
                                               'color': common.get_fore_color('YELLOW')},
                                       'feature': {'shortcut': 'f',
                                                   'color': common.get_fore_color('GREEN')}
                                       },
                              default='bug'),
            properties.Option(name='severity',
                              options={'high': {'shortcut': 'h',
                                                'color': common.colorama.Fore.RED},
                                       'medium': {'shortcut': 'm',
                                                  'color': common.colorama.Fore.YELLOW},
                                       'low': {'shortcut': 'l',
                                               'color': common.colorama.Fore.WHITE}
                                       },
                              default='low'),
            properties.Text(name='assigned_to',
                            completion=common.get_commiters()),
            properties.Text(name='reported_from',
                            completion=common.get_commiters(),
                            default=common.current_user()),
            properties.Date(name='created_on', editable=False,
                            auto_add_now=True),
            properties.Date(name='updated_on', editable=False,
                            auto_now=True),
            properties.Description(name='description')
            ]
        self._comments = []

        super(Issue, self).__init__(*args, **kwargs)

        self._print_order = ['id', 'title', 'type', 'severity',
                             'reported_from', 'assigned_to',
                             'created_on', 'updated_on', 'status',
                             'description'
                             ]

    @property
    def path(self):
        """Repository key of this issue."""
        id = self.get_property('id')
        return "{id!s}/issue".format(**{'id': id})

    @property
    def comments(self):
        """Lazily-built, chronologically sorted list of Comment objects."""
        if not self._comments:
            self._build_commentsdb()
        return self._comments

    def delete(self):
        """Remove the issue and all of its comments from the repo."""
        for comment in self.comments:
            comment.delete()
        del common.git_repo[self.path]

    def _build_commentsdb(self):
        # scan the repo for keys under "<id>/comments/"
        id = self.get_property('id')
        comment_path = "{id}/comments/".format(**{'id': id})
        for item in common.git_repo.keys():
            if item.startswith(comment_path):
                obj = Comment.load(json.loads(common.git_repo[item]))
                self._comments.append(obj)
        self._comments.sort(key=lambda x: x.get_property('created_on').value)
        return self._comments

    @classmethod
    def load(cls, data):
        """Alternate constructor from a deserialized property dict."""
        return Issue(**data)


class Comment(DbObject):
    """A comment on an issue, stored at ``<issue_id>/comments/<id>``."""

    def __init__(self, *args, **kwargs):
        self._properties = [
            properties.Text(name='reported_from',
                            default=common.current_user(),
                            completion=common.get_commiters(),),
            properties.Id(name="issue_id", auto=False),
            properties.Date(name="created_on", editable=False,
                            auto_add_now=True),
            properties.Description(name="description"),
            ]

        super(Comment, self).__init__(*args, **kwargs)

        self._print_order = ['reported_from', 'created_on', 'description']

    @property
    def path(self):
        """Repository key of this comment."""
        issue_id = self.get_property('issue_id')
        return "{issueid!s}/comments/{commentid!s}".\
               format(**{'issueid': issue_id,
                         'commentid': self.get_property('id')
                         })

    def delete(self):
        """Remove the comment from the repo."""
        del common.git_repo[self.path]

    @classmethod
    def load(cls, data):
        """Alternate constructor from a deserialized property dict."""
        return Comment(**data)


class IssueManager(object):
    """ Issue manager object """

    def __init__(self):
        self._issuedb = None

    @property
    def issuedb(self):
        """id -> Issue map, built (or loaded from cache) on first access."""
        if not self._issuedb:
            self._build_issuedb()
        return self._issuedb

    def _build_issuedb(self):
        self._issuedb = {}

        # get current head
        current_head = common.git_repo.current_head()

        # check if we have cache for current head
        path = os.path.join(common.find_repo_root(),
                            '.git',
                            'gitissius.%s%s.cache' %
                            (current_head,
                             '.colorama' if common.colorama else ''
                             )
                            )
        loaded = False
        if os.path.exists(path):
            # pickle data is binary: must open 'rb' (was text mode, which
            # crashes pickle.load on Python 3)
            with open(path, 'rb') as flp:
                try:
                    self._issuedb = pickle.load(flp)
                    loaded = True
                except Exception:
                    # corrupt/incompatible cache: fall through and rebuild
                    loaded = False

        if not loaded:
            for issue in common.git_repo.keys():
                if not '/comments/' in issue:
                    # making sure that we don't treat comments as issues
                    obj = Issue.load(json.loads(common.git_repo[issue]))
                    self._issuedb[str(obj.get_property('id'))] = obj

            # delete previous caches
            for fln in os.listdir(os.path.join(common.find_repo_root(),
                                               '.git')
                                  ):
                if fln.startswith('gitissius') and fln.endswith('.cache'):
                    os.remove(os.path.join(common.find_repo_root(),
                                           '.git',
                                           fln)
                              )

            # create new
            with open(path, "wb") as flp:
                pickle.dump(self._issuedb, flp)

    def update_db(self):
        """Force a rebuild of the issue database."""
        self._build_issuedb()

    def all(self, sort_key=None):
        """Return every issue, optionally sorted by *sort_key*."""
        return self.filter(sort_key=sort_key)

    def filter(self, rules=None, operator="and", sort_key=None):
        """Return issues matching *rules* (list of one-item dicts).

        Rule keys are property names with optional ``__exact``,
        ``__startswith`` and ``__not`` suffixes; the default operator is
        case-insensitive substring match.
        """
        # rules may legitimately be None (see all()); the old assert
        # rejected that and made all() raise AssertionError.
        assert rules is None or isinstance(rules, list)

        # need a real list: dict keys views have no .remove() on Python 3
        matching_keys = list(self.issuedb.keys())
        not_maching_keys = []

        if rules:
            for rule in rules:
                # each rule is a single {name: value} mapping
                name, value = next(iter(rule.items()))

                # parse operators
                cmd = name.split("__")
                name = cmd[0]

                operators = [lambda x, y: x.lower() in y.lower()]
                if "exact" in cmd[1:]:
                    operators += [lambda x, y: x == y]
                if "startswith" in cmd[1:]:
                    operators += [lambda x, y: y.startswith(x)]

                for key in matching_keys:
                    try:
                        # True only when every operator matched
                        result = reduce(lambda x, y: x == y == True,
                                        map(lambda x:
                                            x(value,
                                              self.issuedb[key].
                                              properties[name].value
                                              ),
                                            operators
                                            )
                                        )
                        if "not" in cmd[1:]:
                            if result:
                                not_maching_keys.append(key)
                        else:
                            if not result:
                                not_maching_keys.append(key)
                    except KeyError:
                        # unknown property name in the rule
                        print("Error searching")
                        return []

                for key in not_maching_keys:
                    try:
                        matching_keys.remove(key)
                    except ValueError:
                        continue

            issues = []
            for key in matching_keys:
                issues.append(self.issuedb[key])
        else:
            issues = [issue for issue in self.issuedb.values()]

        if sort_key:
            issues = self.order(issues, sort_key)

        return issues

    def order(self, issues, key):
        """ Short issues by key """
        issues.sort(key=lambda x: x.get_property(key).value)
        return issues

    def get(self, issue_id):
        """Return the single issue whose id starts with *issue_id*.

        :raises common.IssueIDNotFound: no issue matches
        :raises common.IssueIDConflict: the prefix is ambiguous
        """
        matching_keys = []
        for key in self.issuedb.keys():
            if key.startswith(issue_id):
                matching_keys.append(key)

        if len(matching_keys) == 0:
            raise common.IssueIDNotFound(issue_id)
        elif len(matching_keys) > 1:
            # materialize: a lazy map() would be exhausted/opaque on Py3
            raise common.IssueIDConflict(list(map(lambda x: self.issuedb[x],
                                                  matching_keys)))

        return self._issuedb[matching_keys[0]]
PypiClean
/NucDiff-2.0.3.tar.gz/NucDiff-2.0.3/nucdiff/class_nucmer.py
from __future__ import print_function import sys import subprocess import os import getopt import multiprocessing from copy import deepcopy from . import general from . import class_errors from . import class_interv_coord from . import ref_coord class Nucmer: def __init__(self, nuc_prefix, reference_file, contigs_file, working_dir,delta_file,coord_file): self.prefix=nuc_prefix self.ref=reference_file self.cont=contigs_file self.working_dir=working_dir if delta_file=='': self.delta_file='unknown' else: self.delta_file=delta_file if coord_file=='': self.coord_file='unknown' else: self.coord_file=coord_file self.intervals={} def RUN(self,nucmer_opt,filter_opt): if self.delta_file=='unknown': if '--maxmatch' in nucmer_opt: call_line='nucmer '+nucmer_opt+' --prefix='+self.prefix+' '+self.ref+' '+self.cont else: call_line='nucmer '+nucmer_opt+' --maxmatch --prefix='+self.prefix+' '+self.ref+' '+self.cont try: subprocess.check_call(call_line, shell=True) except subprocess.CalledProcessError: sys.exit('\nCould not run nucmer. Please, check nucmer input parameters values.') self.delta_file=self.prefix+'.delta' f=open(self.prefix+'.filter','w') call_line_list=[] call_line_list.append('delta-filter') for entry in filter_opt.split(' '): call_line_list.append(entry) if not '-q' in filter_opt: call_line_list.append('-q') call_line_list.append(self.prefix+'.delta') try: subprocess.check_call(call_line_list,stdout=f) except subprocess.CalledProcessError: sys.exit('\nCould not run delta-filter. 
Please, check delta-filter input parameters values.') f.close() f=open(self.prefix+'.coords','w') subprocess.check_call(['show-coords', '-qcldH', self.prefix+'.filter'], stdout=f) f.close() self.coord_file=self.prefix+'.coords' def PARSE(self): intervals_temp_dict={} f=open(self.coord_file) lines=f.readlines() f.close() if os.stat(self.coord_file)[6]!=0: snps_errors_list=[] start_pos=0 for i in range(len(lines)): temp=lines[i].split() ref_start=int(temp[0]) ref_end=int(temp[1]) cont_start=int(temp[3]) cont_end=int(temp[4]) len_ref=int(temp[11]) len_cont=int(temp[12]) ref_direction=int(temp[17]) cont_direction=int(temp[18]) ref_name=temp[19] cont_name=temp[20] proc_identity=float(temp[9]) if cont_name not in intervals_temp_dict: intervals_temp_dict[cont_name]=[] if cont_direction==1 and ref_direction==1: intervals_temp_dict[cont_name].append([cont_start, cont_end,ref_start,ref_end,cont_direction,ref_direction, len_cont, len_ref, ref_name,0,[]]) elif cont_direction==-1 and ref_direction==1: intervals_temp_dict[cont_name].append([cont_end,cont_start,ref_start,ref_end,cont_direction,ref_direction, len_cont, len_ref, ref_name,0,[]]) else: sys.exit('Error: unknown case during parsing coord_file') if proc_identity!=100: intervals_temp_dict[cont_name][-1][10].append(lines[i]) for cont_name in list(intervals_temp_dict.keys()): sort_intervals=sorted(intervals_temp_dict[cont_name], key=lambda inter:inter[0], reverse=False) self.intervals[cont_name]=sort_intervals def FIND_ERR_INSIDE_FRAG(self,proc_num,file_contigs): frag_dict={} num_all=0 for cont_name in list(self.intervals.keys()): frag_dict[cont_name]=[] for i in range(len(self.intervals[cont_name])): info=self.intervals[cont_name][i] frag_dict[cont_name].append([info[0], info[1], info[2], info[3], info[8], info[10],0,[]]) num_all+=1 FIND_SNPS(frag_dict, self.coord_file, self.delta_file,self.prefix, proc_num, file_contigs ) contigs_dict, contig_seqs, contig_names, 
contig_full_names_dict=general.READ_FASTA_ENTRY(file_contigs) temp_dif_gap=[] for cont_name in list(frag_dict.keys()): for i in range(len(frag_dict[cont_name])): if frag_dict[cont_name][i][:4]==self.intervals[cont_name][i][:4] and frag_dict[cont_name][i][4]==self.intervals[cont_name][i][8]: start_seq=frag_dict[cont_name][i][0] end_seq=frag_dict[cont_name][i][1] seq=contigs_dict[cont_name][start_seq-1 :end_seq-1 +1] gaps_list=FIND_GAPS(seq,start_seq,end_seq,frag_dict[cont_name][i][7]) if self.intervals[cont_name][i][10]!=[]: self.intervals[cont_name][i][10].pop(0) for gap in gaps_list: self.intervals[cont_name][i][10].append(gap) for err in frag_dict[cont_name][i][7]: self.intervals[cont_name][i][10].append(err) self.intervals[cont_name][i][10]=sorted(self.intervals[cont_name][i][10],key=lambda inter:inter[0], reverse=False) self.intervals[cont_name][i][10]=MERGE_GAPS(self.intervals[cont_name][i][10]) print('The difference detection inside fragments step is complete') def REMOVE_REF_OVERLAP(self): temp_interv_list=[] temp_errors_list=[] remove_list=[] new_list=[] for cont_name in list(self.intervals.keys()): max_gr=0 for entry in self.intervals[cont_name]: if entry[9][0]>max_gr: max_gr=entry[9][0] for gr_num in range(max_gr+1): for i in range(len(self.intervals[cont_name])): if self.intervals[cont_name][i][9][0]==gr_num: temp_interv_list.append([self.intervals[cont_name][i][0],self.intervals[cont_name][i][1],self.intervals[cont_name][i][2],self.intervals[cont_name][i][3],self.intervals[cont_name][i][4],i,self.intervals[cont_name][i][10]]) #print '\nnew_list' #for entry in temp_interv_list: # print entry[:10] #raw_input('rfj') if len(temp_interv_list)>1: temp_interv_list=sorted(temp_interv_list, key=lambda inter:inter[2], reverse=False) for i in range(len(temp_interv_list)-1): r1_end=temp_interv_list[i][3] r2_st=temp_interv_list[i+1][2] if r1_end+1>r2_st: # reference fragments overlap c2_dir=temp_interv_list[i+1][4] c2_st=temp_interv_list[i+1][0] 
c2_end=temp_interv_list[i+1][1] errors_2=temp_interv_list[i+1][6] r2_coord=r1_end if c2_dir==1: c2_coord, last_err_end=class_interv_coord.FIND_CONT_COORD_FORWARD_START(r2_st, c2_st, r2_coord, errors_2,c2_end) if c2_coord<c2_end: temp_interv_list[i+1][0]=c2_coord+1 temp_interv_list[i+1][2]=r2_coord+1 self.intervals[cont_name][temp_interv_list[i+1][5]][0]=c2_coord+1 self.intervals[cont_name][temp_interv_list[i+1][5]][2]=r2_coord+1 for entry in errors_2: if entry[0]>last_err_end: temp_errors_list.append(entry) del temp_interv_list[i+1][6][:] del self.intervals[cont_name][temp_interv_list[i+1][5]][10][:] for entry in temp_errors_list: self.intervals[cont_name][temp_interv_list[i+1][5]][10].append(entry) del temp_errors_list[:] else: remove_list.append(temp_interv_list[i+1][5]) else: #c2_dir==-1 c2_coord, last_err_end=class_interv_coord.FIND_CONT_COORD_REVERSE_END_SECOND(r2_st, c2_end, r2_coord, errors_2) if c2_coord>1: temp_interv_list[i+1][1]=c2_coord-1 temp_interv_list[i+1][2]=r2_coord+1 self.intervals[cont_name][temp_interv_list[i+1][5]][1]=c2_coord-1 self.intervals[cont_name][temp_interv_list[i+1][5]][2]=r2_coord+1 for entry in errors_2: if entry[0]<last_err_end: temp_errors_list.append(entry) del temp_interv_list[i+1][6][:] del self.intervals[cont_name][temp_interv_list[i+1][5]][10][:] for entry in temp_errors_list: self.intervals[cont_name][temp_interv_list[i+1][5]][10].append(entry) del temp_errors_list[:] else: remove_list.append(temp_interv_list[i+1][5]) for j in range(len(self.intervals[cont_name])): if not j in remove_list: new_list.append(self.intervals[cont_name][j]) del remove_list[:] del self.intervals[cont_name][:] for entry in new_list: self.intervals[cont_name].append(entry) del new_list[:] del temp_interv_list[:] def FIND_STRUCTURAL_ERRORS(self,contigs_dict, ref_dict,reloc_dist): errors_list=[] #-------1.find translocations structure_dict={} #end_err_dict={} #1.1. 
redefine num element values self.FIND_REF_ORDER_NUM() self.REMOVE_REF_OVERLAP() x=self.IMPROVE_PARSING_RESULTS(contigs_dict, ref_dict) for cont_name in list(self.intervals.keys()): #1.2 sort intervals by translocation groups (fill in 'blocks' field) structure_dict[cont_name]={} #print cont_name #for entry in self.intervals[cont_name]: # print entry[:10] #raw_input('rfj') cur_transl_group_num=0 cur_transl_group_name=0 structure_dict[cont_name][cur_transl_group_name]={'blocks':[self.intervals[cont_name][0]], 'output_line':[],'reason':[],'temp':-1} for i in range(len(self.intervals[cont_name])-1): if self.intervals[cont_name][i][9][0]==self.intervals[cont_name][i+1][9][0]: #from one ref seq structure_dict[cont_name][cur_transl_group_name]['blocks'].append(self.intervals[cont_name][i+1]) else: structure_dict[cont_name][cur_transl_group_name]['temp']=i cur_transl_group_num+=1 cur_transl_group_name=cur_transl_group_num structure_dict[cont_name][cur_transl_group_name]={'blocks':[self.intervals[cont_name][i+1]], 'output_line':[], 'reason':[], 'temp':-1} if len(list(structure_dict[cont_name].keys()))>1: #if there are translocation differences for transl_group in list(structure_dict[cont_name].keys()): #1.2 fill in the 'output_line' field c_st=structure_dict[cont_name][transl_group]['blocks'][0][0] r_st=structure_dict[cont_name][transl_group]['blocks'][0][2] r_name=structure_dict[cont_name][transl_group]['blocks'][0][8] c_end=structure_dict[cont_name][transl_group]['blocks'][-1][1] r_end=structure_dict[cont_name][transl_group]['blocks'][-1][3] structure_dict[cont_name][transl_group]['output_line']=[c_st,c_end,transl_group,c_end-c_st+1, r_name, r_st,r_end] #1.3. 
fill in the 'reason' field if it's necessary if structure_dict[cont_name][transl_group]['temp']!=-1: contig_seq=contigs_dict[cont_name] i=structure_dict[cont_name][transl_group]['temp'] c1_end=self.intervals[cont_name][i][1] c2_st=self.intervals[cont_name][i+1][0] if c1_end+1<c2_st: type_gap=class_interv_coord.ANALYSE_SPACE_SIMB(contig_seq, c1_end+1, c2_st-1) if type_gap=='gap': structure_dict[cont_name][transl_group]['reason'].append([c1_end+1, c2_st-1, 'translocation-wrong_scaffolding',c2_st-c1_end-1, 'transl' ]) elif type_gap=='mixed_fragment': structure_dict[cont_name][transl_group]['reason'].append([c1_end+1, c2_st-1, 'translocation-mixed_fragments',c2_st-c1_end-1, 'transl' ]) else: structure_dict[cont_name][transl_group]['reason'].append([c1_end+1, c2_st-1, 'translocation-insertion',c2_st-c1_end-1, 'transl' ]) elif c1_end+1==c2_st: structure_dict[cont_name][transl_group]['reason'].append([c1_end,c2_st,'translocation',0, 'transl']) else: structure_dict[cont_name][transl_group]['reason'].append([c2_st, c1_end,'translocation-overlap', c1_end-c2_st+1, 'transl']) r_end_1=self.intervals[cont_name][i][3] r_st_1=self.intervals[cont_name][i][2] if self.intervals[cont_name][i][4]==1: ref_overl_1=class_interv_coord.FIND_REF_COORD_FORWARD_END_1(r_end_1, c1_end, c2_st, self.intervals[cont_name][i][10], r_st_1) structure_dict[cont_name][transl_group]['reason'][-1].append([self.intervals[cont_name][i][8],ref_overl_1, r_end_1]) else: ref_overl_1=class_interv_coord.FIND_REF_COORD_REVERSE_END_1(r_st_1, c1_end, c2_st, self.intervals[cont_name][i][10],r_end_1) structure_dict[cont_name][transl_group]['reason'][-1].append([self.intervals[cont_name][i][8],r_st_1, ref_overl_1]) r_end_2=self.intervals[cont_name][i+1][3] r_st_2=self.intervals[cont_name][i+1][2] for err_ent in self.intervals[cont_name][i+1][10]: errors_list.append([err_ent[0],err_ent[1],err_ent[2],err_ent[3],err_ent[4]]) if self.intervals[cont_name][i+1][4]==1: 
ref_overl_2,x=class_interv_coord.FIND_REF_COORD_FORWARD_START(r_st_2, c2_st, c1_end, errors_list) ref_overl_2=min(r_end_2,ref_overl_2) structure_dict[cont_name][transl_group]['reason'][-1].append([self.intervals[cont_name][i+1][8],r_st_2, ref_overl_2]) else: ref_overl_2,x=class_interv_coord.FIND_REF_COORD_REVERSE_END(r_end_2, c2_st, c1_end, errors_list) ref_overl_2=max(r_st_2,ref_overl_2) structure_dict[cont_name][transl_group]['reason'][-1].append([self.intervals[cont_name][i+1][8],ref_overl_2, r_end_2]) for i in range(len(errors_list)): errors_list.pop(0) #1.4. delete the 'temp' field structure_dict[cont_name][transl_group].pop('temp', None) else: structure_dict[cont_name][0].pop('temp', None) #---------2.find misjoins and circular genome ends #2. find misjoin_groups inside translocation_groups misj_groups_dict={} temp_errors_misj=[] for cont_name in list(structure_dict.keys()): cur_misj_num=0 for transl_group in sorted(structure_dict[cont_name]): #2.1 find misjoin_groups. Delete temp variable temp_group num new_misj_groups=FIND_MISJOIN_GROUP(structure_dict[cont_name][transl_group]['blocks'],reloc_dist) el_num=-1 misj_groups_dict[cur_misj_num]={'blocks':[], 'output_line':[],'reason':[],'temp':-1} for i in range(len(new_misj_groups[0])): misj_groups_dict[cur_misj_num]['blocks'].append(new_misj_groups[0][i]) el_num+=1 for j in range(1,len(new_misj_groups)): misj_groups_dict[cur_misj_num]['temp']=el_num cur_misj_num+=1 misj_groups_dict[cur_misj_num]={'blocks':[], 'output_line':[],'reason':[],'temp':-1} for i in range(len(new_misj_groups[j])): misj_groups_dict[cur_misj_num]['blocks'].append(new_misj_groups[j][i]) el_num+=1 if len(list(misj_groups_dict.keys()))>1: #there are misjoin differences for misj_group in list(misj_groups_dict.keys()): #2.2 fill in the 'output_line' field c_st=misj_groups_dict[misj_group]['blocks'][0][0] r_st=misj_groups_dict[misj_group]['blocks'][0][2] r_name=misj_groups_dict[misj_group]['blocks'][0][8] 
c_end=misj_groups_dict[misj_group]['blocks'][-1][1] r_end=misj_groups_dict[misj_group]['blocks'][-1][3] misj_groups_dict[misj_group]['output_line']=[c_st,c_end,misj_group, c_end-c_st+1, r_name, r_st,r_end] #2.3 fill in the 'reason' field if necessary if misj_groups_dict[misj_group]['temp']!=-1: i=misj_groups_dict[misj_group]['temp'] a=class_interv_coord.Interv_coord(structure_dict[cont_name][transl_group]['blocks'][i]) b=class_interv_coord.Interv_coord(structure_dict[cont_name][transl_group]['blocks'][i+1]) interv_case, interv_type=a.FIND_CASE(b,reloc_dist) r1_st=structure_dict[cont_name][transl_group]['blocks'][i][2] r2_end=structure_dict[cont_name][transl_group]['blocks'][i+1][3] len_r2=structure_dict[cont_name][transl_group]['blocks'][i+1][7] r2_st=structure_dict[cont_name][transl_group]['blocks'][i+1][2] r1_end=structure_dict[cont_name][transl_group]['blocks'][i][3] len_r1=structure_dict[cont_name][transl_group]['blocks'][i][7] c1_end=structure_dict[cont_name][transl_group]['blocks'][i][1] c2_st=structure_dict[cont_name][transl_group]['blocks'][i+1][0] c2_end=structure_dict[cont_name][transl_group]['blocks'][i+1][1] if (interv_case in ['4.1','8.1','12.1']) and r1_st==1 and r2_end==len_r2: #circular genome start if interv_case=='4.1': c_space_len=c2_st-c1_end-1 contig_seq=contigs_dict[cont_name] errors_list_1=class_interv_coord.INSERTION_INSIDE_CONTIG(c1_end+1, c2_st-1,contig_seq, c_space_len,interv_case) for entry in errors_list_1: misj_groups_dict[misj_group]['reason'].append(entry) del errors_list_1[:] misj_groups_dict[misj_group]['reason'].append([c1_end, c1_end, 'circular_genome_start', 1, interv_case] ) elif interv_case=='8.1': misj_groups_dict[misj_group]['reason'].append([c1_end, c1_end, 'circular_genome_start', 1, interv_case]) elif interv_case=='12.1': misj_groups_dict[misj_group]['reason'].append([c1_end, c1_end, 'circular_genome_start', 1, interv_case]) c_len=c1_end-c2_st+1 misj_groups_dict[misj_group]['reason'].append([c1_end, c1_end, 
'deletion-collapsed_repeat', c_len, interv_case]) corresp_ref_coord, last_err_end=class_interv_coord.FIND_REF_COORD_REVERSE_END(r2_end, c2_st, c1_end+1, structure_dict[cont_name][transl_group]['blocks'][i+1][10]) first_base=last_err_end+1 for entry in structure_dict[cont_name][transl_group]['blocks'][i+1][10]: if entry[0]>=first_base: temp_errors_misj.append(entry) if structure_dict[cont_name][transl_group]['blocks'][i+1][10]!=[]: if structure_dict[cont_name][transl_group]['blocks'][i+1][10]!=temp_errors_misj: del structure_dict[cont_name][transl_group]['blocks'][i+1][10][:] for entry in temp_errors_misj: structure_dict[cont_name][transl_group]['blocks'][i+1][10].append(entry) del temp_errors_misj[:] structure_dict[cont_name][transl_group]['blocks'][i+1][0]=structure_dict[cont_name][transl_group]['blocks'][i][1]+1 structure_dict[cont_name][transl_group]['blocks'][i+1][3]=corresp_ref_coord else: print('ERROR: need to describe this case (circular genome): '+interv_case) print() elif (interv_case in ['1.11', '5.11', '9.11']) and r2_st==1 and r1_end ==len_r1: #circular genome start if interv_case=='1.11': c_space_len=c2_st-c1_end-1 contig_seq=contigs_dict[cont_name] errors_list_1=class_interv_coord.INSERTION_INSIDE_CONTIG(c1_end+1, c2_st-1,contig_seq, c_space_len,interv_case) for entry in errors_list_1: misj_groups_dict[misj_group]['reason'].append(entry) del errors_list_1[:] misj_groups_dict[misj_group]['reason'].append([c2_st, c2_st, 'circular_genome_start', 1, interv_case]) elif interv_case=='5.11': misj_groups_dict[misj_group]['reason'].append([c2_st, c2_st, 'circular_genome_start', 1, interv_case]) elif interv_case=='9.11': c_space_len=c1_end-c2_st+1 misj_groups_dict[misj_group]['reason'].append([c2_st-1, c2_st-1,'deletion-collapsed_repeat',c_space_len,interv_case]) misj_groups_dict[misj_group]['reason'].append([c2_st, c2_st, 'circular_genome_start', 1, interv_case]) corresp_ref_coord, last_err_end=class_interv_coord.FIND_REF_COORD_FORWARD_END(r1_end, c1_end, 
c2_st-1,structure_dict[cont_name][transl_group]['blocks'][i][10]) first_base=last_err_end-1 for entry in structure_dict[cont_name][transl_group]['blocks'][i][10]: if entry[0]<=first_base: temp_errors_misj.append(entry) if structure_dict[cont_name][transl_group]['blocks'][i][10]!=[]: if structure_dict[cont_name][transl_group]['blocks'][i][10]!=temp_errors_misj: del structure_dict[cont_name][transl_group]['blocks'][i][10][:] for entry in temp_errors_misj: structure_dict[cont_name][transl_group]['blocks'][i][10].append(entry) del temp_errors_misj[:] structure_dict[cont_name][transl_group]['blocks'][i][1]=structure_dict[cont_name][transl_group]['blocks'][i+1][0]-1 structure_dict[cont_name][transl_group]['blocks'][i][3]=corresp_ref_coord misj_groups_dict[misj_group]['output_line'][1]=structure_dict[cont_name][transl_group]['blocks'][i+1][0]-1 misj_groups_dict[misj_group]['output_line'][6]=corresp_ref_coord else: print('ERROR: need to describe this case (circular genome): '+interv_case) print() else: #real misjoin if c1_end+1<c2_st: contig_seq=contigs_dict[cont_name] type_gap=class_interv_coord.ANALYSE_SPACE_SIMB(contig_seq, c1_end+1, c2_st-1) if type_gap=='gap': misj_groups_dict[misj_group]['reason'].append([c1_end+1, c2_st-1, 'misjoin-wrong_scaffolding',c2_st-c1_end-1, 'misj' ]) elif type_gap=='mixed_fragment': misj_groups_dict[misj_group]['reason'].append([c1_end+1, c2_st-1, 'misjoin-mixed_fragments',c2_st-c1_end-1, 'misj' ]) else: misj_groups_dict[misj_group]['reason'].append([c1_end+1, c2_st-1, 'misjoin-insertion',c2_st-c1_end-1, 'misj' ]) elif c1_end+1==c2_st: misj_groups_dict[misj_group]['reason'].append([c1_end,c2_st,'misjoin',0, 'misj']) else: misj_groups_dict[misj_group]['reason'].append([c2_st, c1_end,'misjoin-overlap', c1_end-c2_st+1, 'misj']) r_end_1=structure_dict[cont_name][transl_group]['blocks'][i][3] r_st_1=structure_dict[cont_name][transl_group]['blocks'][i][2] if self.intervals[cont_name][i][4]==1: 
ref_overl_1=class_interv_coord.FIND_REF_COORD_FORWARD_END_1(r_end_1, c1_end, c2_st, self.intervals[cont_name][i][10], r_st_1) misj_groups_dict[misj_group]['reason'][-1].append([structure_dict[cont_name][transl_group]['blocks'][i][8],ref_overl_1, r_end_1]) else: ref_overl_1=class_interv_coord.FIND_REF_COORD_REVERSE_END_1(r_st_1, c1_end, c2_st, self.intervals[cont_name][i][10],r_end_1) misj_groups_dict[misj_group]['reason'][-1].append([structure_dict[cont_name][transl_group]['blocks'][i][8],r_st_1, ref_overl_1]) r_end_2=structure_dict[cont_name][transl_group]['blocks'][i+1][3] r_st_2=structure_dict[cont_name][transl_group]['blocks'][i+1][2] for err_ent in self.intervals[cont_name][i+1][10]: errors_list.append([err_ent[0],err_ent[1],err_ent[2],err_ent[3],err_ent[4]]) if self.intervals[cont_name][i+1][4]==1: ref_overl_2,x=class_interv_coord.FIND_REF_COORD_FORWARD_START(r_st_2, c2_st, c1_end, errors_list) ref_overl_2=min(r_end_2,ref_overl_2) misj_groups_dict[misj_group]['reason'][-1].append([structure_dict[cont_name][transl_group]['blocks'][i+1][8],r_st_2, ref_overl_2]) else: ref_overl_2,x=class_interv_coord.FIND_REF_COORD_REVERSE_END(r_end_2, c2_st, c1_end, errors_list) ref_overl_2=max(r_st_2,ref_overl_2) misj_groups_dict[misj_group]['reason'][-1].append([structure_dict[cont_name][transl_group]['blocks'][i+1][8],ref_overl_2, r_end_2]) for i in range(len(errors_list)): errors_list.pop(0) #2.4 delete the 'temp' field misj_groups_dict[misj_group].pop('temp', None) #2.5 substitute a list of intervals in transl_group['blocks'] by dict of misj_groups structure_dict[cont_name][transl_group].pop('blocks',None) structure_dict[cont_name][transl_group]['blocks']={} for misj_group in list(misj_groups_dict.keys()): structure_dict[cont_name][transl_group]['blocks'][misj_group]={} for field in list(misj_groups_dict[misj_group].keys()): structure_dict[cont_name][transl_group]['blocks'][misj_group][field]=misj_groups_dict[misj_group][field] for key in list(misj_groups_dict.keys()): 
misj_groups_dict.pop(key, None) temp_block_list=[] for cont_name in list(structure_dict.keys()): for transl_group in list(structure_dict[cont_name].keys()): for misj_group in list(structure_dict[cont_name][transl_group]['blocks'].keys()): if len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])>1: for block in structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks']: temp_block_list.append(block) tmp_res=ref_coord.MERGE_BLOCK_MSJ(temp_block_list) structure_dict[cont_name][transl_group]['blocks'][misj_group]['bounds']=[tmp_res[0],tmp_res[1],tmp_res[2],tmp_res[3]] for i in range(len(temp_block_list)): temp_block_list.pop(0) else: structure_dict[cont_name][transl_group]['blocks'][misj_group]['bounds']=[] #-------3. simplify blocks before detecting transposition and inversions temp_errors=[] remove_list=[] nested_frag_list=[] for cont_name in sorted(structure_dict.keys()): for transl_group in sorted(structure_dict[cont_name].keys()): for misj_group in sorted(structure_dict[cont_name][transl_group]['blocks'].keys()): #3.1 take away overlaps between query fragments if len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])>1: for i in range(len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])-1): c1_end=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][1] c2_st=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][0] if c1_end+1>c2_st: # query fragments overlap c2_dir=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][4] r2_st=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][2] r2_end=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][3] errors_2=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][10] c2_coord=c1_end if c2_dir==1: r2_coord, last_err_end=class_interv_coord.FIND_REF_COORD_FORWARD_START(r2_st, c2_st, c2_coord, errors_2) 
structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][0]=c2_coord+1 structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][2]=r2_coord+1 else: #c2_dir==-1 r2_coord, last_err_end=class_interv_coord.FIND_REF_COORD_REVERSE_END(r2_end, c2_st, c2_coord, errors_2) structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][0]=c2_coord+1 structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][3]=r2_coord-1 for entry in errors_2: if entry[0]>last_err_end: temp_errors.append(entry) del structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][10][:] for entry in temp_errors: structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][10].append(entry) del temp_errors[:] #3.2 delete nested reference fragments if len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])>1: flag_rep=1 while flag_rep==1: #redefine num values num_entry=0 for entry in structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks']: entry[9]=num_entry num_entry+=1 temp_sorted=sorted(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'], key=lambda inter:inter[2], reverse=False) for i in range(len(temp_sorted)): structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][temp_sorted[i][9]][9]=i #detect nested reference fragments flag_rep=0 for i in range(len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])-1): r1_st=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][2] r1_end=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][3] for j in range(i+1, len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])): r2_st=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][j][2] r2_end=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][j][3] #find nested ref fragment if r1_st<=r2_st and r1_end>=r2_end: # 
Ri contains or equal to Rj nested_frag_list.append([j,i]) break elif r1_st>=r2_st and r1_end<=r2_end: #Rj contains Ri nested_frag_list.append([i,j]) break if len(nested_frag_list)==1: flag_rep=1 #delete all info about nested fragments j_what=nested_frag_list[0][0] structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'].pop(j_what) nested_frag_list.pop(0) break #3.3 take away overlaps between reference fragments structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks']=sorted(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'], key=lambda inter:inter[2], reverse=False) if len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])>1: for i in range(len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])-1): r1_end=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][3] r2_st=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][2] if r1_end+1>r2_st: # reference fragments overlap c2_dir=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][4] c2_st=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][0] c2_end=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][1] errors_2=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][10] r2_coord=r1_end if c2_dir==1: c2_coord, last_err_end=class_interv_coord.FIND_CONT_COORD_FORWARD_START(r2_st, c2_st, r2_coord, errors_2,c2_end) if c2_coord<c2_end: structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][0]=c2_coord+1 structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][2]=r2_coord+1 for entry in errors_2: if entry[0]>last_err_end: temp_errors.append(entry) del structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][10][:] for entry in temp_errors: 
structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][10].append(entry) del temp_errors[:] else: remove_list.append(i+1) else: #c2_dir==-1 c2_coord, last_err_end=class_interv_coord.FIND_CONT_COORD_REVERSE_END_SECOND(r2_st, c2_end, r2_coord, errors_2) if c2_coord>1: structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][1]=c2_coord-1 structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][2]=r2_coord+1 for entry in errors_2: if entry[0]<last_err_end: temp_errors.append(entry) del structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][10][:] for entry in temp_errors: structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][10].append(entry) del temp_errors[:] else: remove_list.append(i+1) new_list=[] for j in range(len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])): if not j in remove_list: new_list.append(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][j]) del remove_list[:] del structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][:] for entry in new_list: structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'].append(entry) del new_list[:] structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks']=sorted(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'], key=lambda inter:inter[0], reverse=False) #--------4. detect transpositions and inversions blocks_info_dict={} for cont_name in sorted(structure_dict.keys()): transp_gr_num=0 block_num=0 for transl_group in sorted(structure_dict[cont_name].keys()): for misj_group in sorted(structure_dict[cont_name][transl_group]['blocks'].keys()): #4.1. 
redefine num values num_entry=0 for entry in structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks']: entry[9]=num_entry num_entry+=1 temp_sorted=sorted(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'], key=lambda inter:inter[2], reverse=False) for i in range(len(temp_sorted)): structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][temp_sorted[i][9]][9]=i #4.2. check if query fragments have the same order as ref fragments transp_flag=0 for i in range(len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])): if i!=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][9]: transp_flag=1 break #4.3. describe blocks info if transp_flag==1: gr_name='_gr_'+str(transp_gr_num) transp_gr_num+=1 if transp_flag==0 and len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])>1: inv_flag=1 else: inv_flag=0 for i in range(len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])): block_name=block_num block_num+=1 blocks_info_dict[block_name]={} blocks_info_dict[block_name]['block']=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i] blocks_info_dict[block_name]['transp_output']=[] blocks_info_dict[block_name]['invers_output']=[] blocks_info_dict[block_name]['between_output']=[] blocks_info_dict[block_name]['local_output']=[] c_st=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][0] c_end=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][1] ref_name=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][8] ref_st=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][2] ref_end=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][3] num=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][9] c_dir=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][4] # find 
transposition differences if transp_flag==1: err_name='reshuffling-part_'+str(num+1)+gr_name blocks_info_dict[block_name]['transp_output'].append([c_st, c_end, err_name, c_end-c_st+1,'transp',ref_name, ref_st, ref_end]) if len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])>1: #find inversion differences if c_dir==-1: blocks_info_dict[block_name]['invers_output'].append([c_st, c_end, 'inversion', c_end-c_st+1,'transp',ref_name, ref_st, ref_end]) else: if inv_flag==1: blocks_info_dict[block_name]['invers_output'].append([c_st, c_end, 'forward', c_end-c_st+1,'transp',ref_name, ref_st, ref_end]) #find deletion differences based on gaps between reference fragments for entry in structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks']: if num+1==entry[9]: r_st_next=entry[2] if ref_end+1<r_st_next: #if c_dir==1: blocks_info_dict[block_name]['between_output'].append([c_end,c_end, 'deletion', r_st_next-ref_end-1,'transp',ref_name,ref_end+1,r_st_next-1 ]) #else: # blocks_info_dict[block_name]['between_output'].append([max(1,c_st-1),max(1,c_st-1), 'deletion', r_st_next-ref_end-1,'transp',ref_name,ref_end+1,r_st_next-1 ]) #find insertion differences based on gaps between query fragments if i==0: if structure_dict[cont_name][transl_group]['blocks'][misj_group]['output_line']!=[]: st_misj=structure_dict[cont_name][transl_group]['blocks'][misj_group]['output_line'][0] if c_st>st_misj: #if c_dir==1: # blocks_info_dict[block_name]['between_output'].append([st_misj,c_st-1, 'insertion-multiple_copy', c_st-1-st_misj+1,'transp',ref_name,max(1,ref_st-1),max(1,ref_st-1)]) #else: blocks_info_dict[block_name]['between_output'].append([st_misj,c_st-1, 'insertion-multiple_copy', c_st-1-st_misj+1,'transp',ref_name,ref_end,ref_end]) else: if structure_dict[cont_name][transl_group]['output_line']!=[]: st_misj=structure_dict[cont_name][transl_group]['output_line'][0] if c_st>st_misj: #if c_dir==1: # 
blocks_info_dict[block_name]['between_output'].append([st_misj,c_st-1, 'insertion-multiple_copy', c_st-1-st_misj+1,'transp',ref_name,max(1,ref_st-1),max(1,ref_st-1)]) #else: blocks_info_dict[block_name]['between_output'].append([st_misj,c_st-1, 'insertion-multiple_copy', c_st-1-st_misj+1,'transp',ref_name,ref_end,ref_end]) if i!=len(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'])-1: c_st_next=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i+1][0] if c_end+1<c_st_next: seq=contigs_dict[cont_name] space_type=class_interv_coord.ANALYSE_SPACE_SIMB(seq, c_end+1, c_st_next-1) if space_type=='gap': #if c_dir==1: blocks_info_dict[block_name]['between_output'].append([c_end+1,c_st_next-1, 'wrong_gap', c_st_next-c_end-1,'transp',ref_name,ref_end,ref_end ]) #else: # blocks_info_dict[block_name]['between_output'].append([c_end+1,c_st_next-1, 'wrong_gap', c_st_next-c_end-1,'transp',ref_name,max(1,ref_st-1),max(1,ref_st-1)]) elif space_type=='nucleotides': #if c_dir==1: blocks_info_dict[block_name]['between_output'].append([c_end+1,c_st_next-1, 'insertion', c_st_next-c_end-1,'transp',ref_name,ref_end,ref_end ]) #else: # blocks_info_dict[block_name]['between_output'].append([c_end+1,c_st_next-1, 'insertion', c_st_next-c_end-1,'transp',ref_name,max(1,ref_st-1),max(1,ref_st-1)]) else: # space_type='mixed_fragment' errors_list=class_interv_coord.FIND_INSERTION_GAP_INTERVALS(seq, c_end+1, c_st_next-1,'transp') for entry in errors_list: #if c_dir==1: blocks_info_dict[block_name]['between_output'].append([entry[0],entry[1], entry[2], entry[1]-entry[0]+1,'transp',ref_name,ref_end,ref_end ]) #else: # blocks_info_dict[block_name]['between_output'].append([entry[0],entry[1], entry[2], entry[1]-entry[0]+1,'transp',ref_name,max(1,ref_st-1),max(1,ref_st-1)]) else: if structure_dict[cont_name][transl_group]['blocks'][misj_group]['output_line']!=[] : end_misj=structure_dict[cont_name][transl_group]['blocks'][misj_group]['output_line'][1] if 
c_end<end_misj: #if c_dir==1: blocks_info_dict[block_name]['between_output'].append([c_end+1,end_misj, 'insertion-multiple_copy', end_misj-c_end-1+1,'transp', ref_name, ref_end,ref_end ]) #else: # blocks_info_dict[block_name]['between_output'].append([c_end+1,end_misj, 'insertion-multiple_copy', end_misj-c_end-1+1,'transp', ref_name, max(1,ref_st-1),max(1,ref_st-1)]) else: if structure_dict[cont_name][transl_group]['output_line']!=[] : end_misj=structure_dict[cont_name][transl_group]['output_line'][1] if c_end<end_misj: #if c_dir==1: blocks_info_dict[block_name]['between_output'].append([c_end+1,end_misj, 'insertion-multiple_copy', end_misj-c_end-1+1,'transp', ref_name, ref_end,ref_end ]) #else: # blocks_info_dict[block_name]['between_output'].append([c_end+1,end_misj, 'insertion-multiple_copy', end_misj-c_end-1+1,'transp', ref_name, max(1,ref_st-1),max(1,ref_st-1)]) #write down local differences in the local_output field for entry in structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][i][10]: blocks_info_dict[block_name]['local_output'].append(entry) #2.10. 
substitute a list of intervals in misj_group['blocks'] by dict of blocks_info_groups structure_dict[cont_name][transl_group]['blocks'][misj_group].pop('blocks',None) structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks']={} for block_name in list(blocks_info_dict.keys()): structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]={} for field in list(blocks_info_dict[block_name].keys()): structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name][field]=blocks_info_dict[block_name][field] for block_name in list(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'].keys()): structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['between_output']=sorted(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['between_output'], key=lambda inter:inter[0], reverse=False) structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['local_output']=sorted(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['local_output'], key=lambda inter:inter[0], reverse=False) for key in list(blocks_info_dict.keys()): blocks_info_dict.pop(key, None) for cont_name in list(structure_dict.keys()): for transl_group in list(structure_dict[cont_name].keys()): for misj_group in list(structure_dict[cont_name][transl_group]['blocks'].keys()): if len(list(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'].keys()))>1: for block_name in list(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'].keys()): block=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'] temp_block_list.append(block) tmp_res=ref_coord.MERGE_BLOCK_MSJ(temp_block_list) init_res=structure_dict[cont_name][transl_group]['blocks'][misj_group]['bounds'] if init_res!=[tmp_res[0],tmp_res[1],tmp_res[2],tmp_res[3]]: if init_res[0]!=tmp_res[0]: for block_name in 
list(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'].keys()): if structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][0]==tmp_res[0]: if structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][4]==1: ref_pos=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][2] else: ref_pos=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][3] ref_name=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][8] structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['between_output'].append([init_res[0], tmp_res[0]-1,'insertion-multiple_copy',tmp_res[0]-init_res[0],'transp2',ref_name,ref_pos-1,ref_pos-1]) if init_res[1]!=tmp_res[1]: for block_name in list(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'].keys()): if structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][1]==tmp_res[1]: if structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][4]==1: ref_pos=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][3] else: ref_pos=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][2] ref_name=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][8] structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['between_output'].append([tmp_res[1]+1, init_res[1],'insertion-multiple_copy',init_res[1]-tmp_res[1],'transp2',ref_name,ref_pos-1,ref_pos-1]) if init_res[2]!=tmp_res[2]: for block_name in list(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'].keys()): if structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][2]==tmp_res[2]: if 
structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][4]==1: cont_pos=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][0] else: cont_pos=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][1] ref_name=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][8] structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['between_output'].append([cont_pos-1, cont_pos-1,'deletion-collapsed_repeat',tmp_res[2]-init_res[2],'transp2',ref_name,init_res[2],tmp_res[2]-1]) if init_res[3]!=tmp_res[3]: for block_name in list(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'].keys()): if structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][3]==tmp_res[3]: if structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][4]==1: cont_pos=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][1] else: cont_pos=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][0] ref_name=structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block'][8] structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['between_output'].append([cont_pos-1, cont_pos-1,'deletion-collapsed_repeat',init_res[3]-tmp_res[3],'transp2',ref_name,tmp_res[3]+1,init_res[3]]) for i in range(len(temp_block_list)): temp_block_list.pop(0) ''' for cont_name in sorted(structure_dict.keys()): print_flag=0 print '#-------------' print cont_name print for entry in self.intervals[cont_name]: print entry print for transl_group in sorted(structure_dict[cont_name].keys()): print transl_group print 'OUTPUT:', structure_dict[cont_name][transl_group]['output_line'] print 'REASON:', structure_dict[cont_name][transl_group]['reason'] 
print 'BLOCKS:'
                for misj_group in sorted(structure_dict[cont_name][transl_group]['blocks'].keys()):
                    print '\t', misj_group
                    print '\tOUTPUT:', structure_dict[cont_name][transl_group]['blocks'][misj_group]['output_line']
                    print '\tREASON:', structure_dict[cont_name][transl_group]['blocks'][misj_group]['reason']
                    print '\tBLOCKS:'
                    for block_name in sorted(structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'].keys()):
                        print '\t\t', block_name
                        print '\t\tTRANSP_OUTPUT',structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['transp_output']
                        print '\t\tINVERS_OUTPUT',structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['invers_output']
                        print '\t\tBETWEEN_OUTPUT',structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['between_output']
                        print '\t\tLOCAL_OUTPUT',structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['local_output']
                        print '\t\tBLOCK',structure_dict[cont_name][transl_group]['blocks'][misj_group]['blocks'][block_name]['block']
            print
            print
            print
        '''
        # end of the commented-out (triple-quoted) debug dump of structure_dict
        return structure_dict

    def FIND_NON_STRUCTURAL_ERRORS(self, contigs_dict, ref_dict,reloc_dist):
        """Merge adjacent alignment intervals that are separated only by local
        (non-structural) differences.

        For each contig, walks self.intervals pairwise; when
        Interv_coord.FIND_CASE reports 'non_structural', the pair is merged via
        FIND_ERROR_MERGE and the per-case counter in local_case_dict is
        incremented.  self.intervals is replaced with the merged lists.
        Exits via sys.exit() when FIND_ERROR_MERGE returns an empty interval
        (unknown case).
        """
        local_case_dict={'1.2':0,'1.3':0,'1.4':0,'4.8':0,'4.9':0,'4.10':0,'5.2':0,'5.3':0,'5.4':0,'8.8':0,'8.9':0,'8.10':0,'9.2':0,'9.3':0,'9.4.1':0,'9.4.2':0,'9.4.3':0,'9.4.4':0,\
                        '12.8.1':0,'12.8.2':0,'12.8.3':0,'12.8.4':0,'12.9':0,'12.10':0}
        non_structural_errors=class_errors.Errors()   # NOTE(review): constructed but not used in this method
        new_intervals={}
        #1. assign a number to an interval that shows the order of this interval if entries were sorted by the reference start coordinate
        self.FIND_REF_ORDER_NUM()
        for cont_name in list(self.intervals.keys()):
            new_intervals[cont_name]=[]
            contig_sequence=contigs_dict[cont_name]
            if len(self.intervals[cont_name])==1:
                new_intervals[cont_name].append(self.intervals[cont_name][0])
            else:
                # 2. for each entry pair search for non-structural differences and resolve them
                cur_interv=[]
                for entry in self.intervals[cont_name][0]:
                    cur_interv.append(entry)
                for i in range(1,len(self.intervals[cont_name])):
                    next_interv=self.intervals[cont_name][i]
                    a=class_interv_coord.Interv_coord(cur_interv)
                    b=class_interv_coord.Interv_coord(next_interv)
                    interv_case, interv_type=a.FIND_CASE(b,reloc_dist)
                    if interv_type=='non_structural':
                        reference_seq=ref_dict[cur_interv[8]]   # field [8] is the reference name
                        cur_interv, new_interv_case=a.FIND_ERROR_MERGE(b,interv_case, contig_sequence, reference_seq,cont_name)
                        local_case_dict[new_interv_case]+=1
                        if cur_interv==[]:
                            print(self.intervals[cont_name])
                            sys.exit('ERROR: '+interv_case+' unknown case, no merging is provided')
                    else:
                        # structural boundary: flush the accumulated interval and
                        # continue from next_interv (copied field-by-field in place;
                        # assumes len(cur_interv) >= len(next_interv) -- TODO confirm)
                        temp_interv=[]
                        for entry in cur_interv:
                            temp_interv.append(entry)
                        new_intervals[cont_name].append(temp_interv)
                        for j in range(len(next_interv)):
                            cur_interv[j]=next_interv[j]
                new_intervals[cont_name].append(cur_interv)
        for cont_name in list(self.intervals.keys()):
            self.intervals[cont_name]=new_intervals[cont_name]

    def FIND_REF_ORDER_NUM(self):
        """Store in field [9] of every interval its [ref_index, rank], where rank
        is the interval's position when all intervals of the same reference are
        ordered by reference start coordinate (field [2])."""
        ref_interv_dict={}
        for cont_name in list(self.intervals.keys()):
            for i in range(len(self.intervals[cont_name])):
                #1. a temp value of query fragment order
                self.intervals[cont_name][i].append(i)
                #2. find reference seq names and their corresponding intervals
                if self.intervals[cont_name][i][8] not in ref_interv_dict:
                    ref_interv_dict[self.intervals[cont_name][i][8]]=[self.intervals[cont_name][i]]
                else:
                    ref_interv_dict[self.intervals[cont_name][i][8]].append(self.intervals[cont_name][i])
            #3.
find order of fragments on ref sequences
            ref_num=-1
            for ref_name in list(ref_interv_dict.keys()):
                ref_num+=1
                # sort this reference's intervals by reference start (field [2])
                ref_interv_dict[ref_name]=sorted(ref_interv_dict[ref_name], key=lambda inter:inter[2], reverse=False)
                #4.make the correspondence between ref order and query order for self.intervals elements
                for i in range(len(ref_interv_dict[ref_name])):
                    ref_order_num=[ref_num,i]
                    # [11] is the temporary query-order index appended in step 1
                    self_interv_num=ref_interv_dict[ref_name][i][11]
                    self.intervals[cont_name][self_interv_num][9]=ref_order_num
            #5. delete all elements from ref_interv_dict
            for ref_name in list(ref_interv_dict.keys()):
                ref_interv_dict.pop(ref_name, None)
        #6. delete temporal element from self.intervals elements
        for cont_name in list(self.intervals.keys()):
            for i in range(len(self.intervals[cont_name])):
                self.intervals[cont_name][i].pop(11)

    def IMPROVE_PARSING_RESULTS(self, contigs_dict, ref_dict):
        """Normalise self.intervals before difference calling: sort per-contig
        intervals by query start, renumber them by reference order, then (in the
        continuation of this method) drop nested query/reference fragments and
        collect contig-end differences.

        NOTE(review): contigs_dict and ref_dict are accepted but not referenced
        in the visible body of this method -- confirm against the full file.
        """
        for cont_name in list(self.intervals.keys()):
            # sort each contig's intervals by query start coordinate (field [0])
            self.intervals[cont_name]=sorted(self.intervals[cont_name], key=lambda inter:inter[0], reverse=False)
        #1. assign a number to an interval that shows the order of this interval if the entries were sorted by a reference start coordinate
        self.FIND_REF_ORDER_NUM()
        #2.
delete or modify elements with nested query coordinates
        # interval field map (from usage): [0]/[1] query start/end, [2]/[3] ref
        # start/end, [4] direction, [6] full query length (presumably -- TODO
        # confirm), [8] reference name, [9] order number, [10] local errors
        nested_frag_list=[]
        nested_transloc_list=[]
        count_dict={}
        end_err_dict={}
        for cont_name in list(self.intervals.keys()):
            end_err_dict[cont_name]={'wrong_beginning':[], 'wrong_end':[],'duplication':[]}
            if self.intervals[cont_name][0][0]>1:
                # unaligned prefix before the first mapped fragment
                end_err_dict[cont_name]['wrong_beginning'].append([1,self.intervals[cont_name][0][0]-1,'wrong_beginning',self.intervals[cont_name][0][0]-1,'ends'])
            if self.intervals[cont_name][-1][1]<self.intervals[cont_name][-1][6]:
                # unaligned suffix after the last mapped fragment
                end_err_dict[cont_name]['wrong_end'].append([self.intervals[cont_name][-1][1]+1,self.intervals[cont_name][-1][6],'wrong_end',self.intervals[cont_name][-1][6]-self.intervals[cont_name][-1][1],'ends'])
            if len(self.intervals[cont_name])>1:
                flag_rep=1
                # repeat these steps until all query overlaps are resolved
                while flag_rep==1:
                    #1. detect query fragment overlaps
                    flag_rep=0
                    #2. assign a number to an interval that shows the order of this interval if the entries were sorted by a reference start coordinate
                    self.FIND_REF_ORDER_NUM()
                    for i in range(len(self.intervals[cont_name])-1):
                        c_st1=self.intervals[cont_name][i][0]
                        c_end1=self.intervals[cont_name][i][1]
                        r_st1=self.intervals[cont_name][i][2]
                        r_end1=self.intervals[cont_name][i][3]
                        num1=self.intervals[cont_name][i][9]
                        for j in range(i+1,len(self.intervals[cont_name])):
                            c_st2=self.intervals[cont_name][j][0]
                            c_end2=self.intervals[cont_name][j][1]
                            r_st2=self.intervals[cont_name][j][2]
                            r_end2=self.intervals[cont_name][j][3]
                            num2=self.intervals[cont_name][j][9]
                            if c_st1<=c_st2 and c_end1>=c_end2 and self.intervals[cont_name][i][8]==self.intervals[cont_name][j][8]:#C1 contains C2 or C1=C2
                                nested_frag_list.append([j,i])# j in i
                                break
                            elif c_st1>=c_st2 and c_end1<=c_end2 and self.intervals[cont_name][i][8]==self.intervals[cont_name][j][8]:#C2 contains C1:
                                nested_frag_list.append([i,j])
                                break
                            elif c_st1<=c_st2 and c_end1>=c_end2 and self.intervals[cont_name][i][8]!=self.intervals[cont_name][j][8]:#C1 contains C2 or C1=C2 , different ref
                                nested_transloc_list.append([j,i])
                                break
                            elif c_st1>=c_st2 and c_end1<=c_end2 and self.intervals[cont_name][i][8]!=self.intervals[cont_name][j][8]:#C2 contains C1, different ref
                                nested_transloc_list.append([i,j])
                                break
                            elif c_st2>c_end1:
                                # intervals are sorted by query start: no later j can overlap i
                                break
                        if len(nested_transloc_list)==1:
                            # nested fragments mapped to different references: keep the
                            # fragment on the reference with more total aligned bases
                            # (ties broken by longer query span)
                            for kj in range(len(self.intervals[cont_name])):
                                if self.intervals[cont_name][kj][8] not in count_dict:
                                    count_dict[self.intervals[cont_name][kj][8]]=0
                                count_dict[self.intervals[cont_name][kj][8]]+=self.intervals[cont_name][kj][3]-self.intervals[cont_name][kj][2]+1
                            flag_rep=1
                            first_ref=self.intervals[cont_name][nested_transloc_list[0][0]][8]
                            second_ref=self.intervals[cont_name][nested_transloc_list[0][1]][8]
                            if count_dict[first_ref]>count_dict[second_ref]:
                                self.intervals[cont_name].pop(nested_transloc_list[0][1])
                            elif count_dict[first_ref]<count_dict[second_ref]:
                                self.intervals[cont_name].pop(nested_transloc_list[0][0])
                            else:
                                if self.intervals[cont_name][nested_transloc_list[0][0]][1]-self.intervals[cont_name][nested_transloc_list[0][0]][0]+1>=self.intervals[cont_name][nested_transloc_list[0][1]][1]-self.intervals[cont_name][nested_transloc_list[0][1]][0]+1:
                                    self.intervals[cont_name].pop(nested_transloc_list[0][1])
                                else:
                                    self.intervals[cont_name].pop(nested_transloc_list[0][0])
                            count_dict.clear()
                            nested_transloc_list.pop(0)
                            break
                        elif len(nested_frag_list)==1:
                            flag_rep=1
                            #4. delete contained fragment
                            self.intervals[cont_name].pop(nested_frag_list[0][0])
                            #5. empty nested_frag_list
                            nested_frag_list.pop(0)
                            break
                flag_rep=1
                # repeat these steps until all reference overlaps are resolved
                while flag_rep==1:
                    #1. assign a number to an interval that shows the order of this interval if the entries were sorted by a reference start coordinate
                    self.FIND_REF_ORDER_NUM()
                    #2. detect reference fragments overlap
                    flag_rep=0
                    for i in range(len(self.intervals[cont_name])-1):
                        c_st1=self.intervals[cont_name][i][0]
                        c_end1=self.intervals[cont_name][i][1]
                        r_st1=self.intervals[cont_name][i][2]
                        r_end1=self.intervals[cont_name][i][3]
                        cont_dir1=self.intervals[cont_name][i][4]   # NOTE(review): read but unused here
                        num1=self.intervals[cont_name][i][9]
                        for j in range(i+1,len(self.intervals[cont_name])):
                            c_st2=self.intervals[cont_name][j][0]
                            c_end2=self.intervals[cont_name][j][1]
                            r_st2=self.intervals[cont_name][j][2]
                            r_end2=self.intervals[cont_name][j][3]
                            cont_dir2=self.intervals[cont_name][j][4]
                            num2=self.intervals[cont_name][j][9]
                            #find nested ref fragment
                            if r_st1<=r_st2 and r_end1>=r_end2 and self.intervals[cont_name][i][8]==self.intervals[cont_name][j][8]: # Ri contains or equal to Rj
                                nested_frag_list.append(j)
                                break
                            elif r_st1>=r_st2 and r_end1<=r_end2 and self.intervals[cont_name][i][8]==self.intervals[cont_name][j][8]: #Rj contains Ri
                                nested_frag_list.append(i)
                                break
                        if len(nested_frag_list)==1:
                            flag_rep=1
                            #delete all info about nested fragments
                            self.intervals[cont_name].pop(nested_frag_list[0])
                            nested_frag_list.pop(0)
                            break
        for cont_name in list(self.intervals.keys()):
            self.intervals[cont_name]=sorted(self.intervals[cont_name], key=lambda inter:inter[0], reverse=False)
            # after overlap resolution the contig ends may have grown: record the
            # newly exposed regions as 'duplication' entries
            if self.intervals[cont_name][0][0]>1:
                if end_err_dict[cont_name]['wrong_beginning']==[]:
                    err_st=1
                    end_err_dict[cont_name]['duplication'].append([err_st,self.intervals[cont_name][0][0]-1,'wrong_beginning',self.intervals[cont_name][0][0]-err_st,'ends'])
                else:
                    if self.intervals[cont_name][0][0]>end_err_dict[cont_name]['wrong_beginning'][0][1]+1:
                        err_st=end_err_dict[cont_name]['wrong_beginning'][0][1]+1
                        end_err_dict[cont_name]['duplication'].append([err_st,self.intervals[cont_name][0][0]-1,'wrong_beginning',self.intervals[cont_name][0][0]-err_st,'ends'])
            if self.intervals[cont_name][-1][1]<self.intervals[cont_name][-1][6]:
                if end_err_dict[cont_name]['wrong_end']==[]:
                    err_end=self.intervals[cont_name][-1][6]
end_err_dict[cont_name]['duplication'].append([self.intervals[cont_name][-1][1]+1,err_end,'wrong_end',err_end-self.intervals[cont_name][-1][1],'ends']) else: if self.intervals[cont_name][-1][1]<end_err_dict[cont_name]['wrong_end'][0][0]-1: err_end=end_err_dict[cont_name]['wrong_end'][0][0]-1 end_err_dict[cont_name]['duplication'].append([self.intervals[cont_name][-1][1]+1,err_end,'wrong_end',err_end-self.intervals[cont_name][-1][1],'ends']) for cont_name in list(self.intervals.keys()): for i in range(len(self.intervals[cont_name])): if self.intervals[cont_name][i][10]!=[]: self.intervals[cont_name][i][10]=sorted(self.intervals[cont_name][i][10], key=lambda inter:inter[0], reverse=False) return end_err_dict def FIND_UNMAPPED_CONTIGS(self, contigs_dict): overlap_errors=class_errors.Errors() unmapped_list=[] #1. find unmapped contigs for cont_name in list(contigs_dict.keys()): if cont_name not in self.intervals: unmapped_list.append(cont_name) return unmapped_list def FIND_WRONG_END(self,structure_dict): end_err_dict={} for cont_name in list(structure_dict.keys()): end_err_dict[cont_name]={'wrong_beginning':[], 'wrong_end':[]} first_entry=structure_dict[cont_name][0]['blocks'][0]['blocks'][0]['block'] last_trl=sorted(structure_dict[cont_name].keys())[-1] last_msj=sorted(structure_dict[cont_name][last_trl]['blocks'].keys())[-1] last_blk=sorted(structure_dict[cont_name][last_trl]['blocks'][last_msj]['blocks'].keys())[-1] last_entry=structure_dict[cont_name][last_trl]['blocks'][last_msj]['blocks'][last_blk]['block'] if first_entry[0]>1: for entry in [1,first_entry[0]-1,'wrong_beginning',first_entry[0]-1,'nuc12']: end_err_dict[cont_name]['wrong_beginning'].append(entry) if last_entry[1]<last_entry[6]: for entry in [last_entry[1]+1,last_entry[6],'wrong_end',last_entry[6]-last_entry[1],'nuc12']: end_err_dict[cont_name]['wrong_end'].append(entry) return end_err_dict def FIND_UNCOVERED_REF_REGIONS(self, ref_dict): intervals_dict={} uncov_dict={} for ref_name in 
list(ref_dict.keys()):
            intervals_dict[ref_name]=[]
            uncov_dict[ref_name]=[]
        for cont_name in list(self.intervals.keys()):
            for entry in self.intervals[cont_name]:
                ref_name=entry[8]
                # collect [ref_start, ref_end] of every mapped fragment per reference
                intervals_dict[ref_name].append([entry[2],entry[3]])
        for ref_name in list(intervals_dict.keys()):
            if intervals_dict[ref_name]!=[]:
                # merge overlapping fragments, then invert to get zero-coverage regions
                intervals_dict[ref_name]=sorted(intervals_dict[ref_name],key=lambda inter:inter[0], reverse=False)
                overlap_list=FIND_OVERLAP_INTERVALS(intervals_dict[ref_name])
                uncov_dict[ref_name]=FIND_ZERO_COV(overlap_list,len(ref_dict[ref_name]))
        return uncov_dict

    def FIND_ERRORS(self, file_contigs, file_ref,reloc_dist):
        """Top-level difference caller: read query and reference FASTA files,
        then run the pipeline (unmapped contigs -> nested-fragment cleanup ->
        local differences -> structural differences).

        Returns (struct_dict, end_err_dict, unmapped_list, uncovered_dict); the
        return statement lies just past this chunk boundary.
        """
        contigs_dict, contig_seqs, contig_names, contigs_full_names_dict=general.READ_FASTA_ENTRY(file_contigs)
        ref_dict, ref_seqs, ref_names, ref_full_names_dict=general.READ_FASTA_ENTRY(file_ref)
        #1. find unmapped query sequences
        unmapped_list=self.FIND_UNMAPPED_CONTIGS(contigs_dict)
        uncovered_dict={}#self.FIND_UNCOVERED_REF_REGIONS(ref_dict )
        #2. delete nested query and references fragments
        end_err_dict=self.IMPROVE_PARSING_RESULTS(contigs_dict, ref_dict)
        #3. find local differences
        self.FIND_NON_STRUCTURAL_ERRORS(contigs_dict, ref_dict,reloc_dist)
        #4. check difference correctness
        '''
        for cont_name in self.intervals.keys():
            first_simb=self.intervals[cont_name][0][0]
            if first_simb>1:
                self.intervals[cont_name][0][10].append([1,first_simb-1,'wrong_beginning', first_simb-1-1+1, 'nuc12'])
            last_simb=self.intervals[cont_name][-1][1]
            len_cont=self.intervals[cont_name][-1][6]
            if last_simb<len_cont:
                self.intervals[cont_name][-1][10].append([last_simb+1,len_cont,'wrong_end',len_cont-last_simb-1+1, 'nuc13' ])
            self.intervals[cont_name][0][10]=sorted(self.intervals[cont_name][0][10], key=lambda inter:inter[0], reverse=False)
            self.intervals[cont_name][-1][10]=sorted(self.intervals[cont_name][-1][10], key=lambda inter:inter[0], reverse=False)
        '''
        #5.
find structural differences struct_dict=self.FIND_STRUCTURAL_ERRORS(contigs_dict, ref_dict,reloc_dist) #end_err_dict=self.FIND_WRONG_END(struct_dict) return struct_dict,end_err_dict,unmapped_list,uncovered_dict #------------------------------------ def FIND_ZERO_COV(overlap_list,len_seq): uncov_list=[] if overlap_list==[]: uncov_list.append([1,len_seq]) else: if overlap_list[0][0]>1: uncov_list.append([1,overlap_list[0][0]-1]) for i in range(len(overlap_list)-1): entry_1=overlap_list[i] entry_2=overlap_list[i+1] if entry_2[0]>entry_1[1]+1: uncov_list.append([entry_1[1]+1,entry_2[0]-1]) if overlap_list[-1][1]<len_seq: uncov_list.append([overlap_list[-1][1]+1,len_seq]) return uncov_list def FIND_OVERLAP_INTERVALS(interv_list): if interv_list==[]: result_list=[] else: if len(interv_list)==1: result_list=interv_list else: result_list=[] st_interv=interv_list[0][0] end_interv=interv_list[0][1] for entry in interv_list[1:]: st_new=entry[0] end_new=entry[1] if end_interv<st_new: result_list.append([st_interv, end_interv]) st_interv=st_new end_interv=end_new else: end_interv=max(end_interv,end_new) result_list.append([st_interv, end_interv]) return result_list def FIND_GAP_POS(seq, start_seq, end_seq): gap_list=[] st=-1 end=-1 for i in range(start_seq, end_seq+1): if seq[i] in 'Nn': if st==-1: st=i end=i else: end=i else: if st==-1: a='do nothing' else: gap_list.append([st,end]) st=-1 end=-1 if st!=-1: gap_list.append([st,end]) return gap_list def FIND_GAPS(seq,start_seq,end_seq,snps_errors_list): gap_list=[] gap_interv=[] #before diff if snps_errors_list==[]: gap_interv.append([0,len(seq)-1]) else: if snps_errors_list[0][0]>start_seq: gap_interv.append([0,snps_errors_list[0][0]-start_seq-1]) #after diff if snps_errors_list[-1][1]<end_seq: gap_interv.append([snps_errors_list[-1][1]-start_seq+1, end_seq-start_seq]) #between diff if len(snps_errors_list)>1: for i in range(len(snps_errors_list)-1): interv_st=snps_errors_list[i][1]-start_seq+1 
interv_end=snps_errors_list[i+1][0]-start_seq-1 if interv_st<=interv_end: gap_interv.append([interv_st,interv_end]) #inside diff for i in range(len(snps_errors_list)): cur_err=snps_errors_list[i] if cur_err[2]=='deletion': if i==0: if i<len(snps_errors_list)-1: next_err=snps_errors_list[i+1] if next_err[0]>cur_err[1]: gap_interv.append([cur_err[0]-start_seq, cur_err[0]-start_seq]) else: gap_interv.append([cur_err[0]-start_seq, cur_err[0]-start_seq]) elif i==len(snps_errors_list)-1: if i!=0: prev_err=snps_errors_list[i-1] if prev_err[1]<cur_err[0]: gap_interv.append([cur_err[0]-start_seq, cur_err[0]-start_seq]) else: next_err=snps_errors_list[i+1] prev_err=snps_errors_list[i-1] if next_err[0]>cur_err[1] and prev_err[1]<cur_err[0]: gap_interv.append([cur_err[0]-start_seq, cur_err[0]-start_seq]) for interv in gap_interv: gap_cur_list=FIND_GAP_POS(seq, interv[0], interv[1]) for entry in gap_cur_list: gap_list.append([entry[0]+start_seq, entry[1]+start_seq, 'gap', entry[1]-entry[0]+1, 'snps']) gap_list=sorted(gap_list, key=lambda inter:inter[0], reverse=False) return gap_list def FIND_MISJOIN_GROUP(group_list,reloc_dist): #2.1 redefine num value num_entry=0 for entry in group_list: entry[9]=num_entry num_entry+=1 temp_sorted=sorted(group_list, key=lambda inter:inter[2], reverse=False) for i in range(len(temp_sorted)): group_list[temp_sorted[i][9]][9]=i #2.2 sorted intervals in transl_group by ref coordinate group_list=sorted(group_list, key=lambda inter:inter[9], reverse=False) #2.3 Examine the distance between ref fragments. 
If they are less than reloc_dist, these entries are placed in one group cur_temp_group=0 temp_group_list=[[group_list[0][9]]] for i in range(len(group_list)-1): if group_list[i+1][2]-group_list[i][3]<=reloc_dist: temp_group_list[cur_temp_group].append(group_list[i+1][9]) else: temp_group_list.append([group_list[i+1][9]]) cur_temp_group+=1 #2.4 sorted intervals by a query coord group_list=sorted(group_list, key=lambda inter:inter[0], reverse=False) #2.5. temporary add a new variable to the intervals showing the number of temp_group for entry in group_list: for i in range(len(temp_group_list)): if entry[9] in temp_group_list[i]: entry.append(i) break del temp_group_list[:] #2.6 find misjoin_groups. Delete temp variable temp_group num if cur_temp_group==0: result_gr_list=[] result_gr_list.append(group_list) else: cur_gr=0 temp_group_list.append([group_list[0]]) for i in range(len(group_list)-1): if group_list[i][11]==group_list[i+1][11]: temp_group_list[cur_gr].append(group_list[i+1]) else: cur_gr+=1 temp_group_list.append([group_list[i+1]]) for misj_group in temp_group_list: for entry in misj_group: entry.pop(11) result_gr_list=[] for misj_group in temp_group_list: new_gr_list=FIND_MISJOIN_GROUP(misj_group,reloc_dist) for entry in new_gr_list: result_gr_list.append(entry) return result_gr_list def PARSE_SNPS_FILE(snps_file,snps_raw_dict): f=open(snps_file,'r') lines=f.readlines() for line in lines: temp=line[:-1].split() ref_pos=int(temp[0]) ref_simb=temp[1] cont_simb=temp[2] cont_pos=int(temp[3]) cont_dir=int(temp[9]) ref_name=temp[10] cont_name=temp[11] snps_raw_dict[cont_name].append([cont_pos, ref_pos, cont_simb, ref_simb, ref_name, cont_dir]) f.close() return snps_raw_dict def CHECK_OVERL_FRAG(coord_line,coord_lines_list): cont_name=coord_line[0] c_st=coord_line[1][0] c_end=coord_line[1][1] r_st=coord_line[1][2] r_end=coord_line[1][3] r_name=coord_line[1][4] flag=0 for entry in coord_lines_list: if cont_name==entry[0] and r_name==entry[1][4]: if not 
(c_end<entry[1][0] or c_st>entry[1][1]): if not (r_end<entry[1][2] or r_st>entry[1][3]): flag=1 break return flag def FIND_SNPS_TYPES(lines_error_list): snps_errors_list=[] snps_dict={'insertion':[], 'wrong_gap':[], 'gap':[], 'substitution':[],'deletion':[] } #find differences for each base for line in lines_error_list: cont_simb=line[2] ref_simb=line[3] cont_pos=line[0] if cont_simb in 'ATGCatgc': if ref_simb in 'ATGCatgc': error='substitution' elif ref_simb in 'NnQWERYUIOPSDFHJKLZXVBMqweryuiopsdfhjklzxvbm': error='no error' elif ref_simb=='.': error='insertion' elif cont_simb in 'QWERYUIOPSDFHJKLZXVBMqweryuiopsdfhjklzxvbm': if ref_simb in 'ATGCatgcNnQWERYUIOPSDFHJKLZXVBMqweryuiopsdfhjklzxvbm': error='no error' elif ref_simb=='.': error='insertion' elif cont_simb in 'Nn': if ref_simb in 'ATGCatgcNnQWERYUIOPSDFHJKLZXVBMqweryuiopsdfhjklzxvbm': error='no error' elif ref_simb=='.': error='wrong_gap' elif cont_simb=='.': if ref_simb in 'ATGCatgcNnQWERYUIOPSDFHJKLZXVBMqweryuiopsdfhjklzxvbm': error='deletion' elif ref_simb=='.': error='no error' else: print(ref_simb, cont_simb) sys.exit('ERROR: unknown case in snps file during parsing') if error!='no error': snps_dict[error].append(cont_pos) #merge differences into intervals for err in list(snps_dict.keys()): snps_dict[err]=sorted(snps_dict[err]) for err in ['insertion', 'substitution', 'wrong_gap', 'gap']: if len(snps_dict[err])>1: cur_st=-1 cur_end=-1 cur_len=-1 for el in snps_dict[err]: if cur_len==-1: cur_st=el cur_end=el cur_len=1 else: if el==cur_end+1: cur_end+=1 cur_len+=1 else: snps_errors_list.append([cur_st,cur_end, err, cur_len, 'snps']) cur_st=el cur_end=el cur_len=1 snps_errors_list.append([cur_st,cur_end, err, cur_len, 'snps']) elif len(snps_dict[err])==1: snps_errors_list.append([snps_dict[err][0],snps_dict[err][0], err, 1, 'snps']) del_list=[] if len(snps_dict['deletion'])==1: cont_st=snps_dict['deletion'][0] del_list.append([cont_st,1]) elif len(snps_dict['deletion'])>1: cur_st=-1 cur_len=-1 for el in 
snps_dict['deletion']: if cur_len==-1: cur_st=el cur_len=1 else: if el==cur_st: cur_len+=1 else: del_list.append([cur_st,cur_len]) cur_st=el cur_len=1 del_list.append([cur_st,cur_len]) new_err=[] for entry in del_list: el=entry[0] for i in range(len(snps_errors_list)): err=snps_errors_list[i] if err[0]<=el and err[1]>el: new_err.append([el+1,snps_errors_list[i][1],snps_errors_list[i][2],snps_errors_list[i][1]-el,'snps']) snps_errors_list[i][1]=el snps_errors_list[i][3]=el-snps_errors_list[i][0]+1 snps_errors_list.append([entry[0],entry[0],'deletion',entry[1],'snps']) for el in new_err: snps_errors_list.append(el) snps_errors_list=sorted(snps_errors_list,key=lambda inter:inter[0], reverse=False) return snps_errors_list def FIND_SNPS_SINGLE(input_list): coord_lines_list=input_list[0] delta_file=input_list[1] prefix=input_list[2] unique_name=input_list[3] snp_file=prefix+'_'+str(unique_name)+'.snps' coord_file=prefix+'_'+str(unique_name)+'.coord' f=open(coord_file,'w') frag_dict={} for entry in coord_lines_list: cont_name=entry[0] frag_line=entry[1] f.write(frag_line[5][0]) if cont_name not in frag_dict: frag_dict[cont_name]=[] frag_dict[cont_name].append(frag_line) f.close() f=open(snp_file, 'w') f_in=open(coord_file,'r') try: subprocess.check_call(['show-snps', '-SqHT', delta_file], stdin=f_in, stdout=f) except subprocess.CalledProcessError: return 'failed' f.close() f_in.close() snps_raw_dict={} for cont_name in list(frag_dict.keys()): snps_raw_dict[cont_name]=[] PARSE_SNPS_FILE(snp_file,snps_raw_dict) frag_num_list=[] for cont_name in list(snps_raw_dict.keys()): for snp in snps_raw_dict[cont_name]: for i in range(len(frag_dict[cont_name])): frg=frag_dict[cont_name][i] if frg[4]==snp[4] and snp[0]>=frg[0] and snp[0]<=frg[1]and snp[1]>=frg[2] and snp[1]<=frg[3]: frag_num_list.append(i) if len(frag_num_list)>1: answ=CHECK_OVERL_FRAG([cont_name,frag_dict[cont_name][frag_num_list[0]]],[[cont_name,frag_dict[cont_name][frag_num_list[0]]]]) elif len(frag_num_list)==0: 
a='do_nothing' else: frag_dict[cont_name][frag_num_list[0]][7].append(snp) for i in range(len(frag_num_list)): frag_num_list.pop(0) for cont_name in list(frag_dict.keys()): for i in range(len(frag_dict[cont_name])): frag_dict[cont_name][i][7]=FIND_SNPS_TYPES(frag_dict[cont_name][i][7]) return frag_dict def FIND_SNPS(frag_dict,coord_file, delta_file,prefix,proc_num, file_contigs): snp_file=prefix+'_filtered.snps' f=open(snp_file, 'w') f_in=open(coord_file,'r') f.close() f_in.close() input_list=[] raund_num=0 flag=0 coord_lines_list=[] temp_list=[] while flag!=1: ind=0 for cont_name in list(frag_dict.keys()): ind+=1 for i in range(len(frag_dict[cont_name])): if frag_dict[cont_name][i][6]==0: if frag_dict[cont_name][i][5]==[]: frag_dict[cont_name][i][6]=1 else: answ=CHECK_OVERL_FRAG([cont_name,frag_dict[cont_name][i]],temp_list) if answ==0: temp_list.append([cont_name,deepcopy(frag_dict[cont_name][i])]) frag_dict[cont_name][i][6]=1 for entry in temp_list: coord_lines_list.append(entry) for j in range(len(temp_list)): temp_list.pop(0) raund_num+=1 if coord_lines_list==[]: flag=1 else: input_list.append([[],delta_file,prefix,raund_num]) for entry in coord_lines_list: input_list[-1][0].append(entry) for j in range(len(coord_lines_list)): coord_lines_list.pop(0) #pool=multiprocessing.Pool(processes=proc_num) #output_list=pool.map(FIND_SNPS_SINGLE,input_list) for input_entry in input_list: frag_dict_part=FIND_SNPS_SINGLE(input_entry) if frag_dict_part=='failed': import sys sys.exit(0) for cont_name in list(frag_dict_part.keys()): for new_entry in frag_dict_part[cont_name]: for i in range(len(frag_dict[cont_name])): if new_entry[:5]==frag_dict[cont_name][i][:5]: for err in new_entry[7]: frag_dict[cont_name][i][7].append(err) break for cont_name in list(frag_dict.keys()): for entry in frag_dict[cont_name]: if entry[6]==0: print('ERROR: 0 instead 1') print(cont_name, entry) def MERGE_GAPS(err_list): new_err_list=[] if len(err_list)>2: for i in range(len(err_list)-2): if 
err_list[i][0]!=-1: if err_list[i][2]!='gap': new_err_list.append(err_list[i]) else: if err_list[i+1][2]=='gap' and err_list[i][1]+1==err_list[i+1][0] and err_list[i+1][3]==1: if err_list[i+2][2]=='deletion' and err_list[i+1][0]==err_list[i+2][0]: err_list[i][1]+=1 err_list[i][3]+=1 err_list[i+1][0]=-1 new_err_list.append(err_list[i]) else: new_err_list.append(err_list[i]) else: new_err_list.append(err_list[i]) for i in range(len(err_list)-2,len(err_list)): if err_list[i][0]!=-1: new_err_list.append(err_list[i]) new_err_list=sorted(new_err_list, key=lambda inter:inter[0], reverse=False) else: new_err_list=err_list return new_err_list
PypiClean
/Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/LBFGS_scipy_vsNuMPI.py
"""Benchmark NuMPI's LBFGS against scipy's L-BFGS-B on a smooth adhesive
contact problem, and plot execution time / iteration counts vs. grid size.

Produces LBFGS_scipy_vs_NuMPI.png.
"""
import time
import scipy.optimize
starttime=time.time()
import numpy as np
from ContactMechanics import FreeFFTElasticHalfSpace
from SurfaceTopography import make_sphere
from FFTEngine import PFFTEngine
from NuMPI.Optimization import LBFGS
from NuMPI.Tools.Reduction import Reduction
from Adhesion import VDW82smoothMin
from System import SmoothContactSystem

from NuMPI import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
pnp = Reduction(comm=comm)


class iter_inspector():
    """Callback object recording per-call energy and max |force|."""
    def __init__(self):
        self.neval = 0
        self.energies = []
        self.maxgradients = []

    def __call__(self, system):
        self.neval += 1
        self.energies.append(system.energy)
        self.maxgradients.append(pnp.max(abs(system.force)))


class decorated_objective:
    """Wrap an objective, counting calls and recording the system's energy
    and max |force| after each evaluation."""
    def __init__(self, system, objective):
        self.system = system
        self.objective = objective
        self.neval = 0
        self.energies = []
        self.maxgradients = []

    def __call__(self, *args, **kwargs):
        val = self.objective(*args, **kwargs)
        self.neval += 1
        # BUGFIX: previously read the module-level global `system` instead of
        # self.system; that only worked by accident because the script's loop
        # happens to pass the same global in.
        self.energies.append(self.system.energy)
        self.maxgradients.append(pnp.max(abs(self.system.force)))
        return val


import matplotlib.pyplot as plt
fig, (axt, axit) = plt.subplots(2, 1, sharex=True)

ns = [128, 256, 512]      # lateral grid sizes to benchmark
nrepetition = 2           # repetitions per grid size (timings are averaged)
for method, name in zip([LBFGS, "L-BFGS-B"], ["NuMPI", "Scipy"]):
    times = np.zeros((len(ns), nrepetition))
    nits = np.zeros((len(ns), nrepetition))
    nevals = np.zeros((len(ns), nrepetition))
    for i, n in enumerate(ns):
        # sphere radius:
        r_s = 10.0
        # contact radius (unused below, kept for reference)
        r_c = .2
        # peak pressure (unused below, kept for reference)
        p_0 = 2.5
        # equivalent Young's modulus
        E_s = 102.#102.
        # work of adhesion
        w = 1.0
        # tolerance for optimizer (unused; gtol/ftol are set inline below)
        tol = 1e-12
        # tolerance for contact area
        gap_tol = 1e-6

        nx, ny = n, n
        sx = 21.0

        z0 = 0.05 # needed to get small tolerance, but very very slow

        # Doubled domain for the free (non-periodic) half-space.
        fftengine = PFFTEngine((2*nx, 2*ny), comm=comm)

        # the "Min" part of the potential (linear for small z) is needed for the LBFGS without bounds
        inter = VDW82smoothMin(w * z0 ** 8 / 3, 16 * np.pi * w * z0 ** 2,
                               gamma=w, pnp=pnp)

        # Parallel SurfaceTopography Patch
        substrate = FreeFFTElasticHalfSpace((nx, ny), young=E_s,
                                            physical_sizes=(sx, sx),
                                            fft=fftengine, pnp=pnp)
        #print(substrate._comp_nb_grid_pts)
        #print(fftengine.nb_domain_grid_pts)

        surface = make_sphere(radius=r_s, nb_grid_pts=(nx, ny),
                              physical_sizes=(sx, sx),
                              subdomain_locations=substrate.topography_subdomain_locations,
                              nb_subdomain_grid_pts=substrate.topography_nb_subdomain_grid_pts,
                              pnp=pnp,
                              standoff=float('inf'))
        ext_surface = make_sphere(r_s, (2 * nx, 2 * ny), (2 * sx, 2 * sx),
                                  centre=(sx / 2, sx / 2),
                                  subdomain_locations=substrate.subdomain_locations,
                                  nb_subdomain_grid_pts=substrate.nb_subdomain_grid_pts,
                                  pnp=pnp,
                                  standoff=float('inf'))
        system = SmoothContactSystem(substrate, inter, surface)

        penetration = 0

        # Initial guess: geometric overlap, clipped at zero.
        disp0 = ext_surface.heights() + penetration
        disp0 = np.where(disp0 > 0, disp0, 0)
        #disp0 = system.shape_minimisation_input(disp0)

        maxcor = 10
        for j in range(nrepetition):
            starttime = time.time()
            counter = iter_inspector()
            objective_monitor = decorated_objective(system, system.objective(penetration, gradient=True))
            result = scipy.optimize.minimize(objective_monitor, disp0,
                                             method=method, jac=True,
                                             options=dict(gtol=1e-6 * abs(w/z0),
                                                          ftol=1e-25,
                                                          maxcor=maxcor))
            nevals[i, j] = objective_monitor.neval
            times[i, j] = time.time() - starttime
            nits[i, j] = result.nit
            print(method)
            print(result.message)
            print("nevals: {}".format(objective_monitor.neval))
            print(result.nit)
            print(times[i, j])
            converged = result.success
            assert converged

    axt.plot(ns, np.mean(times, axis=1), "o", label="{}".format(name))
    l, = axit.plot(ns, np.mean(nits, axis=1), "o", label="{}, nits".format(name))
    axit.plot(ns, np.mean(nevals, axis=1), "+", c=l.get_color(),
              label="{}, nfeval".format(name))

axit.set_xlabel("lateral nb_grid_pts (-)")
axt.set_ylabel("execution time (s)")
axit.set_ylabel("# of iterations")
axit.legend(fancybox=True, framealpha=0.5)
axt.legend(fancybox=True, framealpha=0.5)

fig.savefig("LBFGS_scipy_vs_NuMPI.png")
PypiClean
/Caroline-presentation-0.2.4.tar.gz/Caroline-presentation-0.2.4/caroline/html_dist/js/mathjax/sre/mathmaps/nemeth/symbols/math_arrows.js
[{"locale":"nemeth"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠪⠒⠒⠀"}},"key":"2190"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠣⠒⠒⠕⠀"}},"key":"2191"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠕⠀"}},"key":"2192"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠩⠒⠒⠕⠀"}},"key":"2193"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠪⠒⠒⠕⠀"}},"key":"2194"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠣⠩⠪⠒⠒⠕⠀"}},"key":"2195"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠘⠪⠒⠒⠀"}},"key":"2196"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠘⠒⠒⠕⠀"}},"key":"2197"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠰⠒⠒⠕⠀"}},"key":"2198"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠰⠪⠒⠒⠀"}},"key":"2199"},{"category":"Sm","mappings":{"default":{"default":"⠀⠳⠈⠫⠪⠒⠒⠻⠀"}},"key":"219A"},{"category":"Sm","mappings":{"default":{"default":"⠀⠳⠈⠫⠒⠒⠕⠻⠀"}},"key":"219B"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠔⠒⠢⠀"}},"key":"219C"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠔⠒⠢⠕⠀"}},"key":"219D"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠪⠒⠒⠀"}},"key":"219E"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠣⠒⠒⠕⠕⠀"}},"key":"219F"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠒⠒⠕⠕⠀"}},"key":"21A0"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠩⠒⠒⠕⠕⠀"}},"key":"21A1"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠒⠒⠠⠽⠀"}},"key":"21A2"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠠⠯⠒⠒⠕⠀"}},"key":"21A3"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠒⠒⠳⠀"}},"key":"21A4"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠣⠳⠒⠒⠕⠀"}},"key":"21A5"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠳⠒⠒⠕⠀"}},"key":"21A6"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠩⠳⠒⠒⠕⠀"}},"key":"21A7"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠒⠳⠒⠕⠀"}},"key":"21A8"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠒⠒⠠⠕⠀"}},"key":"
21A9"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠠⠪⠒⠒⠕⠀"}},"key":"21AA"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠒⠒⠨⠡⠀"}},"key":"21AB"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠨⠡⠒⠒⠕⠀"}},"key":"21AC"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠔⠒⠢⠕⠀"}},"key":"21AD"},{"category":"Sm","mappings":{"default":{"default":"⠀⠳⠈⠫⠪⠒⠒⠕⠀"}},"key":"21AE"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠩⠔⠢⠔⠀"}},"key":"21AF"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠃⠴⠄"}},"key":"21B0"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠃⠂⠄"}},"key":"21B1"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠃⠆⠄"}},"key":"21B2"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠃⠒⠄"}},"key":"21B3"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠠⠳⠒⠒⠕⠀"}},"key":"21B4"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠩⠠⠳⠒⠒⠕⠀"}},"key":"21B5"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠢⠔⠀⠕⠀"}},"key":"21B6"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠢⠀⠔⠀"}},"key":"21B7"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠃⠦⠄"}},"key":"21B8"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠃⠔⠄"}},"key":"21B9"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠢⠔⠕⠀"}},"key":"21BA"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠢⠔⠀"}},"key":"21BB"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠒⠒⠕⠫⠪⠒⠒⠀"}},"key":"21C4"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠣⠒⠒⠕⠐⠫⠩⠒⠒⠕⠀"}},"key":"21C5"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠒⠒⠫⠒⠒⠕⠀"}},"key":"21C6"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠚⠒⠒⠫⠚⠒⠒⠀"}},"key":"21C7"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠣⠒⠒⠕⠐⠫⠣⠒⠒⠕⠀"}},"key":"21C8"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠒⠒⠕⠫⠒⠒⠕⠀"}},"key":"21C9"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠩⠒⠒⠕⠐⠫⠩⠒⠒⠕⠀"}},"key":"21CA"},{"category":"So","mappings":{"default":{"default":
"⠀⠳⠈⠫⠪⠪⠒⠒⠀"}},"key":"21CD"},{"category":"Sm","mappings":{"default":{"default":"⠀⠳⠈⠫⠪⠪⠒⠒⠕⠕⠀"}},"key":"21CE"},{"category":"Sm","mappings":{"default":{"default":"⠀⠳⠈⠫⠒⠒⠕⠕⠀"}},"key":"21CF"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠪⠒⠒⠀"}},"key":"21D0"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠣⠒⠒⠕⠕⠀"}},"key":"21D1"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠒⠒⠕⠕⠀"}},"key":"21D2"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠣⠒⠒⠕⠕⠀"}},"key":"21D3"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠪⠪⠒⠒⠕⠕⠀"}},"key":"21D4"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠣⠪⠪⠒⠒⠕⠕⠀"}},"key":"21D5"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠘⠪⠪⠒⠒⠀"}},"key":"21D6"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠘⠒⠒⠕⠕⠀"}},"key":"21D7"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠰⠒⠒⠕⠕⠀"}},"key":"21D8"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠰⠪⠪⠒⠒⠀"}},"key":"21D9"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠪⠪⠒⠒⠀"}},"key":"21DA"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠒⠒⠕⠕⠕⠀"}},"key":"21DB"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠢⠤⠔⠒⠢⠀"}},"key":"21DC"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠢⠤⠔⠒⠢⠕⠀"}},"key":"21DD"},{"category":"So","mappings":{"default":{"default":"⠀⠳⠳⠈⠫⠣⠒⠒⠕⠻⠀"}},"key":"21DE"},{"category":"So","mappings":{"default":{"default":"⠀⠳⠳⠈⠫⠩⠒⠒⠕⠻⠀"}},"key":"21DF"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠪⠒⠀⠒⠀"}},"key":"21E0"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠣⠒⠀⠒⠕⠀"}},"key":"21E1"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠒⠀⠒⠕⠀"}},"key":"21E2"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠩⠒⠀⠒⠕⠀"}},"key":"21E3"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠳⠪⠒⠒⠀"}},"key":"21E4"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠒⠒⠕⠳⠀"}},"key":"21E5"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠸⠪⠒⠒⠀"}},"key":"21E6"},{"category":"So","mappings":{"default":
{"default":"⠀⠫⠣⠸⠒⠒⠕⠀"}},"key":"21E7"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠸⠒⠒⠕⠀"}},"key":"21E8"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠩⠸⠒⠒⠕⠀"}},"key":"21E9"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠑⠁⠄"}},"key":"21EA"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠑⠃⠄"}},"key":"21EB"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠑⠉⠄"}},"key":"21EC"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠑⠙⠄"}},"key":"21ED"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠑⠑⠄"}},"key":"21EE"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠑⠋⠄"}},"key":"21EF"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠋⠴⠄"}},"key":"21F0"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠋⠂⠄"}},"key":"21F1"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠂⠋⠆⠄"}},"key":"21F2"},{"category":"So","mappings":{"default":{"default":"⠀⠫⠣⠸⠪⠒⠒⠕⠀"}},"key":"21F3"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠒⠒⠕⠨⠡⠀"}},"key":"21F4"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠩⠒⠒⠕⠐⠫⠣⠒⠒⠕⠀"}},"key":"21F5"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠒⠒⠕⠫⠒⠒⠕⠫⠒⠒⠕⠀"}},"key":"21F6"},{"category":"Sm","mappings":{"default":{"default":"⠀⠳⠈⠫⠪⠒⠒⠻⠀"}},"key":"21F7"},{"category":"Sm","mappings":{"default":{"default":"⠀⠳⠈⠫⠒⠒⠕⠻⠀"}},"key":"21F8"},{"category":"Sm","mappings":{"default":{"default":"⠀⠳⠈⠫⠪⠒⠒⠕⠀"}},"key":"21F9"},{"category":"Sm","mappings":{"default":{"default":"⠀⠳⠳⠈⠫⠪⠒⠒⠻⠀"}},"key":"21FA"},{"category":"Sm","mappings":{"default":{"default":"⠀⠳⠳⠈⠫⠒⠒⠕⠻⠀"}},"key":"21FB"},{"category":"Sm","mappings":{"default":{"default":"⠀⠳⠳⠈⠫⠪⠒⠒⠕⠀"}},"key":"21FC"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠳⠒⠒⠀"}},"key":"21FD"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠒⠒⠳⠀"}},"key":"21FE"},{"category":"Sm","mappings":{"default":{"default":"⠀⠫⠳⠒⠒⠳⠀"}},"key":"21FF"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠒⠴⠂⠄"}},"key":"2301"},{"category":"So","mapp
ings":{"default":{"default":"⠄⡳⠭⠆⠒⠴⠒⠄"}},"key":"2303"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠒⠴⠲⠄"}},"key":"2304"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠒⠆⠲⠄"}},"key":"2324"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠒⠦⠃⠄"}},"key":"238B"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠔⠲⠄"}},"key":"2794"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠔⠦⠄"}},"key":"2798"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠔⠔⠄"}},"key":"2799"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠔⠁⠄"}},"key":"279A"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠔⠃⠄"}},"key":"279B"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠔⠉⠄"}},"key":"279C"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠔⠙⠄"}},"key":"279D"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠔⠑⠄"}},"key":"279E"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠔⠋⠄"}},"key":"279F"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠴⠄"}},"key":"27A0"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠂⠄"}},"key":"27A1"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠆⠄"}},"key":"27A2"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠒⠄"}},"key":"27A3"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠲⠄"}},"key":"27A4"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠢⠄"}},"key":"27A5"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠖⠄"}},"key":"27A6"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠶⠄"}},"key":"27A7"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠦⠄"}},"key":"27A8"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠔⠄"}},"key":"27A9"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠁⠄"}},"key":"27AA"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠃⠄"}},"key":"27AB"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠉⠄"}},"key":"27AC"},{"category":"So","ma
ppings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠙⠄"}},"key":"27AD"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠑⠄"}},"key":"27AE"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠁⠋⠄"}},"key":"27AF"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠂⠄"}},"key":"27B1"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠆⠄"}},"key":"27B2"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠒⠄"}},"key":"27B3"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠲⠄"}},"key":"27B4"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠢⠄"}},"key":"27B5"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠖⠄"}},"key":"27B6"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠶⠄"}},"key":"27B7"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠦⠄"}},"key":"27B8"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠔⠄"}},"key":"27B9"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠁⠄"}},"key":"27BA"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠃⠄"}},"key":"27BB"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠉⠄"}},"key":"27BC"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠙⠄"}},"key":"27BD"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠃⠑⠄"}},"key":"27BE"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠴⠄"}},"key":"27F0"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠂⠄"}},"key":"27F1"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠆⠄"}},"key":"27F2"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠒⠄"}},"key":"27F3"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠲⠄"}},"key":"27F4"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠢⠄"}},"key":"27F5"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠖⠄"}},"key":"27F6"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠶⠄"}},"key":"27F7"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠦⠄"}},"key":"27F8"},{"category":"Sm","
mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠔⠄"}},"key":"27F9"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠁⠄"}},"key":"27FA"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠃⠄"}},"key":"27FB"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠉⠄"}},"key":"27FC"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠙⠄"}},"key":"27FD"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠑⠄"}},"key":"27FE"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠶⠋⠋⠄"}},"key":"27FF"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠴⠄"}},"key":"2900"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠂⠄"}},"key":"2901"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠆⠄"}},"key":"2902"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠒⠄"}},"key":"2903"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠲⠄"}},"key":"2904"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠢⠄"}},"key":"2905"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠖⠄"}},"key":"2906"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠶⠄"}},"key":"2907"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠦⠄"}},"key":"2908"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠔⠄"}},"key":"2909"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠁⠄"}},"key":"290A"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠃⠄"}},"key":"290B"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠉⠄"}},"key":"290C"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠙⠄"}},"key":"290D"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠑⠄"}},"key":"290E"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠴⠋⠄"}},"key":"290F"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠴⠄"}},"key":"2910"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠂⠄"}},"key":"2911"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠆⠄"}},"key":"2912"},{"category":"Sm"
,"mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠒⠄"}},"key":"2913"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠲⠄"}},"key":"2914"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠢⠄"}},"key":"2915"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠖⠄"}},"key":"2916"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠶⠄"}},"key":"2917"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠦⠄"}},"key":"2918"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠔⠄"}},"key":"2919"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠁⠄"}},"key":"291A"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠃⠄"}},"key":"291B"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠉⠄"}},"key":"291C"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠙⠄"}},"key":"291D"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠑⠄"}},"key":"291E"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠂⠋⠄"}},"key":"291F"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠴⠄"}},"key":"2920"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠂⠄"}},"key":"2921"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠆⠄"}},"key":"2922"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠒⠄"}},"key":"2923"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠲⠄"}},"key":"2924"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠢⠄"}},"key":"2925"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠖⠄"}},"key":"2926"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠶⠄"}},"key":"2927"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠦⠄"}},"key":"2928"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠔⠄"}},"key":"2929"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠁⠄"}},"key":"292A"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠙⠄"}},"key":"292D"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠑⠄"}},"key":"292E"},{"category":"S
m","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠆⠋⠄"}},"key":"292F"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠴⠄"}},"key":"2930"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠂⠄"}},"key":"2931"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠆⠄"}},"key":"2932"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠒⠄"}},"key":"2933"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠲⠄"}},"key":"2934"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠢⠄"}},"key":"2935"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠖⠄"}},"key":"2936"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠶⠄"}},"key":"2937"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠦⠄"}},"key":"2938"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠔⠄"}},"key":"2939"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠁⠄"}},"key":"293A"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠃⠄"}},"key":"293B"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠉⠄"}},"key":"293C"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠙⠄"}},"key":"293D"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠑⠄"}},"key":"293E"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠒⠋⠄"}},"key":"293F"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠲⠴⠄"}},"key":"2940"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠲⠂⠄"}},"key":"2941"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠲⠆⠄"}},"key":"2942"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠲⠒⠄"}},"key":"2943"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠲⠲⠄"}},"key":"2944"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠲⠢⠄"}},"key":"2945"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠲⠖⠄"}},"key":"2946"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠲⠶⠄"}},"key":"2947"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠲⠦⠄"}},"key":"2948"},{"category":
"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠲⠔⠄"}},"key":"2949"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠴⠄"}},"key":"2970"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠂⠄"}},"key":"2971"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠆⠄"}},"key":"2972"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠒⠄"}},"key":"2973"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠲⠄"}},"key":"2974"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠢⠄"}},"key":"2975"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠖⠄"}},"key":"2976"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠶⠄"}},"key":"2977"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠦⠄"}},"key":"2978"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠔⠄"}},"key":"2979"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠁⠄"}},"key":"297A"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠶⠃⠄"}},"key":"297B"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠃⠒⠄"}},"key":"29B3"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠃⠲⠄"}},"key":"29B4"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠃⠙⠄"}},"key":"29BD"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠑⠁⠄"}},"key":"29EA"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠑⠉⠄"}},"key":"29EC"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠔⠑⠙⠄"}},"key":"29ED"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠁⠂⠶⠄"}},"key":"2A17"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠴⠄"}},"key":"2B00"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠂⠄"}},"key":"2B01"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠆⠄"}},"key":"2B02"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠒⠄"}},"key":"2B03"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠲⠄"}},"key":"2B04"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠢⠄"}},"key":"2B05"},{"category
":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠖⠄"}},"key":"2B06"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠶⠄"}},"key":"2B07"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠦⠄"}},"key":"2B08"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠔⠄"}},"key":"2B09"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠁⠄"}},"key":"2B0A"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠃⠄"}},"key":"2B0B"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠉⠄"}},"key":"2B0C"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠙⠄"}},"key":"2B0D"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠑⠄"}},"key":"2B0E"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠴⠋⠄"}},"key":"2B0F"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠂⠴⠄"}},"key":"2B10"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠂⠂⠄"}},"key":"2B11"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠴⠄"}},"key":"2B30"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠂⠄"}},"key":"2B31"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠆⠄"}},"key":"2B32"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠒⠄"}},"key":"2B33"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠲⠄"}},"key":"2B34"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠢⠄"}},"key":"2B35"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠖⠄"}},"key":"2B36"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠶⠄"}},"key":"2B37"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠦⠄"}},"key":"2B38"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠔⠄"}},"key":"2B39"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠁⠄"}},"key":"2B3A"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠃⠄"}},"key":"2B3B"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠉⠄"}},"key":"2B3C"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠙⠄"}},"key":"2B3D"},{"catego
ry":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠑⠄"}},"key":"2B3E"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠒⠋⠄"}},"key":"2B3F"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠴⠄"}},"key":"2B40"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠂⠄"}},"key":"2B41"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠆⠄"}},"key":"2B42"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠒⠄"}},"key":"2B43"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠲⠄"}},"key":"2B44"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠢⠄"}},"key":"2B45"},{"category":"So","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠖⠄"}},"key":"2B46"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠶⠄"}},"key":"2B47"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠦⠄"}},"key":"2B48"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠔⠄"}},"key":"2B49"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠁⠄"}},"key":"2B4A"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠃⠄"}},"key":"2B4B"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠆⠃⠲⠉⠄"}},"key":"2B4C"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠋⠋⠑⠔⠄"}},"key":"FFE9"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠋⠋⠑⠁⠄"}},"key":"FFEA"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠋⠋⠑⠃⠄"}},"key":"FFEB"},{"category":"Sm","mappings":{"default":{"default":"⠄⡳⠭⠋⠋⠑⠉⠄"}},"key":"FFEC"}]
PypiClean
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas_blender/artists/lineartist.py
from typing import Any
from typing import List
from typing import Optional
from typing import Union

import bpy

import compas_blender
from compas.artists import PrimitiveArtist
from compas.geometry import Line
from compas.colors import Color
from compas_blender.artists import BlenderArtist


class LineArtist(BlenderArtist, PrimitiveArtist):
    """Artist for drawing lines in Blender.

    Parameters
    ----------
    line : :class:`~compas.geometry.Line`
        A COMPAS line.
    collection : str | :blender:`bpy.types.Collection`
        The Blender scene collection the object(s) created by this artist belong to.
    **kwargs : dict, optional
        Additional keyword arguments.
        For more info, see :class:`~compas_blender.artists.BlenderArtist`
        and :class:`~compas.artists.PrimitiveArtist`.

    Examples
    --------
    Use the Blender artist explicitly.

    .. code-block:: python

        from compas.geometry import Line
        from compas_blender.artists import LineArtist

        line = Line([0, 0, 0], [1, 1, 1])
        artist = LineArtist(line)
        artist.draw()

    Or, use the artist through the plugin mechanism.

    .. code-block:: python

        from compas.geometry import Line
        from compas.artists import Artist

        line = Line([0, 0, 0], [1, 1, 1])
        artist = Artist(line)
        artist.draw()

    """

    def __init__(
        self,
        line: Line,
        collection: Optional[Union[str, bpy.types.Collection]] = None,
        **kwargs: Any,
    ):
        # When no collection is given, fall back to a collection named after the line.
        super().__init__(primitive=line, collection=collection or line.name, **kwargs)

    def draw(self, color: Optional[Color] = None, show_points: bool = False) -> List[bpy.types.Object]:
        """Draw the line.

        Parameters
        ----------
        color : tuple[int, int, int] | tuple[float, float, float] | :class:`~compas.colors.Color`, optional
            The RGB color of the line.
            The default color is :attr:`compas.artists.PrimitiveArtist.color`.
        show_points : bool, optional
            If True, show the start and end point in addition to the line.

        Returns
        -------
        list[:blender:`bpy.types.Object`]

        """
        rgb = Color.coerce(color) or self.color
        line = self.primitive
        created = []
        if show_points:
            # One small sphere per endpoint, named "<line>.start" / "<line>.end".
            point_data = [
                {"pos": endpoint, "name": f"{line.name}.{label}", "color": rgb, "radius": 0.01}
                for label, endpoint in (("start", line.start), ("end", line.end))
            ]
            created += compas_blender.draw_points(point_data, collection=self.collection)
        segment = {
            "start": line.start,
            "end": line.end,
            "color": rgb,
            "name": f"{line.name}",
        }
        created += compas_blender.draw_lines([segment], collection=self.collection)
        return created
PypiClean
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/https-proxy-agent/dist/agent.js
"use strict";
// TypeScript-emitted helper implementing async/await over generators.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const net_1 = __importDefault(require("net"));
const tls_1 = __importDefault(require("tls"));
const url_1 = __importDefault(require("url"));
const assert_1 = __importDefault(require("assert"));
const debug_1 = __importDefault(require("debug"));
const agent_base_1 = require("agent-base");
const parse_proxy_response_1 = __importDefault(require("./parse-proxy-response"));
const debug = debug_1.default('https-proxy-agent:agent');
/**
 * The `HttpsProxyAgent` implements an HTTP Agent subclass that connects to
 * the specified "HTTP(s) proxy server" in order to proxy HTTPS requests.
 *
 * Outgoing HTTP requests are first tunneled through the proxy server using the
 * `CONNECT` HTTP request method to establish a connection to the proxy server,
 * and then the proxy server connects to the destination target and issues the
 * HTTP request from the proxy server.
 *
 * `https:` requests have their socket connection upgraded to TLS once
 * the connection to the proxy server has been established.
 *
 * @api public
 */
class HttpsProxyAgent extends agent_base_1.Agent {
    constructor(_opts) {
        let opts;
        if (typeof _opts === 'string') {
            opts = url_1.default.parse(_opts);
        }
        else {
            opts = _opts;
        }
        if (!opts) {
            throw new Error('an HTTP(S) proxy server `host` and `port` must be specified!');
        }
        debug('creating new HttpsProxyAgent instance: %o', opts);
        super(opts);
        const proxy = Object.assign({}, opts);
        // If `true`, then connect to the proxy server over TLS.
        // Defaults to `false`.
        this.secureProxy = opts.secureProxy || isHTTPS(proxy.protocol);
        // Prefer `hostname` over `host`, and set the `port` if needed.
        proxy.host = proxy.hostname || proxy.host;
        if (typeof proxy.port === 'string') {
            proxy.port = parseInt(proxy.port, 10);
        }
        if (!proxy.port && proxy.host) {
            proxy.port = this.secureProxy ? 443 : 80;
        }
        // ALPN is supported by Node.js >= v5.
        // attempt to negotiate http/1.1 for proxy servers that support http/2
        if (this.secureProxy && !('ALPNProtocols' in proxy)) {
            // BUG FIX: the registered ALPN protocol ID is "http/1.1" (RFC 7301).
            // The previous value 'http 1.1' (with a space) is not a valid protocol
            // identifier and could never be negotiated by a conforming server.
            proxy.ALPNProtocols = ['http/1.1'];
        }
        if (proxy.host && proxy.path) {
            // If both a `host` and `path` are specified then it's most likely
            // the result of a `url.parse()` call... we need to remove the
            // `path` portion so that `net.connect()` doesn't attempt to open
            // that as a Unix socket file.
            delete proxy.path;
            delete proxy.pathname;
        }
        this.proxy = proxy;
    }
    /**
     * Called when the node-core HTTP client library is creating a
     * new HTTP request.
     *
     * @api protected
     */
    callback(req, opts) {
        return __awaiter(this, void 0, void 0, function* () {
            const { proxy, secureProxy } = this;
            // Create a socket connection to the proxy server.
            let socket;
            if (secureProxy) {
                debug('Creating `tls.Socket`: %o', proxy);
                socket = tls_1.default.connect(proxy);
            }
            else {
                debug('Creating `net.Socket`: %o', proxy);
                socket = net_1.default.connect(proxy);
            }
            const headers = Object.assign({}, proxy.headers);
            const hostname = `${opts.host}:${opts.port}`;
            let payload = `CONNECT ${hostname} HTTP/1.1\r\n`;
            // Inject the `Proxy-Authorization` header if necessary.
            if (proxy.auth) {
                headers['Proxy-Authorization'] = `Basic ${Buffer.from(proxy.auth).toString('base64')}`;
            }
            // The `Host` header should only include the port
            // number when it is not the default port.
            let { host, port, secureEndpoint } = opts;
            if (!isDefaultPort(port, secureEndpoint)) {
                host += `:${port}`;
            }
            headers.Host = host;
            headers.Connection = 'close';
            for (const name of Object.keys(headers)) {
                payload += `${name}: ${headers[name]}\r\n`;
            }
            const proxyResponsePromise = parse_proxy_response_1.default(socket);
            socket.write(`${payload}\r\n`);
            const { statusCode, buffered } = yield proxyResponsePromise;
            if (statusCode === 200) {
                req.once('socket', resume);
                if (opts.secureEndpoint) {
                    // The proxy is connecting to a TLS server, so upgrade
                    // this socket connection to a TLS connection.
                    debug('Upgrading socket connection to TLS');
                    const servername = opts.servername || opts.host;
                    return tls_1.default.connect(Object.assign(Object.assign({}, omit(opts, 'host', 'hostname', 'path', 'port')), { socket,
                        servername }));
                }
                return socket;
            }
            // Some other status code that's not 200... need to re-play the HTTP
            // header "data" events onto the socket once the HTTP machinery is
            // attached so that the node core `http` can parse and handle the
            // error status code.
            // Close the original socket, and a new "fake" socket is returned
            // instead, so that the proxy doesn't get the HTTP request
            // written to it (which may contain `Authorization` headers or other
            // sensitive data).
            //
            // See: https://hackerone.com/reports/541502
            socket.destroy();
            const fakeSocket = new net_1.default.Socket({ writable: false });
            fakeSocket.readable = true;
            // Need to wait for the "socket" event to re-play the "data" events.
            req.once('socket', (s) => {
                debug('replaying proxy buffer for failed request');
                assert_1.default(s.listenerCount('data') > 0);
                // Replay the "buffered" Buffer onto the fake `socket`, since at
                // this point the HTTP module machinery has been hooked up for
                // the user.
                s.push(buffered);
                s.push(null);
            });
            return fakeSocket;
        });
    }
}
exports.default = HttpsProxyAgent;
// Resume a paused socket once the request has picked it up.
function resume(socket) {
    socket.resume();
}
// True when `port` is the scheme-default port (80 for http, 443 for https).
function isDefaultPort(port, secure) {
    return Boolean((!secure && port === 80) || (secure && port === 443));
}
// True when the given protocol string denotes HTTPS ("https" or "https:").
function isHTTPS(protocol) {
    return typeof protocol === 'string' ? /^https:?$/i.test(protocol) : false;
}
// Shallow-copy `obj` without the listed keys.
function omit(obj, ...keys) {
    const ret = {};
    let key;
    for (key in obj) {
        if (!keys.includes(key)) {
            ret[key] = obj[key];
        }
    }
    return ret;
}
//# sourceMappingURL=agent.js.map
PypiClean
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/DjangoAppCenter/simpleui/static/admin/simpleui-x/elementui/form.js
// Webpack 4 bundle of Element UI's <el-form> component (entry module 95).
// NOTE(review): generated output — regenerate from packages/form rather than hand-editing.
module.exports =
(function (modules) { // webpackBootstrap
    // The module cache
    var installedModules = {};
    // The require function
    function __webpack_require__(moduleId) {
        // Check if module is in cache
        if (installedModules[moduleId]) {
            return installedModules[moduleId].exports;
        }
        // Create a new module (and put it into the cache)
        var module = installedModules[moduleId] = {
            i: moduleId,
            l: false,
            exports: {}
        };
        // Execute the module function
        modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
        // Flag the module as loaded
        module.l = true;
        // Return the exports of the module
        return module.exports;
    }
    // expose the modules object (__webpack_modules__)
    __webpack_require__.m = modules;
    // expose the module cache
    __webpack_require__.c = installedModules;
    // define getter function for harmony exports
    __webpack_require__.d = function (exports, name, getter) {
        if (!__webpack_require__.o(exports, name)) {
            Object.defineProperty(exports, name, {enumerable: true, get: getter});
        }
    };
    // define __esModule on exports
    __webpack_require__.r = function (exports) {
        if (typeof Symbol !== 'undefined' && Symbol.toStringTag) {
            Object.defineProperty(exports, Symbol.toStringTag, {value: 'Module'});
        }
        Object.defineProperty(exports, '__esModule', {value: true});
    };
    // create a fake namespace object
    // mode & 1: value is a module id, require it
    // mode & 2: merge all properties of value into the ns
    // mode & 4: return value when already ns object
    // mode & 8|1: behave like require
    __webpack_require__.t = function (value, mode) {
        if (mode & 1) value = __webpack_require__(value);
        if (mode & 8) return value;
        if ((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
        var ns = Object.create(null);
        __webpack_require__.r(ns);
        Object.defineProperty(ns, 'default', {enumerable: true, value: value});
        if (mode & 2 && typeof value != 'string') for (var key in value) __webpack_require__.d(ns, key, function (key) {
            return value[key];
        }.bind(null, key));
        return ns;
    };
    // getDefaultExport function for compatibility with non-harmony modules
    __webpack_require__.n = function (module) {
        var getter = module && module.__esModule ?
            function getDefault() {
                return module['default'];
            } :
            function getModuleExports() {
                return module;
            };
        __webpack_require__.d(getter, 'a', getter);
        return getter;
    };
    // Object.prototype.hasOwnProperty.call
    __webpack_require__.o = function (object, property) {
        return Object.prototype.hasOwnProperty.call(object, property);
    };
    // __webpack_public_path__
    __webpack_require__.p = "/dist/";
    // Load entry module and return exports
    return __webpack_require__(__webpack_require__.s = 95);
})
/************************************************************************/
({
    /***/ 0:
    // Module 0: vue-loader's runtime component normalizer.
    /***/ (function (module, __webpack_exports__, __webpack_require__) {
        "use strict";
        /* harmony export (binding) */
        __webpack_require__.d(__webpack_exports__, "a", function () {
            return normalizeComponent;
        });
        /* globals __VUE_SSR_CONTEXT__ */
        // IMPORTANT: Do NOT use ES2015 features in this file (except for modules).
        // This module is a runtime utility for cleaner component module output and will
        // be included in the final webpack user bundle.
        function normalizeComponent(
            scriptExports,
            render,
            staticRenderFns,
            functionalTemplate,
            injectStyles,
            scopeId,
            moduleIdentifier, /* server only */
            shadowMode /* vue-cli only */
        ) {
            // Vue.extend constructor export interop
            var options = typeof scriptExports === 'function' ?
                scriptExports.options :
                scriptExports
            // render functions
            if (render) {
                options.render = render
                options.staticRenderFns = staticRenderFns
                options._compiled = true
            }
            // functional template
            if (functionalTemplate) {
                options.functional = true
            }
            // scopedId
            if (scopeId) {
                options._scopeId = 'data-v-' + scopeId
            }
            var hook
            if (moduleIdentifier) {
                // server build
                hook = function (context) {
                    // 2.3 injection
                    context = context || // cached call
                        (this.$vnode && this.$vnode.ssrContext) || // stateful
                        (this.parent && this.parent.$vnode && this.parent.$vnode.ssrContext) // functional
                    // 2.2 with runInNewContext: true
                    if (!context && typeof __VUE_SSR_CONTEXT__ !== 'undefined') {
                        context = __VUE_SSR_CONTEXT__
                    }
                    // inject component styles
                    if (injectStyles) {
                        injectStyles.call(this, context)
                    }
                    // register component module identifier for async chunk inferrence
                    if (context && context._registeredComponents) {
                        context._registeredComponents.add(moduleIdentifier)
                    }
                }
                // used by ssr in case component is cached and beforeCreate
                // never gets called
                options._ssrRegister = hook
            } else if (injectStyles) {
                hook = shadowMode ?
                    function () {
                        injectStyles.call(this, this.$root.$options.shadowRoot)
                    } :
                    injectStyles
            }
            if (hook) {
                if (options.functional) {
                    // for template-only hot-reload because in that case the render fn doesn't
                    // go through the normalizer
                    options._injectStyles = hook
                    // register for functioal component in vue file
                    var originalRender = options.render
                    options.render = function renderWithStyleInjection(h, context) {
                        hook.call(context)
                        return originalRender(h, context)
                    }
                } else {
                    // inject component registration as beforeCreate hook
                    var existing = options.beforeCreate
                    options.beforeCreate = existing ?
                        [].concat(existing, hook) :
                        [hook]
                }
            }
            return {
                exports: scriptExports,
                options: options
            }
        }
        /***/
    }),
    /***/ 9:
    // Module 9: external helper that deep-merges option objects.
    /***/ (function (module, exports) {
        module.exports = require("element-ui/lib/utils/merge");
        /***/
    }),
    /***/ 95:
    // Module 95: the <el-form> component itself (template render fn + options).
    /***/ (function (module, __webpack_exports__, __webpack_require__) {
        "use strict";
        __webpack_require__.r(__webpack_exports__);
        // CONCATENATED MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/form/src/form.vue?vue&type=template&id=a1b5ff34&
        var render = function () {
            var _vm = this
            var _h = _vm.$createElement
            var _c = _vm._self._c || _h
            return _c(
                "form",
                {
                    staticClass: "el-form",
                    class: [
                        _vm.labelPosition ? "el-form--label-" + _vm.labelPosition : "",
                        {"el-form--inline": _vm.inline}
                    ]
                },
                [_vm._t("default")],
                2
            )
        }
        var staticRenderFns = []
        render._withStripped = true
        // CONCATENATED MODULE: ./packages/form/src/form.vue?vue&type=template&id=a1b5ff34&
        // EXTERNAL MODULE: external "element-ui/lib/utils/merge"
        var merge_ = __webpack_require__(9);
        var merge_default = /*#__PURE__*/__webpack_require__.n(merge_);
        // CONCATENATED MODULE: ./node_modules/_babel-loader@7.1.5@babel-loader/lib!./node_modules/_vue-loader@15.7.1@vue-loader/lib??vue-loader-options!./packages/form/src/form.vue?vue&type=script&lang=js&
        /* harmony default export */
        var formvue_type_script_lang_js_ = ({
            name: 'ElForm',
            componentName: 'ElForm',
            provide: function provide() {
                return {
                    elForm: this
                };
            },
            props: {
                model: Object,
                rules: Object,
                labelPosition: String,
                labelWidth: String,
                labelSuffix: {
                    type: String,
                    default: ''
                },
                inline: Boolean,
                inlineMessage: Boolean,
                statusIcon: Boolean,
                showMessage: {
                    type: Boolean,
                    default: true
                },
                size: String,
                disabled: Boolean,
                validateOnRuleChange: {
                    type: Boolean,
                    default: true
                },
                hideRequiredAsterisk: {
                    type: Boolean,
                    default: false
                }
            },
            watch: {
                rules: function rules() {
                    // remove then add event listeners on form-item after form rules change
                    this.fields.forEach(function (field) {
                        field.removeValidateEvents();
                        field.addValidateEvents();
                    });
                    if (this.validateOnRuleChange) {
                        this.validate(function () {
                        });
                    }
                }
            },
            computed: {
                autoLabelWidth: function autoLabelWidth() {
                    if (!this.potentialLabelWidthArr.length) return 0;
                    var max = Math.max.apply(Math, this.potentialLabelWidthArr);
                    return max ? max + 'px' : '';
                }
            },
            data: function data() {
                return {
                    fields: [],
                    potentialLabelWidthArr: [] // use this array to calculate auto width
                };
            },
            created: function created() {
                var _this = this;
                this.$on('el.form.addField', function (field) {
                    if (field) {
                        _this.fields.push(field);
                    }
                });
                /* istanbul ignore next */
                this.$on('el.form.removeField', function (field) {
                    if (field.prop) {
                        _this.fields.splice(_this.fields.indexOf(field), 1);
                    }
                });
            },
            methods: {
                resetFields: function resetFields() {
                    if (!this.model) {
                        console.warn('[Element Warn][Form]model is required for resetFields to work.');
                        return;
                    }
                    this.fields.forEach(function (field) {
                        field.resetField();
                    });
                },
                clearValidate: function clearValidate() {
                    var props = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : [];
                    var fields = props.length ?
                        typeof props === 'string' ?
                            this.fields.filter(function (field) {
                                return props === field.prop;
                            }) :
                            this.fields.filter(function (field) {
                                return props.indexOf(field.prop) > -1;
                            }) : this.fields;
                    fields.forEach(function (field) {
                        field.clearValidate();
                    });
                },
                validate: function validate(callback) {
                    var _this2 = this;
                    if (!this.model) {
                        console.warn('[Element Warn][Form]model is required for validate to work!');
                        return;
                    }
                    var promise = void 0;
                    // if no callback, return promise
                    if (typeof callback !== 'function' && window.Promise) {
                        promise = new window.Promise(function (resolve, reject) {
                            callback = function callback(valid) {
                                valid ? resolve(valid) : reject(valid);
                            };
                        });
                    }
                    var valid = true;
                    var count = 0;
                    // if there are no fields to validate, invoke the callback immediately
                    if (this.fields.length === 0 && callback) {
                        callback(true);
                    }
                    var invalidFields = {};
                    this.fields.forEach(function (field) {
                        field.validate('', function (message, field) {
                            if (message) {
                                valid = false;
                            }
                            invalidFields = merge_default()({}, invalidFields, field);
                            if (typeof callback === 'function' && ++count === _this2.fields.length) {
                                callback(valid, invalidFields);
                            }
                        });
                    });
                    if (promise) {
                        return promise;
                    }
                },
                validateField: function validateField(props, cb) {
                    props = [].concat(props);
                    var fields = this.fields.filter(function (field) {
                        return props.indexOf(field.prop) !== -1;
                    });
                    if (!fields.length) {
                        console.warn('[Element Warn]please pass correct props!');
                        return;
                    }
                    fields.forEach(function (field) {
                        field.validate('', cb);
                    });
                },
                getLabelWidthIndex: function getLabelWidthIndex(width) {
                    var index = this.potentialLabelWidthArr.indexOf(width);
                    // it's impossible
                    if (index === -1) {
                        throw new Error('[ElementForm]unpected width ', width);
                    }
                    return index;
                },
                registerLabelWidth: function registerLabelWidth(val, oldVal) {
                    if (val && oldVal) {
                        var index = this.getLabelWidthIndex(oldVal);
                        this.potentialLabelWidthArr.splice(index, 1, val);
                    } else if (val) {
                        this.potentialLabelWidthArr.push(val);
                    }
                },
                deregisterLabelWidth: function deregisterLabelWidth(val) {
                    var index = this.getLabelWidthIndex(val);
                    this.potentialLabelWidthArr.splice(index, 1);
                }
            }
        });
        // CONCATENATED MODULE: ./packages/form/src/form.vue?vue&type=script&lang=js&
        /* harmony default export */
        var src_formvue_type_script_lang_js_ = (formvue_type_script_lang_js_);
        // EXTERNAL MODULE: ./node_modules/_vue-loader@15.7.1@vue-loader/lib/runtime/componentNormalizer.js
        var componentNormalizer = __webpack_require__(0);
        // CONCATENATED MODULE: ./packages/form/src/form.vue
        /* normalize component */
        var component = Object(componentNormalizer["a" /* default */])(
            src_formvue_type_script_lang_js_,
            render,
            staticRenderFns,
            false,
            null,
            null,
            null
        )
        /* hot reload */
        if (false) {
            var api;
        }
        component.options.__file = "packages/form/src/form.vue"
        /* harmony default export */
        var src_form = (component.exports);
        // CONCATENATED MODULE: ./packages/form/index.js
        /* istanbul ignore next */
        src_form.install = function (Vue) {
            Vue.component(src_form.name, src_form);
        };
        /* harmony default export */
        var packages_form = __webpack_exports__["default"] = (src_form);
        /***/
    })
/******/ });
PypiClean
/GQCMS-0.0.4-py3-none-any.whl/build/lib/build/lib/build/lib/build/lib/build/lib/build/lib/build/lib/build/lib/gqcms/matrices/Hamiltonian.py
import numpy as np
from typing import List
from gqcms import Hubbard
from gqcms.matrices import Determinant


def createHamiltonian(H_core: np.ndarray, I: np.ndarray, determinants: list) -> np.ndarray:
    """
    Build the CI Hamiltonian matrix over a list of determinants.

    :param H_core: core Hamiltonian, i.e. one-electron integrals
    :param I: antisymmetrized two-electron integrals
    :param determinants: list of Determinant objects spanning the CI space
    """
    dim = len(determinants)
    H = np.zeros((dim, dim))

    # H is Hermitian, so only the upper triangle is computed explicitly
    # and mirrored into the lower triangle.
    for i in range(dim):
        det_i = determinants[i]
        for j in range(i, dim):
            det_j = determinants[j]

            # Slater-Condon rules: the matrix element depends on how many
            # spin orbitals the two determinants differ in.
            n_diff = det_i.num_different_orbitals(det_j)
            element = 0

            if n_diff == 0:
                occupied = det_i.get_spin_orbitals()

                # One-electron contribution.
                for u in occupied:
                    element += H_core[u, u]

                # Two-electron contribution over unique orbital pairs.
                for k, p in enumerate(occupied):
                    for q in occupied[k + 1:]:
                        element += I[p, q, p, q]

            elif n_diff == 1:
                orbs_i, orbs_j, sign = det_i.get_different_orbitals(det_j)

                # One-electron term between the differing orbitals.
                element = H_core[orbs_i[0], orbs_j[0]]

                # Two-electron terms with every common occupied orbital.
                for p in det_i.get_spin_orbitals():
                    if p != orbs_i[0]:
                        element += I[p, orbs_i[0], p, orbs_j[0]]
                element = element * sign

            elif n_diff == 2:
                orbs_i, orbs_j, sign = det_i.get_different_orbitals(det_j)
                element = sign * I[orbs_i[0], orbs_i[1], orbs_j[0], orbs_j[1]]

            H[i, j] = element
            H[j, i] = element

    return H


def createHamiltonianSCI(molecule: Hubbard, result_HF, excitations: List[int] = None, basis=None, return_extra=False) -> np.ndarray:
    """
    Build the selected configuration interaction (SCI) Hamiltonian of a
    Hubbard system in the HF-MO spin-orbital basis.

    :param molecule: information of the Hubbard system
    :param result_HF: result of an HF calculation
    :param excitations: list of the selected excitation ranks (alternative to ``basis``)
    :param basis: explicit list of determinants (alternative to ``excitations``)
    :param return_extra: return the CI determinant basis as well (default is False)
    """
    n_sites = molecule.sites

    # One-electron integrals in the site (AO) basis: hopping plus on-site potential.
    H_core_ao = -molecule.t * molecule.adj_mat + np.diag(
        [molecule.potential.get(site, 0) for site in range(n_sites)]
    )

    # Spin-blocked MO coefficients: alpha block first, then beta block.
    C = np.block([
        [result_HF.C_a, np.zeros_like(result_HF.C_b)],
        [np.zeros_like(result_HF.C_a), result_HF.C_b],
    ])

    # Spin-block H_core in the AO basis.
    H_core_ao_spin = np.block([
        [H_core_ao, np.zeros_like(H_core_ao)],
        [np.zeros_like(H_core_ao), H_core_ao],
    ])

    # Transform H_core from AO to HF-MO basis.
    H_core_mo = C.T @ H_core_ao_spin @ C

    # Interleave alpha/beta columns so spin orbitals alternate (0a, 0b, 1a, 1b, ...),
    # matching the index layout of the electron-repulsion tensor below.
    order = np.asarray(
        [p for pair in zip(range(0, n_sites), range(n_sites, 2 * n_sites)) for p in pair]
    )
    C = C[:, order]
    H_core_mo = H_core_mo[:, order]
    H_core_mo = H_core_mo[order, :]

    # On-site repulsion tensor in the AO basis.
    eri_ao = np.zeros((n_sites, n_sites, n_sites, n_sites))
    for site in range(n_sites):
        eri_ao[site, site, site, site] = molecule.U

    # Spin-block the electron-repulsion integrals.
    eye2 = np.eye(2)
    eri_spin = np.kron(eye2, eri_ao)
    eri_spin = np.kron(eye2, eri_spin.T)

    # Convert to physicist's notation and antisymmetrize.
    eri_phys = eri_spin.transpose(0, 2, 1, 3)
    gao = eri_phys - eri_phys.transpose(0, 1, 3, 2)

    # Transform the tensor from AO to MO basis, one index at a time.
    tmp = np.einsum('pi,pqrs->iqrs', C, gao)
    tmp = np.einsum('qj,iqrs->ijrs', C, tmp)
    tmp = np.einsum('ijrs,rk->ijks', tmp, C)
    eri_mo = np.einsum('ijks,sl->ijkl', tmp, C)

    if excitations is not None:
        # Generate the requested excitations on top of the HF reference.
        reference = Determinant(nalpha=molecule.nalpha, nbeta=molecule.nbeta, sites=n_sites)
        basis = [reference]
        for rank in excitations:
            basis.extend(reference.n_tuply_excitations(rank, n_sites))
    elif basis is None:
        # Neither an excitation list nor an explicit determinant basis was supplied.
        raise ValueError("A list of excitations or a list of determinants should be given.")

    # Assemble the Hamiltonian in the ONV (determinant) basis.
    H_onv = createHamiltonian(H_core_mo, eri_mo, basis)

    if return_extra:
        return H_onv, basis
    return H_onv
PypiClean
/JATA_Tools-0.1.9-py3-none-any.whl/UpdatedPixPlot/UpdatedPixPlot.py
from __future__ import division

import warnings; warnings.filterwarnings('ignore')

from tensorflow.keras.preprocessing.image import save_img, img_to_array, array_to_img
from os.path import join, exists, dirname, realpath
from tensorflow.keras.applications.inception_v3 import preprocess_input
from tensorflow.keras.applications import InceptionV3
from sklearn.metrics import pairwise_distances_argmin_min

# BUGFIX: the original `except BaseExceptions` referenced an undefined name, so
# any failure of the first import raised NameError instead of reaching the
# fallback. ImportError is the exception this guard exists to handle.
try:
    from keras.backend.tensorflow_backend import set_session
except ImportError as e:
    print("Keras TensorFlow backend failed with error: ", e)
    print("Using most recent tensorflow keras backend...")
    from tensorflow.compat.v1.keras.backend import set_session
    print("Success! :)")

from dateutil.parser import parse as parse_date
from sklearn.preprocessing import minmax_scale
from keras_preprocessing.image import load_img
from pointgrid import align_points_to_grid
from scipy.spatial.distance import cdist
from distutils.dir_util import copy_tree
from sklearn.decomposition import PCA
from scipy.spatial import ConvexHull
from iiif_downloader import Manifest
from collections import defaultdict
from rasterfairy import coonswarp
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from scipy.stats import kde
from hdbscan import HDBSCAN
import tensorflow as tf

# legacy compatibility shim expected by some older keras/TF-1.x code paths
tf.python.control_flow_ops = tf

from umap import UMAP
import multiprocessing
import pkg_resources
import rasterfairy
import numpy as np
import datetime
import operator
import argparse
import random
import shutil
import glob2
import uuid
import math
import gzip
import json
import lap
import sys
import csv
import os

from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# prefer the multicore TSNE implementation when available
try:
    from MulticoreTSNE import MulticoreTSNE as TSNE
except ImportError:
    from sklearn.manifold import TSNE

try:
    from urllib.parse import unquote  # python 3
except ImportError:
    from urllib import unquote  # python 2

# handle dynamic GPU memory allocation
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.log_device_placement = True
sess = tf.compat.v1.Session(config=tf_config)

'''
NB: Keras Image class objects return image.size as w,h
    Numpy array representations of images return image.shape as h,w,c
'''

# default pipeline configuration; callers override via **kwargs
config = {
    'images': None,
    'metadata': None,
    'out_dir': 'output',
    'max_images': None,
    'use_cache': True,
    'encoding': 'utf8',
    'min_cluster_size': 20,
    'atlas_size': 2048,
    'cell_size': 32,
    'lod_cell_height': 128,
    'n_neighbors': 6,
    'min_distance': 0.001,
    'metric': 'correlation',
    'pointgrid_fill': 0.05,
    'square_cells': False,
    'gzip': False,
    'plot_id': str(uuid.uuid1()),
    'seed': 24,
}


##
# Entry
##

def process_images(**kwargs):
    '''Main method for processing user images and metadata'''
    np.random.seed(kwargs['seed'])
    tf.compat.v1.set_random_seed(kwargs['seed'])
    copy_web_assets(**kwargs)
    kwargs['out_dir'] = join(kwargs['out_dir'], 'data')
    kwargs['image_paths'], kwargs['metadata'] = filter_images(**kwargs)
    kwargs['atlas_dir'] = get_atlas_data(**kwargs)
    get_manifest(**kwargs)
    write_images(**kwargs)
    print(' * done!')


def copy_web_assets(**kwargs):
    '''Copy the /web directory from the pixplot source to the users cwd'''
    src = join(dirname(realpath(__file__)), 'web')
    dest = join(os.getcwd(), kwargs['out_dir'])
    copy_tree(src, dest)
    # write version numbers into output
    for i in ['index.html', os.path.join('assets', 'js', 'tsne.js')]:
        path = os.path.join(dest, i)
        with open(path, 'r') as f:
            f = f.read().replace('VERSION_NUMBER', get_version())
            with open(path, 'w') as out:
                out.write(f)
    # ROBUSTNESS: `copy_web_only` is an optional CLI flag; direct key access
    # raised KeyError when it was absent
    if kwargs.get('copy_web_only', False):
        sys.exit()


##
# Images
##

def filter_images(**kwargs):
    '''Main method for filtering images given user metadata (if provided)'''
    image_paths = []
    for i in stream_images(image_paths=get_image_paths(**kwargs)):
        # get image height and width
        w, h = i.original.size
        # remove images with 0 height or width when resized to lod height
        if (h == 0) or (w == 0):
            print(' * skipping {} because it contains 0 height or width'.format(i.path))
            continue
        # remove images that have 0 height or width when resized
        try:
            i.resize_to_max(kwargs['lod_cell_height'])
        except ValueError:
            print(' * skipping {} because it contains 0 height or width when resized'.format(i.path))
            continue
        # remove images that are too wide for the atlas
        if (w / h) > (kwargs['atlas_size'] / kwargs['cell_size']):
            print(' * skipping {} because its dimensions are oblong'.format(i.path))
            continue
        image_paths.append(i.path)
    # handle the case user provided no metadata
    if not kwargs.get('metadata', False):
        return [
            limit_image_count(image_paths, **kwargs),
            [],
        ]
    # handle user metadata: retain only records with image and metadata
    l = get_metadata_list(**kwargs)
    meta_bn = set([clean_filename(i.get('filename', '')) for i in l])
    img_bn = set([clean_filename(i) for i in image_paths])
    # identify images with metadata and those without metadata
    meta_present = img_bn.intersection(meta_bn)
    meta_missing = list(img_bn - meta_bn)
    # notify the user of images that are missing metadata
    if meta_missing:
        print(' ! Some images are missing metadata:\n  -', '\n  - '.join(meta_missing[:10]))
        if len(meta_missing) > 10:
            print(' ...', len(meta_missing) - 10, 'more')
        with open('missing-metadata.txt', 'w') as out:
            out.write('\n'.join(meta_missing))
    # get the sorted lists of images and metadata
    d = {clean_filename(i['filename']): i for i in l}
    images = []
    metadata = []
    for i in image_paths:
        if clean_filename(i) in meta_present:
            images.append(i)
            metadata.append(d[clean_filename(i)])
    kwargs['metadata'] = metadata
    write_metadata(**kwargs)
    return [
        limit_image_count(images, **kwargs),
        limit_image_count(metadata, **kwargs),
    ]


def limit_image_count(arr, **kwargs):
    '''If the user passed a max_images value, return [:max_images] from arr'''
    if kwargs.get('max_images', False):
        return arr[:kwargs['max_images']]
    return arr


def get_image_paths(**kwargs):
    '''Called once to provide a list of image paths--handles IIIF manifest input'''
    # handle case where --images points to iiif manifest
    image_paths = None
    if not kwargs['images']:
        print('\nError: please provide an images argument, e.g.:')
        print('pixplot --images "cat_pictures/*.jpg"\n')
        sys.exit()
    # handle list of IIIF image inputs
    if os.path.exists(kwargs['images']):
        with open(kwargs['images']) as f:
            f = [i.strip() for i in f.read().split('\n') if i.strip()]
        # BUGFIX: the original tested the truthiness of a list comprehension,
        # which is True for ANY non-empty file; we require every line to be a URL
        if f and all(i.startswith('http') for i in f):
            for i in f:
                Manifest(url=i).save_images(limit=1)
            image_paths = sorted(glob2.glob(os.path.join('iiif-downloads', 'images', '*')))
    # handle case where images flag points to a glob of images
    if not image_paths:
        image_paths = sorted(glob2.glob(kwargs['images']))
    # handle case user provided no images
    if not image_paths:
        print('\nError: No input images were found. Please check your --images glob\n')
        sys.exit()
    # optional shuffle that mutates image_paths (flag may be absent)
    if kwargs.get('shuffle', False):
        print(' * shuffling input images')
        random.shuffle(image_paths)
    return image_paths


def stream_images(**kwargs):
    '''Read in all images from args[0], a list of image paths'''
    for idx, i in enumerate(kwargs['image_paths']):
        try:
            metadata = None
            if kwargs.get('metadata', False) and kwargs['metadata'][idx]:
                metadata = kwargs['metadata'][idx]
            yield Image(i, metadata=metadata)
        except Exception as exc:
            print(' * image', i, 'could not be processed --', exc)


def clean_filename(s):
    '''Given a string that points to a filename, return a clean filename'''
    return unquote(os.path.basename(s))


##
# Metadata
##

def get_metadata_list(**kwargs):
    '''Return a list of objects with image metadata'''
    if not kwargs['metadata']:
        return []
    # handle csv metadata
    l = []
    if kwargs['metadata'].endswith('.csv'):
        with open(kwargs['metadata']) as f:
            reader = csv.reader(f)
            headers = [i.lower() for i in next(reader)]
            for i in reader:
                l.append({headers[j]: i[j] if i[j] else '' for j, _ in enumerate(headers)})
    # handle json metadata
    else:
        for i in glob2.glob(kwargs['metadata']):
            with open(i) as f:
                l.append(json.load(f))
    return l


def write_metadata(metadata, **kwargs):
    '''Write list `metadata` of objects to disk'''
    if not metadata:
        return
    out_dir = join(kwargs['out_dir'], 'metadata')
    for i in ['filters', 'options', 'file']:
        out_path = join(out_dir, i)
        if not exists(out_path):
            os.makedirs(out_path)
    # create the lists of images with each tag
    d = defaultdict(list)
    for i in metadata:
        filename = clean_filename(i['filename'])
        i['tags'] = [j.strip() for j in i.get('tags', '').split('|')]
        for j in i['tags']:
            d['__'.join(j.split())].append(filename)
        write_json(os.path.join(out_dir, 'file', filename + '.json'), i, **kwargs)
    write_json(os.path.join(out_dir, 'filters', 'filters.json'), [{
        'filter_name': 'select',
        'filter_values': list(d.keys()),
    }], **kwargs)
    # create the options for the category dropdown
    for i in d:
        write_json(os.path.join(out_dir, 'options', i + '.json'), d[i], **kwargs)
    # create the map from date to images with that date (if dates present)
    date_d = defaultdict(list)
    for i in metadata:
        date = i.get('year', '')
        if date:
            date_d[date].append(clean_filename(i['filename']))
    # find the min and max dates to show on the date slider
    dates = np.array([int(i.strip()) for i in date_d if is_number(i)])
    domain = {'min': float('inf'), 'max': -float('inf')}
    mean = np.mean(dates)
    std = np.std(dates)
    for i in dates:
        # update the date domain with all non-outlier dates (within 4 sigma)
        if abs(mean - i) < (std * 4):
            domain['min'] = int(min(i, domain['min']))
            domain['max'] = int(max(i, domain['max']))
    # write the dates json
    if len(date_d) > 1:
        write_json(os.path.join(out_dir, 'dates.json'), {
            'domain': domain,
            'dates': date_d,
        }, **kwargs)


def is_number(s):
    '''Return a boolean indicating if a string is a number'''
    try:
        int(s)
        return True
    except (TypeError, ValueError):
        return False


##
# Main
##

def get_manifest(**kwargs):
    '''Create and return the base object for the manifest output file'''
    # load the atlas data
    atlas_data = json.load(open(join(kwargs['atlas_dir'], 'atlas_positions.json')))
    # store each cell's size and atlas position
    atlas_ids = set([i['idx'] for i in atlas_data])
    sizes = [[] for _ in atlas_ids]
    pos = [[] for _ in atlas_ids]
    for idx, i in enumerate(atlas_data):
        sizes[i['idx']].append([i['w'], i['h']])
        pos[i['idx']].append([i['x'], i['y']])
    # obtain the paths to each layout's JSON positions
    layouts = get_layouts(**kwargs)
    # create a heightmap for the umap layout
    if 'umap' in layouts and layouts['umap']:
        get_heightmap(layouts['umap']['layout'], 'umap', **kwargs)
    # specify point size scalars
    point_sizes = {}
    point_sizes['min'] = 0
    point_sizes['grid'] = 1 / math.ceil(len(kwargs['image_paths']) ** (1 / 2))
    point_sizes['max'] = point_sizes['grid'] * 1.2
    point_sizes['scatter'] = point_sizes['grid'] * .2
    point_sizes['initial'] = point_sizes['scatter']
    # fetch the date distribution data for point sizing
    if 'date' in layouts and layouts['date']:
        date_layout = read_json(layouts['date']['labels'], **kwargs)
        point_sizes['date'] = 1 / ((date_layout['cols'] + 1) * len(date_layout['labels']))
    # create manifest json
    manifest = {
        'version': get_version(),
        'plot_id': kwargs['plot_id'],
        'layouts': layouts,
        'initial_layout': 'umap',
        'point_sizes': point_sizes,
        'imagelist': get_path('imagelists', 'imagelist', **kwargs),
        'atlas_dir': kwargs['atlas_dir'],
        'metadata': True if kwargs['metadata'] else False,
        'default_hotspots': get_hotspots(vecs=read_json(layouts['umap']['layout'], **kwargs), **kwargs),
        'custom_hotspots': get_path('hotspots', 'user_hotspots', add_hash=False, **kwargs),
        'config': {
            'sizes': {
                'atlas': kwargs['atlas_size'],
                'cell': kwargs['cell_size'],
                'lod': kwargs['lod_cell_height'],
            },
        },
        'creation_date': datetime.datetime.today().strftime('%d-%B-%Y-%H:%M:%S'),
    }
    path = get_path('manifests', 'manifest', **kwargs)
    write_json(path, manifest, **kwargs)
    path = get_path(None, 'manifest', add_hash=False, **kwargs)
    write_json(path, manifest, **kwargs)
    # create images json
    imagelist = {
        'cell_sizes': sizes,
        'images': [clean_filename(i) for i in kwargs['image_paths']],
        'atlas': {
            'count': len(atlas_ids),
            'positions': pos,
        },
    }
    # BUGFIX: pass **kwargs so the gzip flag is honored; the manifest path was
    # built with get_path(**kwargs) and may carry a .gz extension
    write_json(manifest['imagelist'], imagelist, **kwargs)


##
# Atlases
##

def get_atlas_data(**kwargs):
    '''
    Generate and save to disk all atlases to be used for this visualization
    If square, center each cell in an nxn square, else use uniform height
    '''
    # if the atlas files already exist, load from cache
    out_dir = os.path.join(kwargs['out_dir'], 'atlases', kwargs['plot_id'])
    if os.path.exists(out_dir) and kwargs['use_cache'] and not kwargs.get('shuffle', False):
        print(' * loading saved atlas data')
        return out_dir
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # else create the atlas images and store the positions of cells in atlases
    print(' * creating atlas files')
    n = 0  # number of atlases
    x = 0  # x pos in atlas
    y = 0  # y pos in atlas
    positions = []  # l[cell_idx] = atlas data
    atlas = np.zeros((kwargs['atlas_size'], kwargs['atlas_size'], 3))
    for idx, i in enumerate(stream_images(**kwargs)):
        if kwargs['square_cells']:
            cell_data = i.resize_to_square(kwargs['cell_size'])
        else:
            cell_data = i.resize_to_height(kwargs['cell_size'])
        _, v, _ = cell_data.shape
        appendable = False
        if (x + v) <= kwargs['atlas_size']:
            appendable = True
        elif (y + (2 * kwargs['cell_size'])) <= kwargs['atlas_size']:
            # wrap to the next row within the current atlas
            y += kwargs['cell_size']
            x = 0
            appendable = True
        if not appendable:
            # current atlas is full: flush it and start a new one
            save_atlas(atlas, out_dir, n)
            n += 1
            atlas = np.zeros((kwargs['atlas_size'], kwargs['atlas_size'], 3))
            x = 0
            y = 0
        atlas[y:y + kwargs['cell_size'], x:x + v] = cell_data
        # find the size of the cell in the lod canvas
        lod_data = i.resize_to_max(kwargs['lod_cell_height'])
        h, w, _ = lod_data.shape  # h,w,colors in lod-cell sized image `i`
        positions.append({
            'idx': n,  # atlas idx
            'x': x,    # x offset of cell in atlas
            'y': y,    # y offset of cell in atlas
            'w': w,    # w of cell at lod size
            'h': h,    # h of cell at lod size
        })
        x += v
    save_atlas(atlas, out_dir, n)
    out_path = os.path.join(out_dir, 'atlas_positions.json')
    with open(out_path, 'w') as out:
        json.dump(positions, out)
    return out_dir


def save_atlas(atlas, out_dir, n):
    '''Save an atlas to disk'''
    out_path = join(out_dir, 'atlas-{}.jpg'.format(n))
    save_img(out_path, atlas)


##
# Layouts
##

def get_layouts(**kwargs):
    '''Get the image positions in each projection'''
    vecs = vectorize_images(**kwargs)
    umap = get_umap_layout(vecs=vecs, **kwargs)
    linear_assignment = get_lap_layout(umap=umap, **kwargs)
    grid = get_grid_layout(**kwargs)
    umap_jittered = get_pointgrid_layout(umap, 'umap', **kwargs)
    categorical = get_categorical_layout(**kwargs)
    date = get_date_layout(**kwargs)
    layouts = {
        'umap': {
            'layout': umap,
            'jittered': umap_jittered,
        },
        'alphabetic': {
            'layout': grid,
        },
        'grid': {
            'layout': linear_assignment,
        },
        'categorical': categorical,
        'date': date,
    }
    return layouts


def vectorize_images(**kwargs):
    '''Create and return vector representation of Image() instances'''
    print(' * preparing to vectorize {} images'.format(len(kwargs['image_paths'])))
    vector_dir = os.path.join(kwargs['out_dir'], 'image-vectors')
    if not os.path.exists(vector_dir):
        os.makedirs(vector_dir)
    base = InceptionV3(include_top=True, weights='imagenet', )
    model = Model(inputs=base.input, outputs=base.get_layer('avg_pool').output)
    print(' * creating image array')
    vecs = []
    for idx, i in enumerate(stream_images(**kwargs)):
        vector_path = os.path.join(vector_dir, os.path.basename(i.path) + '.npy')
        if os.path.exists(vector_path) and kwargs['use_cache']:
            vec = np.load(vector_path)
        else:
            im = preprocess_input(img_to_array(i.original.resize((299, 299))))
            vec = model.predict(np.expand_dims(im, 0)).squeeze()
            np.save(vector_path, vec)
        vecs.append(vec)
        print(' * vectorized {}/{} images'.format(idx + 1, len(kwargs['image_paths'])))
    return np.array(vecs)


def get_umap_layout(**kwargs):
    '''Get the x,y positions of images passed through a umap projection'''
    print(' * creating UMAP layout')
    out_path = get_path('layouts', 'umap', **kwargs)
    if os.path.exists(out_path) and kwargs['use_cache']:
        return out_path
    model = UMAP(n_neighbors=kwargs['n_neighbors'],
                 min_dist=kwargs['min_distance'],
                 metric=kwargs['metric'])
    # run PCA to reduce dimensionality of image vectors
    w = PCA(n_components=min(100, len(kwargs['vecs']))).fit_transform(kwargs['vecs'])
    # fetch categorical labels for images (if provided)
    y = []
    if kwargs.get('metadata', False):
        labels = [i.get('label', None) for i in kwargs['metadata']]
        # if the user provided labels, integerize them; -1 marks "unlabeled"
        if any([i for i in labels]):
            d = defaultdict(lambda: len(d))
            for i in labels:
                if i is None:
                    y.append(-1)
                else:
                    y.append(d[i])
            y = np.array(y)
    # project the PCA space down to 2d for visualization
    z = model.fit(w, y=y if np.any(y) else None).embedding_
    return write_layout(out_path, z, **kwargs)


def get_tsne_layout(**kwargs):
    '''Get the x,y positions of images passed through a TSNE projection'''
    print(' * creating TSNE layout with ' + str(multiprocessing.cpu_count()) + ' cores...')
    out_path = get_path('layouts', 'tsne', **kwargs)
    if os.path.exists(out_path) and kwargs['use_cache']:
        return out_path
    model = TSNE(perplexity=kwargs.get('perplexity', 2),
                 n_jobs=multiprocessing.cpu_count())
    z = model.fit_transform(kwargs['vecs'])
    return write_layout(out_path, z, **kwargs)


def get_rasterfairy_layout(**kwargs):
    '''Get the x, y position of images passed through a rasterfairy projection'''
    print(' * creating rasterfairy layout')
    out_path = get_path('layouts', 'rasterfairy', **kwargs)
    if os.path.exists(out_path) and kwargs['use_cache']:
        return out_path
    umap = np.array(read_json(kwargs['umap'], **kwargs))
    umap = (umap + 1) / 2  # scale 0:1
    try:
        umap = coonswarp.rectifyCloud(umap,  # stretch the distribution
                                      perimeterSubdivisionSteps=4,
                                      autoPerimeterOffset=False,
                                      paddingScale=1.05)
    except Exception:
        # rectification is a best-effort cosmetic step; fall back to raw umap
        print(' * coonswarp rectification could not be performed')
    pos = rasterfairy.transformPointCloud2D(umap)[0]
    return write_layout(out_path, pos, **kwargs)


def get_lap_layout(**kwargs):
    '''Gridify the umap layout via a linear assignment problem (Jonker-Volgenant)'''
    print(' * creating linear assignment layout')
    out_path = get_path('layouts', 'assignment', **kwargs)
    if os.path.exists(out_path) and kwargs['use_cache']:
        return out_path
    # load the umap layout
    umap = np.array(read_json(kwargs['umap'], **kwargs))
    umap = (umap + 1) / 2  # scale 0:1
    # determine length of each side in square grid
    side = math.ceil(umap.shape[0] ** (1 / 2))
    # create square grid 0:1 in each dimension
    grid_x, grid_y = np.meshgrid(np.linspace(0, 1, side), np.linspace(0, 1, side))
    grid = np.dstack((grid_x, grid_y)).reshape(-1, 2)
    # compute pairwise distance costs
    cost = cdist(grid, umap, 'sqeuclidean')
    # increase cost so the solver's integer arithmetic has adequate precision
    cost = cost * (10000000. / cost.max())
    # run the linear assignment
    min_cost, row_assignments, col_assignments = lap.lapjv(np.copy(cost), extend_cost=True)
    # use the assignment vals to determine gridified positions of `arr`
    pos = grid[col_assignments]
    return write_layout(out_path, pos, **kwargs)


def get_grid_layout(**kwargs):
    '''Get the x,y positions of images in a grid projection'''
    print(' * creating grid layout')
    out_path = get_path('layouts', 'grid', **kwargs)
    if os.path.exists(out_path) and kwargs['use_cache']:
        return out_path
    paths = kwargs['image_paths']
    n = math.ceil(len(paths) ** (1 / 2))
    l = []  # positions
    for i, _ in enumerate(paths):
        x = i % n
        y = math.floor(i / n)
        l.append([x, y])
    z = np.array(l)
    return write_layout(out_path, z, **kwargs)


def get_pointgrid_layout(path, label, **kwargs):
    '''Gridify the positions in `path` and return the path to this new layout'''
    print(' * creating {} pointgrid'.format(label))
    out_path = get_path('layouts', label + '-jittered', **kwargs)
    if os.path.exists(out_path) and kwargs['use_cache']:
        return out_path
    arr = np.array(read_json(path, **kwargs))
    z = align_points_to_grid(arr, fill=0.025)
    return write_layout(out_path, z, **kwargs)


##
# Date layout
##

def get_date_layout(cols=3, bin_units='years', **kwargs):
    '''
    Get the x,y positions of input images based on their dates
    @param int cols: the number of columns to plot for each bar
    @param str bin_units: the temporal units to use when creating bins
    '''
    date_vals = [kwargs['metadata'][i].get('year', False) for i in range(len(kwargs['metadata']))]
    if not kwargs['metadata'] or not any(date_vals):
        return False
    # if the data layouts have been cached, return them
    positions_out_path = get_path('layouts', 'timeline', **kwargs)
    labels_out_path = get_path('layouts', 'timeline-labels', **kwargs)
    if os.path.exists(positions_out_path) and \
       os.path.exists(labels_out_path) and \
       kwargs['use_cache']:
        return {
            'layout': positions_out_path,
            'labels': labels_out_path,
        }
    # date layout is not cached, so fetch dates and process
    print(' * creating date layout with {} columns'.format(cols))
    datestrings = [i.metadata.get('year', 'no_date') for i in stream_images(**kwargs)]
    dates = [datestring_to_date(i) for i in datestrings]
    rounded_dates = [round_date(i, bin_units) for i in dates]
    # create d[formatted_date] = [indices into datestrings of dates that round to formatted_date]
    d = defaultdict(list)
    for idx, i in enumerate(rounded_dates):
        d[i].append(idx)
    # determine the number of distinct grid positions in the x and y axes
    n_coords_x = (cols + 1) * len(d)
    n_coords_y = 1 + max([len(d[i]) for i in d]) // cols
    # if the layout is too tall, retry with wider bars
    if n_coords_y > n_coords_x:
        return get_date_layout(cols=int(cols * 2), **kwargs)
    # create a mesh of grid positions in clip space -1:1 given the time distribution
    grid_x = (np.arange(0, n_coords_x) / (n_coords_x - 1)) * 2
    grid_y = (np.arange(0, n_coords_y) / (n_coords_x - 1)) * 2
    # divide each grid axis by half its max length to center at the origin 0,0
    grid_x = grid_x - np.max(grid_x) / 2.0
    grid_y = grid_y - np.max(grid_y) / 2.0
    # make dates increase from left to right by sorting keys of d
    d_keys = np.array(list(d.keys()))
    seconds = np.array([date_to_seconds(dates[d[i][0]]) for i in d_keys])
    d_keys = d_keys[np.argsort(seconds)]
    # determine which images will fill which units of the grid established above
    coords = np.zeros((len(datestrings), 2))  # 2D array with x, y clip-space coords of each date
    for jdx, j in enumerate(d_keys):
        for kdx, k in enumerate(d[j]):
            x = jdx * (cols + 1) + (kdx % cols)
            y = kdx // cols
            coords[k] = [grid_x[x], grid_y[y]]
    # find the positions of labels
    label_positions = np.array([[grid_x[i * (cols + 1)], grid_y[0]] for i in range(len(d))])
    # move the labels down in the y dimension by a grid unit
    dx = (grid_x[1] - grid_x[0])  # size of a single cell
    label_positions[:, 1] = label_positions[:, 1] - dx
    # quantize the label positions and label positions
    image_positions = round_floats(coords)
    label_positions = round_floats(label_positions.tolist())
    # write and return the paths to the date based layout
    return {
        'layout': write_json(positions_out_path, image_positions, **kwargs),
        'labels': write_json(labels_out_path, {
            'positions': label_positions,
            'labels': d_keys.tolist(),
            'cols': cols,
        }, **kwargs),
    }


def datestring_to_date(datestring):
    '''
    Given a string representing a date return a datetime object
    '''
    try:
        return parse_date(str(datestring), fuzzy=True, default=datetime.datetime(9999, 1, 1))
    except Exception:
        print(' * could not parse datestring {}'.format(datestring))
        return datestring


def date_to_seconds(date):
    '''
    Given a datetime object return an integer representation for that datetime
    '''
    if isinstance(date, datetime.datetime):
        return (date - datetime.datetime.today()).total_seconds()
    else:
        # unparseable dates sort before all real dates
        return - float('inf')


def round_date(date, unit):
    '''
    Return `date` truncated to the temporal unit specified in `units`
    '''
    if not isinstance(date, datetime.datetime):
        return 'no_date'
    formatted = date.strftime('%d %B %Y -- %X')
    if unit in set(['seconds', 'minutes', 'hours']):
        date = formatted.split('--')[1].strip()
        if unit == 'seconds':
            date = date
        elif unit == 'minutes':
            # BUGFIX: the original referenced an undefined name `d` here,
            # raising NameError whenever bin_units == 'minutes'
            date = ':'.join(date.split(':')[:-1]) + ':00'
        elif unit == 'hours':
            date = date.split(':')[0] + ':00:00'
    elif unit in set(['days', 'months', 'years', 'decades', 'centuries']):
        date = formatted.split('--')[0].strip()
        if unit == 'days':
            date = date
        elif unit == 'months':
            date = ' '.join(date.split()[1:])
        elif unit == 'years':
            date = date.split()[-1]
        elif unit == 'decades':
            date = str(int(date.split()[-1]) // 10) + '0'
        elif unit == 'centuries':
            date = str(int(date.split()[-1]) // 100) + '00'
    return date


##
# Metadata layout
##

def get_categorical_layout(null_category='Other', margin=2, **kwargs):
    '''
    Return a numpy array with shape (n_points, 2) with the point positions
    of observations in box regions determined by each point's category metadata
    attribute (if applicable)
    '''
    if not kwargs.get('metadata', False):
        return False
    # determine the out path and return from cache if possible
    out_path = get_path('layouts', 'categorical', **kwargs)
    labels_out_path = get_path('layouts', 'categorical-labels', **kwargs)
    # BUGFIX: the cached branch previously returned a bare path while the fresh
    # branch returns a {'layout', 'labels'} dict; it also ignored use_cache
    if os.path.exists(out_path) and os.path.exists(labels_out_path) and kwargs['use_cache']:
        return {
            'layout': out_path,
            'labels': labels_out_path,
        }
    # accumulate d[category] = [indices of points with category]
    categories = [i.get('category', None) for i in kwargs['metadata']]
    if not any(categories):
        return False
    d = defaultdict(list)
    for idx, i in enumerate(categories):
        d[i].append(idx)
    # store the number of observations in each group
    keys_and_counts = [{'key': i, 'count': len(d[i])} for i in d]
    keys_and_counts.sort(key=operator.itemgetter('count'), reverse=True)
    # get the box layout then subdivide into discrete points
    boxes = get_categorical_boxes([i['count'] for i in keys_and_counts], margin=margin)
    points = get_categorical_points(boxes)
    # sort the points into the order of the observations in the metadata
    counts = {i['key']: 0 for i in keys_and_counts}
    offsets = {i['key']: 0 for i in keys_and_counts}
    for idx, i in enumerate(keys_and_counts):
        offsets[i['key']] += sum([j['count'] for j in keys_and_counts[:idx]])
    sorted_points = []
    for idx, i in enumerate(stream_images(**kwargs)):
        category = i.metadata.get('category', null_category)
        sorted_points.append(points[offsets[category] + counts[category]])
        counts[category] += 1
    sorted_points = np.array(sorted_points)
    # add to the sorted points the anchors for the text labels for each group
    text_anchors = np.array([[i.x, i.y - margin / 2] for i in boxes])
    # add the anchors to the points - these will be removed after the points are projected
    sorted_points = np.vstack([sorted_points, text_anchors])
    # scale -1:1 using the largest axis as the scaling metric
    _max = np.max(sorted_points)
    for i in range(2):
        _min = np.min(sorted_points[:, i])
        sorted_points[:, i] -= _min
        sorted_points[:, i] /= (_max - _min)
        sorted_points[:, i] -= np.max(sorted_points[:, i]) / 2
        sorted_points[:, i] *= 2
    # separate out the sorted points and text positions
    text_anchors = sorted_points[-len(text_anchors):]
    sorted_points = sorted_points[:-len(text_anchors)]
    z = round_floats(sorted_points.tolist())
    return {
        'layout': write_json(out_path, z, **kwargs),
        'labels': write_json(labels_out_path, {
            'positions': round_floats(text_anchors.tolist()),
            'labels': [i['key'] for i in keys_and_counts],
        }, **kwargs)
    }


def get_categorical_boxes(group_counts, margin=2):
    '''
    @arg [int] group_counts: counts of the number of images in each distinct
      level within the metadata's caetgories
    @kwarg int margin: space between boxes in the 2D layout
    @returns [Box] an array of Box() objects; one per level in `group_counts`
    '''
    boxes = []
    for i in group_counts:
        x = y = math.ceil(i ** (1 / 2))
        boxes.append(Box(i, x, y, None, None))
    # find the position along x axis where we want to create a break
    wrap = sum([i.cells for i in boxes]) ** (1 / 2)
    # find the valid positions on the y axis
    y = margin
    y_spots = []
    for idx, i in enumerate(boxes):
        if (y + i.h) < wrap:
            y_spots.append(y)
        y += i.h + margin
    y_spot_index = 0
    for idx, i in enumerate(boxes):
        # find the y position
        y = y_spots[y_spot_index]
        # find members with this y position
        row_members = [j.x + j.w for j in boxes if j.y == y]
        # assign the y position
        i.y = y
        y_spot_index = (y_spot_index + 1) % len(y_spots)
        # assign the x position
        i.x = max(row_members) + margin if row_members else margin
    return boxes


def get_categorical_points(arr, unit_size=None):
    '''Given an array of Box() objects, return a 2D distribution with shape (n_cells, 2)'''
    points_arr = []
    for i in arr:
        area = i.w * i.h
        per_unit = (area / i.cells) ** (1 / 2)
        x_units = math.ceil(i.w / per_unit)
        y_units = math.ceil(i.h / per_unit)
        if not unit_size:
            unit_size = min(i.w / x_units, i.h / y_units)
        for j in range(i.cells):
            x = j % x_units
            y = j // x_units
            points_arr.append([
                i.x + x * unit_size,
                i.y + y * unit_size,
            ])
    return np.array(points_arr)


class Box:
    '''Store the width, height, and x, y coords of a box'''

    def __init__(self, *args):
        self.cells = args[0]
        self.w = args[1]
        self.h = args[2]
        self.x = None if len(args) < 4 else args[3]
        self.y = None if len(args) < 5 else args[4]


##
# Helpers
##

def get_path(*args, **kwargs):
    '''Return the path to a JSON file with conditional gz extension'''
    sub_dir, filename = args
    out_dir = join(kwargs['out_dir'], sub_dir) if sub_dir else kwargs['out_dir']
    if kwargs.get('add_hash', True):
        filename += '-' + kwargs['plot_id']
    path = join(out_dir, filename + '.json')
    return path + '.gz' if kwargs.get('gzip', False) else path


def write_layout(path, obj, **kwargs):
    '''Write layout json `obj` to disk and return the path to the saved file'''
    obj = (minmax_scale(obj) - 0.5) * 2  # scale -1:1
    obj = round_floats(obj)
    return write_json(path, obj, **kwargs)


def round_floats(obj, digits=5):
    '''Return 2D array obj with rounded float precision'''
    return [[round(float(j), digits) for j in i] for i in obj]


def write_json(path, obj, **kwargs):
    '''Write json object `obj` to disk and return the path to that file'''
    out_dir, filename = os.path.split(path)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    if kwargs.get('gzip', False):
        with gzip.GzipFile(path, 'w') as out:
            out.write(json.dumps(obj, indent=4).encode(kwargs['encoding']))
        return path
    else:
        with open(path, 'w') as out:
            json.dump(obj, out, indent=4)
        return path


def read_json(path, **kwargs):
    '''Read and return the json object written by the current process at `path`'''
    if kwargs.get('gzip', False):
        with gzip.GzipFile(path, 'r') as f:
            return json.loads(f.read().decode(kwargs['encoding']))
    with open(path) as f:
        return json.load(f)


def get_hotspots(**kwargs):
    '''Return the stable clusters from the condensed tree of connected components from the density graph'''
    print(' * HDBSCAN clustering data with ' + str(multiprocessing.cpu_count()) + ' cores...')
    config = {
        'core_dist_n_jobs': multiprocessing.cpu_count(),
        'min_cluster_size': kwargs['min_cluster_size'],
        'cluster_selection_epsilon': 0.01,
        'min_samples': 1,
        'approx_min_span_tree': False,
    }
    v = kwargs['vecs']
    z = HDBSCAN(**config).fit(v)
    # find the points in each cluster
    d = defaultdict(list)
    for idx, i in enumerate(z.labels_):
        d[i].append(v[idx])
    # find the convex hull for each cluster's points
    convex_hulls = []
    for i in d:
        hull = ConvexHull(d[i])
        points = [hull.points[j] for j in hull.vertices]
        # the last convex hull simplex needs to connect back to the first point
        convex_hulls.append(np.vstack([points, points[0]]))
    # find the centroids for each cluster
    centroids = []
    for i in d:
        x, y = np.array(d[i]).T
        centroids.append(np.array([np.mean(x), np.mean(y)]))
    # identify the number of points in each cluster
    lens = [len(d[i]) for i in d]
    # combine data into cluster objects
    closest, _ = pairwise_distances_argmin_min(centroids, v)
    paths = [kwargs['image_paths'][i] for i in closest]
    clusters = [{
        'img': clean_filename(paths[idx]),
        'convex_hull': convex_hulls[idx].tolist(),
        'n_images': lens[idx],
    } for idx, i in enumerate(closest)]
    # remove massive clusters (shoelace formula for hull area)
    retained = []
    for idx, i in enumerate(clusters):
        x, y = np.array(i['convex_hull']).T
        area = 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
        if area < 0.2:
            retained.append(i)
    # sort the clusers by size
    clusters = sorted(retained, key=lambda i: i['n_images'], reverse=True)
    for idx, i in enumerate(clusters):
        i['label'] = 'Cluster {}'.format(idx + 1)
    # save the hotspots to disk and return the path to the saved json
    print(' * found', len(clusters), 'hotspots')
    return write_json(get_path('hotspots', 'hotspot', **kwargs), clusters, **kwargs)


def get_heightmap(path, label, **kwargs):
    '''Create a heightmap using the distribution of points stored at `path`'''
    X = read_json(path, **kwargs)
    if 'positions' in X:
        X = X['positions']
    X = np.array(X)
    # create kernel density estimate of distribution X
    nbins = 200
    x, y = X.T
    xi, yi = np.mgrid[x.min():x.max():nbins * 1j, y.min():y.max():nbins * 1j]
    zi = kde.gaussian_kde(X.T)(np.vstack([xi.flatten(), yi.flatten()]))
    # create the plot
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
    fig.subplots_adjust(0, 0, 1, 1)
    plt.pcolormesh(xi, yi, zi.reshape(xi.shape), shading='gouraud', cmap=plt.cm.gray)
    plt.axis('off')
    # save the plot
    out_dir = os.path.join(kwargs['out_dir'], 'heightmaps')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_path = os.path.join(out_dir, label + '-heightmap.png')
    plt.savefig(out_path, pad_inches=0)


def write_images(**kwargs):
    '''Write all originals and thumbs to the output dir'''
    for i in stream_images(**kwargs):
        filename = clean_filename(i.path)
        # copy original for lightbox
        out_dir = join(kwargs['out_dir'], 'originals')
        if not exists(out_dir):
            os.makedirs(out_dir)
        out_path = join(out_dir, filename)
        shutil.copy(i.path, out_path)
        # copy thumb for lod texture
        out_dir = join(kwargs['out_dir'], 'thumbs')
        if not exists(out_dir):
            os.makedirs(out_dir)
        out_path = join(out_dir, filename)
        img = array_to_img(i.resize_to_max(kwargs['lod_cell_height']))
        save_img(out_path, img)


def get_version():
    '''Return the version of pixplot installed'''
    return pkg_resources.get_distribution('pixplot').version


class Image:
    '''Wrap a single input image and its (optional) metadata record'''

    def __init__(self, *args, **kwargs):
        self.path = args[0]
        self.original = load_img(self.path)
        # ROBUSTNESS: stream_images passes metadata=None for records without
        # metadata; default to an empty dict so callers can use .get() safely
        self.metadata = kwargs.get('metadata') or {}

    def resize_to_max(self, n):
        '''
        Resize self.original so its longest side has n pixels (maintain proportion)
        '''
        w, h = self.original.size
        size = (n, int(n * h / w)) if w > h else (int(n * w / h), n)
        return img_to_array(self.original.resize(size))

    def resize_to_height(self, height):
        '''
        Resize self.original into an image with height h and proportional width
        '''
        w, h = self.original.size
        # guard against zero-width results for extremely tall images
        if (w / h * height) < 1:
            resizedwidth = 1
        else:
            resizedwidth = int(w / h * height)
        size = (resizedwidth, height)
        return img_to_array(self.original.resize(size))

    def resize_to_square(self, n, center=False):
        '''
        Resize self.original to an image with nxn pixels (maintain proportion)
        if center, center the colored pixels in the square, else left align
        '''
        # NOTE(review): the tail of this method was truncated in the provided
        # source; reconstructed with symmetric zero-padding -- verify upstream
        a = self.resize_to_max(n)
        h, w, c = a.shape
        pad_lr = int((n - w) / 2)  # left/right pad
        pad_tb = int((n - h) / 2)  # top/bottom pad
        b = np.zeros((n, n, 3))
        if center:
            b[pad_tb:pad_tb + h, pad_lr:pad_lr + w, :] = a
        else:
            b[:h, :w, :] = a
        return b
2) # left right pad pad_tb = int((n - h) / 2) # top bottom pad b = np.zeros((n, n, 3)) if center: b[pad_tb:pad_tb + h, pad_lr:pad_lr + w, :] = a else: b[:h, :w, :] = a return b ## # Entry Point ## def parse(): '''Read command line args and begin data processing''' description = 'Generate the data required to create a PixPlot viewer' parser = argparse.ArgumentParser(description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--images', type=str, default=config['images'], help='path to a glob of images to process', required=False) parser.add_argument('--metadata', type=str, default=config['metadata'], help='path to a csv or glob of JSON files with image metadata (see readme for format)', required=False) parser.add_argument('--max_images', type=int, default=config['max_images'], help='maximum number of images to process from the input glob', required=False) parser.add_argument('--use_cache', type=bool, default=config['use_cache'], help='given inputs identical to prior inputs, load outputs from cache', required=False) parser.add_argument('--encoding', type=str, default=config['encoding'], help='the encoding of input metadata', required=False) parser.add_argument('--min_cluster_size', type=int, default=config['min_cluster_size'], help='the minimum number of images in a cluster', required=False) parser.add_argument('--out_dir', type=str, default=config['out_dir'], help='the directory to which outputs will be saved', required=False) parser.add_argument('--cell_size', type=int, default=config['cell_size'], help='the size of atlas cells in px', required=False) parser.add_argument('--n_neighbors', type=int, default=config['n_neighbors'], help='the n_neighbors argument for UMAP') parser.add_argument('--min_distance', type=float, default=config['min_distance'], help='the min_distance argument for umap') parser.add_argument('--metric', type=str, default=config['metric'], help='the metric argument for umap') 
parser.add_argument('--pointgrid_fill', type=float, default=config['pointgrid_fill'], help='float 0:1 that determines sparsity of jittered distributions (lower means more sparse)') parser.add_argument('--copy_web_only', action='store_true', help='update ./output/web without reprocessing data') parser.add_argument('--gzip', action='store_true', help='save outputs with gzip compression') parser.add_argument('--shuffle', action='store_true', help='shuffle the input images before data processing begins') parser.add_argument('--plot_id', type=str, default=config['plot_id'], help='unique id for a plot; useful for resuming processing on a started plot') parser.add_argument('--seed', type=int, default=config['seed'], help='seed for random processes') config.update(vars(parser.parse_args())) process_images(**config) if __name__ == '__main__': parse() # python UpdatedPixPlot.py --images "images/*.jpg"
PypiClean
/Energy_Pinch-1.0.1-py3-none-any.whl/energy_pinch/module.py
from numpy import array, diff,sort,cumsum,concatenate,linspace,polyfit,polyval,RankWarning import matplotlib.pyplot as plt #from scipy.interpolate import interp1d import warnings #from scipy import interpolate def pinch(streams,dt=10,table=False,composites=False,grand_composite=False): Ts=[] # shifted temperatures Thot=[] #actual hot temperatures Tcold=[] #actual cold temperatures for s in streams: s['T']=array(s['T']) if diff(s['T'])[0]<0: s['type']=1 s['Ts']=s['T']-dt/2 else: s['type']=0 s['Ts']=s['T']+dt/2 for Ts1 in s['Ts']: if not Ts1 in Ts: Ts.append(Ts1) for T1 in s['T']: if s['type']: if not T1 in Thot: Thot.append(T1) else: if not T1 in Tcold: Tcold.append(T1) for i in range(len(streams)): #print('assign names...') if not 'name' in streams[i].keys(): if streams[i]['type']==1: streams[i]['name']='hot_'+str(i) else: streams[i]['name']='cold_'+str(i) Ts=sort(Ts)[::-1] Thot=sort(Thot) Tcold=sort(Tcold) #print(Thot) #print(Tcold) #grouping groups=[] count=-1 for i in range(len(Ts)-1): count+=1 groups.append({'streams':[],'dh':0,'dts':Ts[i]-Ts[i+1]}) for j in range(len(streams)): if Ts[i+1]>=streams[j]['Ts'][0] and Ts[i]<=streams[j]['Ts'][1] or Ts[i+1]>=streams[j]['Ts'][1] and Ts[i]<=streams[j]['Ts'][0]: groups[count]['streams'].append(j) f=-1 if streams[j]['type']: f=1 #net balance groups[count]['dh']+=f*groups[count]['dts']*streams[j]['DC'] cascade1=concatenate(([0],cumsum(list(map(lambda x:x['dh'],groups))))) hot_utility=-min(cascade1) pinchTs=Ts[cascade1==min(cascade1)] pinchThot=pinchTs[0]+dt/2 pinchTcold=pinchTs[0]-dt/2 cold_utility=cascade1[-1]+hot_utility Hhot=[0] for i in range(len(Thot)-1): Hhot.append(0) for j in range(len(streams)): if streams[j]['type'] and Thot[i+1]<=streams[j]['T'][0] and Thot[i]>=streams[j]['T'][1]: Hhot[i+1]+=streams[j]['DC']*(Thot[i+1]-Thot[i]) Hhot=cumsum(Hhot) Hcold=[cold_utility] for i in range(len(Tcold)-1): Hcold.append(0) for j in range(len(streams)): if not streams[j]['type'] and Tcold[i+1]<=streams[j]['T'][1] and 
Tcold[i]>=streams[j]['T'][0]: Hcold[i+1]+=streams[j]['DC']*(Tcold[i+1]-Tcold[i]) Hcold=cumsum(Hcold) if table: from prettytable import PrettyTable t = PrettyTable(['Ts °C','streams','DT °C','Bilan net kW',"cascade 1 kW",'cascade 2 kW']) for i in range(len(groups)): t.add_row([Ts[i],'','','',cascade1[i],cascade1[i]+hot_utility]) groups_=[] for j in range(len(groups[i]["streams"])): groups_.append(streams[groups[i]["streams"][j]]['name']) t.add_row(['',groups_,groups[i]['dts'],groups[i]['dh'],'','']) t.add_row([Ts[-1],'','','',cascade1[-1],cascade1[-1]+hot_utility]) print(t.get_string(title="Pinch analysis DT="+str(dt)+"°C")) if composites: plt.figure(1) plt.plot(Hcold,Tcold,markerfacecolor='white',marker='s',markersize=4,color='blue',label='Cold composite curve') plt.plot(Hhot,Thot,markerfacecolor='white',marker='s',markersize=4,color='red',label='Hot composite curve') # plt.plot([cold_utility]*100,linspace(Tcold[0],Thot[-1],100),'--',color='gray') # plt.plot([0]*100,linspace(Tcold[0],Thot[-1],100),'--',color='gray') # plt.plot([Hhot[-1]]*100,linspace(Tcold[0],Thot[-1],100),'--',color='gray') # plt.plot([Hcold[-1]]*100,linspace(Tcold[0],Tcold[-1],100),'--',color='gray') with warnings.catch_warnings(): warnings.simplefilter('ignore', RankWarning) zhot = polyfit(Hhot,Thot, 6) zcold = polyfit(Hcold,Tcold,1) # tck_hot = interpolate.splrep(Hhot,Thot) #plt.fill_between([0,cold_utility],[pinchTcold,pinchTcold],hatch='////',facecolor='white',) #plt.fill_between([Hhot[-1],Hcold[-1]],[Thot[-1],Thot[-1]],[pinchThot,pinchThot],hatch='////',facecolor='white',) plt.plot([cold_utility]*100,linspace(Tcold[0], polyval(zhot,cold_utility),100),'--',color='gray') plt.plot([Hhot[-1]]*100,linspace(Thot[-1], polyval(zcold,Hhot[-1]),100),'--',color='gray') plt.annotate('', xy=(0, polyval(zhot,cold_utility)),xytext=(cold_utility,polyval(zhot,cold_utility)), arrowprops=dict(arrowstyle="<->",linestyle="-",color='gray')) plt.annotate('', xy=(Hhot[-1], 
polyval(zcold,Hhot[-1])),xytext=(Hcold[-1],polyval(zcold,Hhot[-1])), arrowprops=dict(arrowstyle="<->",linestyle="-",color='gray')) plt.text(Hcold[0]/40,pinchThot,'{:.1f}'.format(cold_utility)+' kW') plt.text(Hhot[-1]*1,polyval(zcold,Hhot[-1])*.9,'{:.1f}'.format(hot_utility)+' kW') plt.grid() plt.legend() plt.xlabel('Heat flow (kW)') plt.ylabel('Temperature (°C)') #plt.yticks(concatenate((Thot,Tcold))) if grand_composite: plt.figure(2) plt.plot(cascade1+hot_utility,Ts) plt.grid() plt.xlabel('Net heat flow (kW)') plt.ylabel('Shifted temperature (°C)') # plt.annotate('', xy=(0, Ts[-1]),xytext=(cold_utility,Ts[-1]), arrowprops=dict(arrowstyle="<->",linestyle="-",color='gray')) # plt.annotate('', xy=(0, Ts[0]),xytext=(hot_utility,Ts[0]), arrowprops=dict(arrowstyle="<->",linestyle="-",color='gray')) plt.fill_between(cascade1+hot_utility,Ts,where=Ts<=pinchTs,hatch='//',facecolor='skyblue') plt.fill_between(cascade1+hot_utility,Ts,Ts[0],where=Ts>=pinchTs,hatch='\\',facecolor="salmon") plt.text(hot_utility,Thot[-1]*.9,'{:.1f}'.format(hot_utility)+' kW') plt.text(cold_utility*.85,5,'{:.1f}'.format(cold_utility)+' kW') return {'hot_utility':hot_utility,'cold_utility':cold_utility} def test(ex): if ex=="TD GPB 2020": streams=[{'DC':2,'T':[20,135.],'name':'F1'}, {'DC':3,'T':[170,60],'name':'C1'}, {'DC':4,'T':[80,140],'name':'F2'}, {'DC':1.5,'T':[150,30],'name':'C2'}] pinch(streams,10);
PypiClean
/Hikka_Pyro-2.0.66-py3-none-any.whl/pyrogram/types/inline_mode/inline_query.py
from typing import List, Match

import pyrogram
from pyrogram import raw
from pyrogram import types, enums
from ..object import Object
from ..update import Update


class InlineQuery(Object, Update):
    """An incoming inline query. When the user sends an empty query, your bot could return some default or trending
    results.

    Parameters:
        id (``str``):
            Unique identifier for this query.

        from_user (:obj:`~pyrogram.types.User`):
            Sender.

        query (``str``):
            Text of the query (up to 512 characters).

        offset (``str``):
            Offset of the results to be returned, can be controlled by the bot.

        chat_type (:obj:`~pyrogram.enums.ChatType`, *optional*):
            Type of the chat, from which the inline query was sent.

        location (:obj:`~pyrogram.types.Location`. *optional*):
            Sender location, only for bots that request user location.

        matches (List of regex Matches, *optional*):
            A list containing all `Match Objects <https://docs.python.org/3/library/re.html#match-objects>`_ that match
            the query of this inline query. Only applicable when using :obj:`Filters.regex <pyrogram.Filters.regex>`.
    """

    def __init__(
        self,
        *,
        client: "pyrogram.Client" = None,
        id: str,
        from_user: "types.User",
        query: str,
        offset: str,
        chat_type: "enums.ChatType",
        location: "types.Location" = None,
        matches: List[Match] = None
    ):
        super().__init__(client)

        self.id = id
        self.from_user = from_user
        self.query = query
        self.offset = offset
        self.chat_type = chat_type
        self.location = location
        self.matches = matches

    @staticmethod
    def _parse(client, inline_query: raw.types.UpdateBotInlineQuery, users: dict) -> "InlineQuery":
        """Build an :obj:`InlineQuery` from a raw ``UpdateBotInlineQuery`` (internal).

        ``users`` maps user ids to raw user objects; the sender is looked up there.
        An unrecognized (or missing) ``peer_type`` leaves ``chat_type`` as ``None``.
        """
        peer_type = inline_query.peer_type
        chat_type = None

        # Map the raw MTProto peer type onto Pyrogram's ChatType enum.
        if isinstance(peer_type, raw.types.InlineQueryPeerTypeSameBotPM):
            chat_type = enums.ChatType.BOT
        elif isinstance(peer_type, raw.types.InlineQueryPeerTypePM):
            chat_type = enums.ChatType.PRIVATE
        elif isinstance(peer_type, raw.types.InlineQueryPeerTypeChat):
            chat_type = enums.ChatType.GROUP
        elif isinstance(peer_type, raw.types.InlineQueryPeerTypeMegagroup):
            chat_type = enums.ChatType.SUPERGROUP
        elif isinstance(peer_type, raw.types.InlineQueryPeerTypeBroadcast):
            chat_type = enums.ChatType.CHANNEL

        return InlineQuery(
            id=str(inline_query.query_id),
            from_user=types.User._parse(client, users[inline_query.user_id]),
            query=inline_query.query,
            offset=inline_query.offset,
            chat_type=chat_type,
            # geo is only present when a location was attached to the query.
            location=types.Location(
                longitude=inline_query.geo.long,
                latitude=inline_query.geo.lat,
                client=client
            ) if inline_query.geo else None,
            client=client
        )

    async def answer(
        self,
        results: List["types.InlineQueryResult"],
        cache_time: int = 300,
        is_gallery: bool = False,
        is_personal: bool = False,
        next_offset: str = "",
        switch_pm_text: str = "",
        switch_pm_parameter: str = ""
    ):
        """Bound method *answer* of :obj:`~pyrogram.types.InlineQuery`.

        Use this method as a shortcut for:

        .. code-block:: python

            await client.answer_inline_query(
                inline_query.id,
                results=[...]
            )

        Example:
            .. code-block:: python

                await inline_query.answer([...])

        Parameters:
            results (List of :obj:`~pyrogram.types.InlineQueryResult`):
                A list of results for the inline query.

            cache_time (``int``, *optional*):
                The maximum amount of time in seconds that the result of the inline query may be cached on the server.
                Defaults to 300.

            is_gallery (``bool``, *optional*):
                Pass True, if results should be displayed in gallery mode instead of list mode.
                Defaults to False.

            is_personal (``bool``, *optional*):
                Pass True, if results may be cached on the server side only for the user that sent the query.
                By default (False), results may be returned to any user who sends the same query.

            next_offset (``str``, *optional*):
                Pass the offset that a client should send in the next query with the same text to receive more results.
                Pass an empty string if there are no more results or if you don‘t support pagination.
                Offset length can’t exceed 64 bytes.

            switch_pm_text (``str``, *optional*):
                If passed, clients will display a button with specified text that switches the user to a private chat
                with the bot and sends the bot a start message with the parameter switch_pm_parameter

            switch_pm_parameter (``str``, *optional*):
                `Deep-linking <https://core.telegram.org/bots#deep-linking>`_ parameter for the /start message sent to
                the bot when user presses the switch button. 1-64 characters, only A-Z, a-z, 0-9, _ and - are allowed.

                Example: An inline bot that sends YouTube videos can ask the user to connect the bot to their YouTube
                account to adapt search results accordingly. To do this, it displays a "Connect your YouTube account"
                button above the results, or even before showing any. The user presses the button, switches to a
                private chat with the bot and, in doing so, passes a start parameter that instructs the bot to return
                an oauth link. Once done, the bot can offer a switch_inline button so that the user can easily return
                to the chat where they wanted to use the bot's inline capabilities.
        """
        # Thin delegation: all validation/serialization happens in the client method.
        return await self._client.answer_inline_query(
            inline_query_id=self.id,
            results=results,
            cache_time=cache_time,
            is_gallery=is_gallery,
            is_personal=is_personal,
            next_offset=next_offset,
            switch_pm_text=switch_pm_text,
            switch_pm_parameter=switch_pm_parameter
        )
PypiClean
/JaxHankel-0.1.2.tar.gz/JaxHankel-0.1.2/README.md
# JaxHankel

A Hankel transform implementation in jax, based on scipy's implementation

## Examples

In cosmology, use it to convert power spectra into correlation functions and vice versa,

```python
import matplotlib.pyplot as plt
import jax.numpy as jnp

from jax_fht.cosmology import FFTLog


def xi(r, A=1.0):
    return A * jnp.exp(-(r ** 2))


fftlog = FFTLog(num=1, log_r_min=-4.0, log_r_max=4.0)
pk = fftlog.xi2pk(xi(fftlog.r))

plt.loglog(fftlog.k, pk)
```

Note that it is vectorized along the last dimension. Thanks to jax we can now compute derivatives too; see for instance the derivative of the power spectrum with respect to its norm (A),

```python
from jax import jacobian

get_pk = lambda norm: fftlog.xi2pk(xi(fftlog.r, norm))
derivative = jacobian(get_pk)(5.)
```

## Install

```bash
$ pip install JaxHankel
```
PypiClean
/FragPELE-2.1.1.tar.gz/FragPELE-2.1.1/frag_pele/Helpers/clusterizer.py
import sys
import mdtraj as md
from AdaptivePELE.clustering import clustering, thresholdcalculator
from AdaptivePELE.spawning import spawning, densitycalculator
from AdaptivePELE.constants import constants
from AdaptivePELE.utilities import utilities
import pandas as pd
import glob
import os


def cluster_traject(resname, trajToDistribute, columnToChoose, distance_contact, clusterThreshold,
                    path_to_cluster, output_path, mapping_out, epsilon=0.5, report_basename="report",
                    condition="min", metricweights="linear", nclusters=5):
    """Cluster the trajectories under `path_to_cluster` and spawn initial structures.

    Uses AdaptivePELE's contact-map accumulative clustering (Jaccard similarity,
    constant threshold `clusterThreshold`) on the ligand `resname`, then
    distributes `trajToDistribute` seeds over the clusters with an
    epsilon-degeneracy spawning calculator and writes the resulting processor
    mapping to disk.

    :param resname: ligand residue name used to build contact maps
    :param trajToDistribute: number of trajectories/seeds to distribute
    :param columnToChoose: column of the report file used as the spawning metric
    :param distance_contact: contact threshold distance for the contact maps
    :param clusterThreshold: constant clustering threshold value
    :param path_to_cluster: path (glob/dir) with the trajectories to cluster
    :param output_path: directory where temporary/spawned structures are written
    :param mapping_out: destination passed to ``writeProcessorMappingToDisk``
    :param epsilon: epsilon parameter of the epsilon-greedy spawning
    :param report_basename: base name of the PELE report files
    :param condition: whether the metric is minimized or maximized ("min"/"max")
    :param metricweights: metric weighting scheme for spawning
    :param nclusters: number of clusters considered by the spawning params
    """
    outputPathConst = constants.OutputPathConstants(output_path)
    outputPathConst.tmpFolder = output_path
    outputPathConst.buildTmpFolderConstants(outputPathConst.tmpFolder)
    utilities.makeFolder(outputPathConst.tmpFolder)
    # Constant-threshold clustering with Jaccard similarity on contact maps.
    thresholdCalc = thresholdcalculator.ThresholdCalculatorConstant(value=clusterThreshold)
    similarityEval = clustering.CMSimilarityEvaluator("Jaccard")
    clusteringObject = clustering.ContactMapAccumulativeClustering(thresholdCalc, similarityEval,
                                                                   resname=resname,
                                                                   reportBaseFilename=report_basename,
                                                                   columnOfReportFile=columnToChoose,
                                                                   contactThresholdDistance=distance_contact,
                                                                   altSelection=True)
    clusteringObject.cluster([path_to_cluster], ignoreFirstRow=True)
    spawning_params = spawning.SpawningParams()
    spawning_params.reportFilename = report_basename
    spawning_params.epsilon = epsilon
    spawning_params.nclusters = nclusters
    spawning_params.metricWeights = metricweights
    spawning_params.condition = condition

    density = densitycalculator.NullDensityCalculator()
    spawningObject = spawning.EpsilonDegeneracyCalculator(spawning_params, density)
    degeneracy = spawningObject.calculate(clusteringObject.clusters, trajToDistribute, spawning_params)
    spawningObject.log()
    _, procMapping = spawningObject.writeSpawningInitialStructures(outputPathConst, degeneracy, clusteringObject, 0)
    processorManagerFilename = "processorMapping.txt"
    # NOTE(review): `mapping_out` is passed where AdaptivePELE usually expects
    # the epoch/output directory and 'processorMapping.txt' as the file name —
    # confirm the argument order against AdaptivePELE's utilities API.
    utilities.writeProcessorMappingToDisk(mapping_out, processorManagerFilename, procMapping)


def get_column_num(path, header_column, report_basename="report"):
    """Return the index of `header_column` in the first report file found under `path`.

    :raises IndexError: if no file matching ``*<report_basename>*`` exists in `path`
    :raises ValueError: if `header_column` is not among the report's column names
    """
    reports = glob.glob(os.path.join(path, "*{}*".format(report_basename)))
    try:
        reports[0]
    except IndexError:
        raise IndexError("Not report file found. Check you are in adaptive's or Pele root folder")
    # Only the first report is inspected; all reports are assumed to share a header.
    data = pd.read_csv(reports[0], sep=' ', engine='python')
    header_list = data.columns.values.tolist()
    column_number = header_list.index(header_column)
    return column_number


def check_atom_overlapping(pdb_list, ligand_resname="GRW"):
    """Return the subset of `pdb_list` whose ligand has at least one clashing neighbor atom.

    A structure is flagged when ``md.compute_neighbors`` finds any atom within
    the cutoff of the ligand selection.

    :param pdb_list: iterable of PDB file paths to check
    :param ligand_resname: residue name identifying the ligand atoms
    """
    pdb_wrong = []
    for pdb in pdb_list:
        structure = md.load(pdb)
        ligand = structure.topology.select("resname {}".format(ligand_resname))
        # NOTE(review): mdtraj distances are in nanometers, so cutoff=0.002 is
        # 0.02 Å — unusually tight for a clash check; confirm intended units.
        clash = md.compute_neighbors(structure, cutoff=0.002, query_indices=ligand)
        if len(clash[0]) != 0:
            pdb_wrong.append(pdb)
    return pdb_wrong
PypiClean
/BatzenCA-0.1.tar.gz/BatzenCA-0.1/batzenca/database/releases.py
import os import datetime import warnings import codecs import sqlalchemy from sqlalchemy import Column, Integer, String, Date, Boolean, ForeignKey from sqlalchemy.orm import relationship, backref, Session from sqlalchemy.ext.associationproxy import association_proxy from base import Base, EntryNotFound from peers import Peer from keys import Key class ReleaseKeyAssociation(Base): __tablename__ = 'releasekeyassociations' left_id = Column(Integer, ForeignKey('keys.id'), primary_key=True) right_id = Column(Integer, ForeignKey('releases.id'), primary_key=True) policy_exception = Column(Boolean) is_active = Column(Boolean) key = relationship("Key", backref=backref("release_associations", cascade="all, delete-orphan") ) release = relationship("Release", backref=backref("key_associations", cascade="all, delete-orphan") ) def __init__(self, key, active=True, policy_exception=False): self.key = key self.is_active = active self.policy_exception = policy_exception class Release(Base): """Releases are bundles of objects of type :class:`batzenca.database.keys.Key`. Releases contain active and inactive keys. The former are keys users are expected to use. The latter inform the user about invalidated keys for example by revoked signatures. :param batzenca.database.mailinglists.MailingList mailinglist: the mailinglist for which this release is intended :param date: the date of this release :param iterable active_keys: keys distributed in this release that user ought to use :param iterable inactive_keys: keys which are not active in this release, yet should be distributed. For example, this could include keys with revocation signatures which are distributed to inform users about this revocation. 
:param batzenca.database.policies.Policy policy: the policy against which keys in this release should be checked """ __tablename__ = 'releases' id = Column(Integer, primary_key=True) mailinglist_id = Column(Integer, ForeignKey('mailinglists.id')) mailinglist = relationship("MailingList", backref=backref("releases", order_by="Release.date", cascade="all, delete-orphan")) date = Column(Date) published = Column(Boolean) policy_id = Column(Integer, ForeignKey('policies.id')) policy = relationship("Policy") keys = association_proxy('key_associations', 'key') def __init__(self, mailinglist, date, active_keys, inactive_keys=None, policy=None): self.mailinglist = mailinglist if date is None: date = datetime.date.today() self.date = date if policy is not None: self.policy = policy else: self.policy = mailinglist.policy for key in active_keys: self.key_associations.append(ReleaseKeyAssociation(key=key)) for key in inactive_keys: self.key_associations.append(ReleaseKeyAssociation(key=key, active=False)) self.published = False @classmethod def from_mailinglist_and_date(cls, mailinglist, date): """Return the release on ``mailinglist`` for ``date`` from the database. If more than one element is found the "first" element is returned, where "first" has no particular meaning and is implementation specific. In this case a warning is issued. :param batzenca.database.mailinglists.MailingList mailinglist: the mailinglist on which the target release was released :param date: the date on which the target release was released :raises batzenca.database.base.EntryNotFound: when no entry is found .. note:: The returned object was aquired from the master session and lives there. 
""" from batzenca.session import session res = session.db_session.query(cls).filter(cls.mailinglist_id == mailinglist.id, cls.date == date) if res.count() == 0: raise EntryNotFound("No release for mailinglist '%s' with date '%s' in database."%(mailinglist, date)) else: if res.count() > 1: warnings.warn("More than one release for mailinglist '%s' with date '%s' in database, picking first one"%(mailinglist, date)) return res.first() def inherit(self, date=None, policy=None, deactivate_invalid=True, delete_old_inactive_keys=5): """Construct a new release by inheritance from this release. Inheritance means that active and inactive keys are carried forward. :param date: the date of this release. If ``None`` today's date is used :param boolean deactivate_invalid: deactivate keys which are no longer valid, e.g. because they are expired. :param boolean delete_old_inactive_keys: delete inactive keys which have been around for a while, this parameter is passed to :func:`batzenca.database.releases.Release.delete_old_inactive_keys` as ``releasecount``. """ active_keys = list(self.active_keys) inactive_keys = list(self.inactive_keys) if policy is None: policy = self.policy release = Release(mailinglist=self.mailinglist, date=date, active_keys = active_keys, inactive_keys = inactive_keys, policy=policy) if deactivate_invalid: release.deactivate_invalid() if delete_old_inactive_keys: release.delete_old_inactive_keys(delete_old_inactive_keys) for key in self.active_keys: if self.has_exception(key): release.add_exception(key) return release def verify(self, ignore_exceptions=False): """Check if all active keys in this release pass the policy check. :param boolean ignore_exceptions: keys may have a policy exception which means that they pass this test even though they do violate the policy. By default active keys with an existing policy exception are ignored. If ``True`` these keys are checked as well. 
""" for assoc in self.key_associations: if assoc.is_active and (ignore_exceptions or not assoc.policy_exception): self.policy.check(assoc.key) def __repr__(self): s = "<Release: %s, %s (%s), %s (%s + %s) keys>"%(self.id, self.date, self.mailinglist, len(self.key_associations), len(self.active_keys), len(self.inactive_keys)) return unicode(s).encode('utf-8') def __str__(self): from batzenca.database.policies import PolicyViolation inact_no_sig = 0 inact_expired = 0 policy = self.policy for key in self.keys: with warnings.catch_warnings(): warnings.simplefilter("ignore", PolicyViolation) if policy.check_ca_signature(key) == False: inact_no_sig += 1 continue if key.expires and key.expires < self.date: inact_expired += 1 continue return "date: %10s, list: %10s, policy date: %10s, active keys: %3d, inactive keys: %2d (expired: %2d, not signed: %2d), total keys: %3d"%(self.date, self.mailinglist.name, self.policy.implementation_date, len(self.active_keys), len(self.inactive_keys), inact_expired, inact_no_sig, len(self.keys)) @property def ascii_keys(self): """All active and inactive keys in this release as OpenPGP ASCII text""" from batzenca.session import session return session.gnupg.keys_export([key.kid for key in self.keys]) def diff(self, other=None): """Compare this release with ``other``. :param batzenca.database.releases.Release other: the release to compare against, if ``None`` then ``self.prev`` is chosen :return: this function returns five tuples: - ``keys_in`` - keys that are active in this release but are not active in ``other`` - ``keys_out`` - all keys that are either active or inactive in ``other`` but are not active in this release. 
- ``peers_joined`` - peers active in this release but not in ``other`` - ``peers_changed`` - peers that have different active keys in ``other`` and this release - ``peers_left`` - peers that are active in ``other`` but in this release """ if other is None: other = self.prev keys_prev = set(other.active_keys + self.inactive_keys) keys_curr = set(self.active_keys) # keys that are in this release # keys that used to be in but are not any more keys_out = keys_prev.difference(keys_curr) # keys that are new keys_in = keys_curr.difference(other.active_keys) peers_prev = set([Peer.from_key(key) for key in keys_prev]) peers_curr = set([Peer.from_key(key) for key in keys_curr]) peers_in = set([Peer.from_key(key) for key in keys_in ]) peers_out = set([Peer.from_key(key) for key in keys_out ]) peers_joined = peers_curr.difference(peers_prev) peers_changed = peers_in.intersection(peers_out) peers_left = peers_prev.difference(peers_curr) return keys_in, keys_out, peers_joined, peers_changed, peers_left @property def peers(self): """All active peers in this release""" return tuple(Peer.from_key(key) for key in sorted(self.active_keys, key=lambda x: x.name.lower())) @staticmethod def _format_entry(i, key): return (u" %3d. 
%s"%(i, key), u" %s"%key.peer) @property def active_keys(self): """All active keys in this release.""" if self.id is None: return [assoc for assoc in self.key_associations if assoc.is_active] from batzenca.session import session return session.db_session.query(Key).join(ReleaseKeyAssociation).filter(ReleaseKeyAssociation.right_id == self.id, ReleaseKeyAssociation.is_active == True).all() @property def inactive_keys(self): """All inactive keys in this release.""" if self.id is None: return [assoc.key for assoc in self.key_associations if not assoc.is_active] from batzenca.session import session return session.db_session.query(Key).join(ReleaseKeyAssociation).filter(ReleaseKeyAssociation.right_id == self.id, ReleaseKeyAssociation.is_active == False).all() def deactivate_invalid(self): """Deactivate those keys which evaluate to false and those keys which are not signed by the CA. A key evaluates to false if it is expired. Keys are considered unsigned if the CA signature is revoked. """ if self.published: raise ValueError("Release '%s' is already published and should not be modified."%self) for assoc in self.key_associations: if assoc.is_active: if not bool(assoc.key): assoc.is_active = False elif not assoc.key.is_signed_by(self.policy.ca): assoc.is_active = False def delete_old_inactive_keys(self, releasecount=5): """Remove those inactive keys which have been inactive for a while. :param boolean releasecount: the number of releases for which a key must have been inactive to be removed. 
""" if self.published: raise ValueError("Release '%s' is already published and should not be modified."%self) old_release = self for i in range(releasecount): if old_release.prev: old_release = old_release.prev else: return delete_keys = [] for key in self.inactive_keys: if key not in old_release.active_keys: delete_keys.append(key) elif key.expires and key.expires < self.date: delete_keys.append(key) for key in delete_keys: assoc = self._get_assoc(key) self.key_associations.remove(assoc) from batzenca.session import session session.db_session.delete(assoc) def _get_assoc(self, key): if key.id is None or self.id is None: for assoc in self.key_associations: if assoc.key is key and assoc.release is self: return assoc raise ValueError("Key '%s' is not in release '%s'"%(key, self)) from batzenca.session import session res = session.db_session.query(ReleaseKeyAssociation).filter(ReleaseKeyAssociation.left_id == key.id, ReleaseKeyAssociation.right_id == self.id) if res.count() > 1: raise RuntimeError("The key '%s' is associated with the release '%' more than once; the database is in an inconsistent state."%(key, self)) if res.count() == 0: raise ValueError("Key '%s' is not in release '%s'"%(key, self)) return res.first() def add_exception(self, key): """Add a policy exception for the provided key. :param batzenca.database.keys.Key key: the key for which to add the exception """ if self.published: raise ValueError("Release '%s' is already published and should not be modified."%self) assoc = self._get_assoc(key) assoc.policy_exception = True def has_exception(self, key): """Return ``True`` if the provided key has a policy exception. 
:param batzenca.database.keys.Key key: the key to check """ assoc = self._get_assoc(key) return assoc.policy_exception def is_active(self, key): """Return ``True`` if the provided key is active in this release, :param batzenca.database.keys.Key key: the key to check """ assoc = self._get_assoc(Key) return assoc.is_active def update_key_from_peer(self, peer): if not peer in self: raise ValueError("Peer '%s' is not in '%s'"%(peer, self)) if peer.key in self: raise ValueError("Key '%s' of peer '%s' is already in release '%s'"%(peer.key, peer, self)) from batzenca.session import session res = session.db_session.query(ReleaseKeyAssociation).join(Key).join(Peer).filter(Key.peer_id == peer.id, ReleaseKeyAssociation.left_id == Key.id, ReleaseKeyAssociation.right_id == self.id, ReleaseKeyAssociation.is_active == True) for assoc in res.all(): if not bool(assoc.key): assoc.is_active = False elif not assoc.key.is_signed_by(self.policy.ca): assoc.is_active = False else: raise ValueError("Key '%s' of peer '%s' has a valid signature by CA '%s' mandated in '%s'"%(assoc.key, peer, self.policy.ca, self.policy)) self.add_key(peer.key, active=True, check=True) def add_key(self, key, active=True, check=True): if self.published: raise ValueError("Release '%s' is already published and should not be modified."%self) if key.peer is None: raise ValueError("Key '%s' has no peer associated"%key) else: if active and key.peer in self: raise ValueError("Peer '%s' associated with Key '%s' already has an active key in this release"%(key.peer, key)) if check and active: self.policy.check(key) self.key_associations.append(ReleaseKeyAssociation(key=key, active=active)) def __contains__(self, obj): from batzenca.session import session if self.id is None: raise RuntimeError("The object '%s' was not committed to the database yet, we cannot issue queries involving its id yet."%self) try: if obj.id is None: raise RuntimeError("The object '%s' was not committed to the database yet, we cannot issue queries 
involving its id yet."%obj) except AttributeError: raise TypeError("Cannot handle objects of type '%s'"%type(obj)) if isinstance(obj, Key): res = session.db_session.query(Key).join(ReleaseKeyAssociation).filter(ReleaseKeyAssociation.left_id == obj.id, ReleaseKeyAssociation.right_id == self.id, ReleaseKeyAssociation.is_active == True) if res.count() == 0: return False elif res.count() == 1: return True else: raise RuntimeError("The key '%s' is associated with the release '%' more than once; the database is in an inconsistent state."%(obj, self)) elif isinstance(obj, Peer): res = session.db_session.query(Peer).join(Key).join(ReleaseKeyAssociation).filter(Key.peer_id == obj.id, ReleaseKeyAssociation.left_id == Key.id, ReleaseKeyAssociation.right_id == self.id, ReleaseKeyAssociation.is_active == True) if res.count() == 0: return False elif res.count() == 1: return True else: raise RuntimeError("The peer '%s' is associated with the release '%' more than once; the database is in an inconsistent state."%(obj, self)) else: raise TypeError("Cannot handle objects of type '%s'"%type(obj)) @property def prev(self): """ The previous release. """ idx = self.mailinglist.releases.index(self) if idx > 0: return self.mailinglist.releases[idx-1] else: return None @property def yaml(self): """YAML representation of this release. This contains the name of the database, the date of this release, the key id of the CA, all key ids of active and passive keys. 
""" s = [] s.append( "mailinglist: %s"%self.mailinglist.email ) s.append( "date: %04d-%02d-%02d"%(self.date.year, self.date.month, self.date.day) ) s.append( "ca: %s"%self.policy.ca.kid ) s.append( "active keys:" ) for key in self.active_keys: s.append( " - %s"%key.kid ) s.append( "" ) s.append( "inactive keys:" ) for key in self.inactive_keys: s.append( " - %s"%key.kid ) s.append( "" ) return "\n".join(s) def expiring_keys(self, days=30): return tuple(key for key in self.active_keys if key.expires and key.expires < self.date + datetime.timedelta(days=days)) def __call__(self, previous=None, check=True, still_alive=False): """Return tuple representing this release as a (message, keys) pair. :param batzenca.database.releases.Release previous: the previous release, we call :func:`batzenca.database.releases.Release.diff` on it. if ``None`` then ``self.prev`` is used. :param boolean check: if ``True`` then :func:`batzenca.database.releases.Release.verify` is run. :param boolean still_alive: if ``True`` then the field ``{dead_man_switch}`` in :attr:`batzenca.database.mailinglists.MailingList.key_update_msg` is replaced by :attr:`batzenca.database.mailinglists.MailingList.dead_man_switch_msg`. :return: a tuple containing two strings. The first one is :attr:`batzenca.database.mailingists.MailingList.key_update_msg` with the output of :class:`batzenca.database.releases.Release.diff` used to fill in its fields. The second component is ``self``'s :attr:`batzenca.database.releases.Release.ascii_keys`. 
""" if check: self.verify() sorted_keys = lambda keys: sorted(keys, key=lambda x: x.name.lower()) keys = [] for i,key in enumerate(sorted_keys(self.active_keys)): keys.extend(Release._format_entry(i, key)) keys = "\n".join(keys) if previous is None: previous = self.prev if previous: keys_in, keys_out, peers_joined, peers_changed, peers_left = self.diff(previous) keys_in = "\n".join(sum([self._format_entry(i, key) for i,key in enumerate(sorted_keys(keys_in)) ], tuple())) keys_out = "\n".join(sum([self._format_entry(i, key) for i,key in enumerate(sorted_keys(keys_out))], tuple())) peers_joined = ", ".join(peer.name for peer in peers_joined) peers_changed = ", ".join(peer.name for peer in peers_changed) peers_left = ", ".join(peer.name for peer in peers_left) else: keys_in, keys_out, peers_joined, peers_changed, peers_left = "","","","","" msg = self.mailinglist.key_update_msg.format(mailinglist=self.mailinglist.name, keys=keys, keys_in = keys_in, keys_out = keys_out, peers_in = peers_joined, peers_changed = peers_changed, peers_out = peers_left, dead_man_switch = self.mailinglist.dead_man_switch_msg if still_alive else "", ca=self.policy.ca.name, ca_email=self.policy.ca.email) return msg, self.ascii_keys def release_message(self, previous=None, check=True, debug=False, attachments=None): """ """ ca = self.policy.ca mailinglist = self.mailinglist date_str = "%04d%02d%02d"%(self.date.year, self.date.month, self.date.day) body_, attachment_ = self(previous=previous, check=check) from email import encoders from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from batzenca.pgpmime import PGPMIME payload = MIMEMultipart() payload.attach(MIMEText(body_.encode('utf-8'), _charset='utf-8')) attachment = MIMEBase('application', 'pgp-keys') attachment.set_payload(attachment_) encoders.encode_base64(attachment) attachment.add_header('Content-Disposition', 'attachment', filename="%s_%s.asc"%(mailinglist.name, 
date_str)) payload.attach(attachment) if attachments: for attachment in attachments: payload.attach(attachment) msg = PGPMIME(payload, self.active_keys, ca) # we are being a bit paranoid and check that we didn't fuck up encryption or something for key in self.active_keys: assert(key.kid not in msg.as_string()) to = mailinglist.email if not debug else ca.email msg['To'] = to msg['From'] = ca.email msg['Subject'] = "KeyUpdate {date} [{mailinglist}]".format(date=date_str, mailinglist=mailinglist.name) return msg def welcome_messages(self, tolerance=180, debug=False): mailinglist = self.mailinglist ca = self.policy.ca today = datetime.date.today() tolerance = today - datetime.timedelta(days=tolerance) from email.mime.text import MIMEText from batzenca.pgpmime import PGPMIME M = [] for peer in self.peers: was_recently_active = False for key in peer.keys: if any(rel.date > tolerance for rel in key.releases if rel.mailinglist == self.mailinglist and rel != self): was_recently_active = True break if not was_recently_active: body = self.mailinglist.new_member_msg.format(peer=peer.name, mailinglist=mailinglist.name, mailinglist_email=mailinglist.email, ca=ca.name, ca_email=ca.email) payload = MIMEText(body.encode('utf-8'), _charset='utf-8') msg = PGPMIME(payload, [peer.key, ca], ca) to = peer.email if not debug else ca.email msg['To'] = to msg['From'] = ca.email msg['Subject'] = "welcome to [{mailinglist}]".format(mailinglist=mailinglist.name) M.append( msg ) return tuple(M) def key_expiry_messages(self, days=30, debug=False): mailinglist = self.mailinglist ca = self.policy.ca from email.mime.text import MIMEText from batzenca.pgpmime import PGPMIME M = [] for key in self.expiring_keys(days=days): body = self.mailinglist.key_expiry_warning_msg.format(peer=key.peer.name, keyid=key.kid, expiry_date=key.expires, mailinglist = mailinglist.name, mailinglist_email = mailinglist.email, ca_email = ca.email) payload = MIMEText(body.encode('utf-8'), _charset='utf-8') msg = 
PGPMIME(payload, [key, ca], ca) to = key.email if not debug else ca.email msg['To'] = to msg['From'] = ca.email msg['Subject'] = "key expiry warning".format(mailinglist=mailinglist.name) M.append( msg ) return tuple(M) def dump(self, filename=None): """Write this release to to filename.yaml and filename.asc where the former receives :attr:`batzenca.database.releases.Release.yaml` and the latter receives :attr:`batzenca.database.releases.Release.ascii_keys`. :param str filename: a string containing a full path and basename. """ from batzenca import session if filename is None: filename = os.path.join(session.release_dump_path, "%s-%04d%02d%02d"%(self.mailinglist.name, self.date.year, self.date.month, self.date.day)) codecs.open(filename+".yaml", encoding="utf8", mode="w").write( self.yaml ) open(filename+".asc", "w").write( self(previous=None, check=False)[1] ) def send(self, smtpserver, previous=None, check=True, debug=False, attachments=None, new_peer_tolerance_days=180, key_expiry_warning_days=30): """Publish this release. This entails (if ``debug==False``): 1. updating the release date of this release 2. a call to :func:`batzenca.database.releases.Release.deactivate_invalid` 3. sending an e-mail to new members who have not been on this list for ``new_peer_tolerance_days`` days. 4. sending a key update message to the list 5. sending a key expiry message to keys that expire within ``key_expiry_warning_days`` days 6. a call to :func:`batzenca.database.releases.Release.dump` 7. setting this release status to published. If ``debug == True`` then e-mails are sent to the CA's e-mail address instead of the list and/or peers. Furthermore, neither the date nor the published status of this release is affected in this case. :param smtpserver: :param batzenca.database.releases.Release previous: the previous release, we call :func:`batzenca.database.releases.Release.diff` on it. if ``None`` then ``self.prev`` is used. 
:param boolean check: if ``True`` then :func:`batzenca.database.releases.Release.verify` is run. :param boolean debug: :param iterable attachments: :param int new_peer_tolerance_days: :param int key_expiry_warning_days: .. warning: Calling this function may modify this release. Firstly, this function calls :func:`batzenca.database.releases.Release.deactivate_invalid`. Secondly, if ``debug`` is ``False``, :attr:`batzenca.database.releases.Release.date` is set to today's date and :attr:`batzenca.database.releases.Release.published` is set to ``True``. """ if self.published: raise ValueError("Release '%s' is already published"%self) # 1. updating the release date of this release if not debug: self.date = datetime.date.today() # 2. a call to :func:`batzenca.database.releases.Release.deactivate_invalid` self.deactivate_invalid() # 3. sending an e-mail to new members who have not been on this list for ``new_peer_tolerance_days`` days. if new_peer_tolerance_days and self.mailinglist.new_member_msg: messages = self.welcome_messages(tolerance=new_peer_tolerance_days, debug=debug) for msg in messages: if debug: smtpserver.sendmail(self.policy.ca.email, (msg['To'],self.policy.ca.email), msg.as_string()) else: # we send a copy to self smtpserver.sendmail(self.policy.ca.email, (self.policy.ca.email, ), msg.as_string()) # 4. sending a key update message to the list msg = self.release_message(previous=previous, check=check, debug=debug, attachments=attachments) smtpserver.sendmail(self.policy.ca.email, (msg['To'],), msg.as_string()) # 5. 
sending a key expiry message to keys that expire within ``key_expiry_warning_days`` days if key_expiry_warning_days and self.mailinglist.key_expiry_warning_msg: messages = self.key_expiry_messages(days=key_expiry_warning_days, debug=debug) for msg in messages: if debug: smtpserver.sendmail(self.policy.ca.email, (self.policy.ca.email,), msg.as_string()) else: # we send a copy to self smtpserver.sendmail(self.policy.ca.email, (msg['To'],self.policy.ca.email), msg.as_string()) # 6. a call to :func:`batzenca.database.releases.Release.dump` if not debug: self.dump() # 7. setting this release status to published. if not debug: self.published = True
PypiClean