text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from django.contrib import admin
from django.apps import apps

# Auto-register every model declared by the 'bulkvote' app with the default
# admin site, so each one gets a stock ModelAdmin page without boilerplate.
for bulkvote_model in apps.get_app_config('bulkvote').get_models():
    admin.site.register(bulkvote_model)
|
{
"content_hash": "43b61b49d42a3134ef173ed57716bd1a",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 58,
"avg_line_length": 33.5,
"alnum_prop": 0.7611940298507462,
"repo_name": "tykling/bulkvote",
"id": "60bab7a933d8242d81a3947c492654060ffdd7d9",
"size": "201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bulkvote/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18675"
},
{
"name": "HTML",
"bytes": "7377"
},
{
"name": "JavaScript",
"bytes": "80480"
},
{
"name": "Python",
"bytes": "11704"
}
],
"symlink_target": ""
}
|
from sahara.plugins.vanilla.hadoop2 import config as c
from sahara.tests.unit import base
class VanillaTwoConfigTestCase(base.SaharaTestCase):
    """Unit tests for the Vanilla/Hadoop2 config helpers."""

    def test_get_hadoop_dirs(self):
        """Each storage path fans out into the HDFS/YARN directory layout;
        log dirs come from the first path only."""
        node_group = FakeNG(storage_paths=['/vol1', '/vol2'])
        actual = c._get_hadoop_dirs(node_group)
        expected = {
            'hadoop_name_dirs': ['/vol1/hdfs/namenode',
                                 '/vol2/hdfs/namenode'],
            'hadoop_data_dirs': ['/vol1/hdfs/datanode',
                                 '/vol2/hdfs/datanode'],
            'hadoop_log_dir': '/vol1/hadoop/logs',
            'hadoop_secure_dn_log_dir': '/vol1/hadoop/logs/secure',
            'yarn_log_dir': '/vol1/yarn/logs',
        }
        self.assertEqual(expected, actual)
class FakeNG(object):
    """Bare-bones node-group double exposing only storage_paths()."""

    def __init__(self, storage_paths=None):
        self._storage_paths = storage_paths

    def storage_paths(self):
        """Return whatever list (or None) was handed to the constructor."""
        return self._storage_paths
|
{
"content_hash": "cde7dabe9fc8c7d37d0dcf0639922be9",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 67,
"avg_line_length": 34.42307692307692,
"alnum_prop": 0.5675977653631284,
"repo_name": "crobby/sahara",
"id": "cbfa6cd5f3c2820259ce7ec8d0d8e367932e3d0f",
"size": "1478",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sahara/tests/unit/plugins/vanilla/hadoop2/test_configs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "33627"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "3355980"
},
{
"name": "Shell",
"bytes": "61693"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup

# The long description is read straight from the README next to this file.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

# Run from the directory containing setup.py so relative paths resolve.
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Topic :: Internet :: WWW/HTTP',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]

setup(
    name='django-social-widgets',
    version='0.5.0',
    packages=['social_widgets'],
    include_package_data=True,
    license='MIT License',
    description='Django app for easy embedding social network widgets and '
                'plugins into your site. Supports Facebook, Twitter, Google+, '
                'YouTube, Instagram and Pinterest.',
    long_description=README,
    keywords='Django, social network, template, facebook, twitter',
    url='https://github.com/creafz/django-social-widgets',
    download_url='https://github.com/creafz/django-social-widgets/tarball/0.5.0',
    author='Alex Parinov',
    author_email='creafz@gmail.com',
    classifiers=CLASSIFIERS,
    install_requires=["django>=1.5"],
)
|
{
"content_hash": "91924402db66e75206c0fa88f66712d2",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 38.733333333333334,
"alnum_prop": 0.6173264486517499,
"repo_name": "creafz/django-social-widgets",
"id": "86bcffc7d79d2f8fbf74557287b72357861b4356",
"size": "1743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1554"
},
{
"name": "HTML",
"bytes": "91575"
},
{
"name": "JavaScript",
"bytes": "863"
},
{
"name": "Python",
"bytes": "12304"
}
],
"symlink_target": ""
}
|
"""
Utilities for applying a watermark to an image using PIL.
Original Source: http://code.activestate.com/recipes/362879/
"""
import Image, ImageEnhance
import random
import traceback
def _percent(var):
    """Parse *var* as a percentage string; thin wrapper around _val."""
    return _val(var, is_percent=True)
def _int(var):
    """Parse *var* as a plain integer; thin wrapper around _val."""
    return _val(var, is_percent=False)
def _val(var, is_percent=False):
"""
Tries to determine the appropriate value of a particular variable that is
passed in. If the value is supposed to be a percentage, a whole integer
will be sought after and then turned into a floating point number between
0 and 1. If the value is supposed to be an integer, the variable is cast
into an integer.
"""
try:
if is_percent:
var = float(int(var.strip('%')) / 100.0)
else:
var = int(var)
except ValueError:
raise ValueError('invalid watermark parameter: ' + var)
return var
def reduce_opacity(img, opacity):
    """Return a copy of *img* whose alpha channel is scaled by *opacity*.

    *opacity* must lie in [0, 1]. Non-RGBA inputs are converted to RGBA;
    RGBA inputs are copied so the original is never mutated.
    """
    assert 0 <= opacity <= 1
    result = img.copy() if img.mode == 'RGBA' else img.convert('RGBA')
    faded_alpha = ImageEnhance.Brightness(result.split()[3]).enhance(opacity)
    result.putalpha(faded_alpha)
    return result
def determine_scale(scale, img, mark):
    """
    Scales an image using a specified ratio or 'F'. If `scale` is 'F', the
    image is scaled to be as big as possible to fit in `img` without falling off
    the edges.

    NOTE(review): despite the original wording, this returns a (width,
    height) tuple for `mark` rather than a resized image; the caller
    performs the actual resize.
    """
    if scale:
        try:
            # numeric strings become plain ratios; 'f'/'F' falls through to
            # the string branch below
            scale = float(scale)
        except (ValueError, TypeError):
            pass
        if type(scale) in (str, unicode) and scale.lower() == 'f':
            # scale, but preserve the aspect ratio: largest ratio that still
            # fits both dimensions of `mark` inside `img`
            scale = min(
                float(img.size[0]) / mark.size[0],
                float(img.size[1]) / mark.size[1]
            )
        elif type(scale) not in (float, int):
            raise ValueError('Invalid scale value "%s"! Valid values are 1) "F" for ratio-preserving scaling and 2) floating-point numbers and integers greater than 0.' % (scale,))
        # determine the new width and height (truncated to whole pixels)
        w = int(mark.size[0] * float(scale))
        h = int(mark.size[1] * float(scale))
        # apply the new width and height, and return the new `mark` size
        return (w, h)
    else:
        # falsy scale (None, 0, '') leaves the mark at its natural size
        return mark.size
def determine_rotation(rotation, mark):
    """Return the number of degrees to rotate the watermark image.

    'r'/'R' picks a random angle in [0, 359]; any other value is parsed
    as an integer via _int. *mark* is accepted for interface parity but
    not consulted.
    """
    is_text = isinstance(rotation, str) or isinstance(rotation, unicode)
    if is_text and rotation.lower() == 'r':
        return random.randint(0, 359)
    return _int(rotation)
def determine_position(position, img, mark):
    """
    Resolve a position spec into a concrete (left, top) tuple.

    Options:
    TL: top-left
    TR: top-right
    BR: bottom-right
    BL: bottom-left
    C: centered
    R: random
    X%xY%: relative positioning on both the X and Y axes
    X%xY: relative positioning on the X axis and absolute positioning on the
    Y axis
    XxY%: absolute positioning on the X axis and relative positioning on the
    Y axis
    XxY: absolute positioning on both the X and Y axes

    NOTE(review): a string that matches none of the branches (e.g. 'q')
    leaves left/top unassigned and the final return raises
    UnboundLocalError — confirm callers always pass a valid spec.
    NOTE(review): the '%' branches can return float coordinates.
    """
    # largest offsets that keep the mark fully inside the image
    max_left = max(img.size[0] - mark.size[0], 0)
    max_top = max(img.size[1] - mark.size[1], 0)
    # default to random placement when no position is given
    if not position:
        position = 'r'
    if isinstance(position, tuple):
        left, top = position
    elif isinstance(position, str) or isinstance(position, unicode):
        position = position.lower()
        # corner positioning
        if position in ['tl', 'tr', 'br', 'bl']:
            if 't' in position:
                top = 0
            elif 'b' in position:
                top = max_top
            if 'l' in position:
                left = 0
            elif 'r' in position:
                left = max_left
        # center positioning
        elif position == 'c':
            left = int(max_left / 2)
            top = int(max_top / 2)
        # random positioning
        elif position == 'r':
            left = random.randint(0, max_left)
            top = random.randint(0, max_top)
        # relative or absolute positioning, axis by axis
        elif 'x' in position:
            left, top = position.split('x')
            if '%' in left:
                left = max_left * _percent(left)
            else:
                left = _int(left)
            if '%' in top:
                top = max_top * _percent(top)
            else:
                top = _int(top)
    return (left, top)
def watermark(img, mark, position=(0, 0), opacity=1, scale=1.0, tile=False, greyscale=False, rotation=0, return_name=False, **kwargs):
    """
    Adds a watermark to an image and returns the composited RGBA image.

    img/mark: base image and watermark overlay (PIL images).
    position: corner code, 'c', 'r', 'XxY' spec, or a (left, top) tuple —
        see determine_position.
    opacity: 0..1 alpha multiplier applied to the mark.
    scale: ratio, 'F' (fit), or an explicit (w, h) tuple — see determine_scale.
    tile: repeat the mark across the whole image.
    greyscale: convert the mark to greyscale-with-alpha first.
    rotation: degrees, or 'r' for a random angle — see determine_rotation.
    return_name: accepted for backward compatibility; unused here.
    """
    if opacity < 1:
        mark = reduce_opacity(mark, opacity)
    if type(scale) != tuple:
        scale = determine_scale(scale, img, mark)
    mark = mark.resize(scale)
    if greyscale and mark.mode != 'LA':
        mark = mark.convert('LA')
    rotation = determine_rotation(rotation, mark)
    if rotation != 0:
        # give some leeway for rotation overlapping; PIL requires integer
        # image sizes and paste offsets, so truncate the enlarged canvas
        # (mark.size * 1.5 would otherwise be a float and crash Image.new)
        new_w = int(mark.size[0] * 1.5)
        new_h = int(mark.size[1] * 1.5)
        new_mark = Image.new('RGBA', (new_w, new_h), (0,0,0,0))
        # center the watermark in the newly resized image (floor division
        # keeps the offsets integral)
        new_l = (new_w - mark.size[0]) // 2
        new_t = (new_h - mark.size[1]) // 2
        new_mark.paste(mark, (new_l, new_t))
        mark = new_mark.rotate(rotation)
    position = determine_position(position, img, mark)
    if img.mode != 'RGBA':
        img = img.convert('RGBA')
    # make sure we have a tuple for a position now
    assert isinstance(position, tuple), 'Invalid position "%s"!' % position
    # relative ('%') positioning can produce float coordinates; paste() and
    # range() below require plain ints
    left, top = int(position[0]), int(position[1])
    # create a transparent layer the size of the image and draw the
    # watermark in that layer.
    layer = Image.new('RGBA', img.size, (0,0,0,0))
    if tile:
        # start one tile up-and-left so the grid covers the whole image
        first_y = top % mark.size[1] - mark.size[1]
        first_x = left % mark.size[0] - mark.size[0]
        for y in range(first_y, img.size[1], mark.size[1]):
            for x in range(first_x, img.size[0], mark.size[0]):
                layer.paste(mark, (x, y))
    else:
        layer.paste(mark, (left, top))
    # composite the watermark with the layer
    return Image.composite(layer, img, layer)
def test():
    """Smoke-test watermark() against two local images, writing four
    variants (tiled, fitted, absolute-position greyscale, centered)."""
    im = Image.open('test.png')
    mark = Image.open('overlay.png')
    watermark(im, mark, tile=True, opacity=0.5, rotation=30).save('test1.png')
    watermark(im, mark, scale='F').save('test2.png')
    watermark(im, mark, position=(100, 100), opacity=0.5, greyscale=True,
              rotation=-45).save('test3.png')
    watermark(im, mark, position='C', tile=False, opacity=0.2, scale=2,
              rotation=30).save('test4.png')

if __name__ == '__main__':
    test()
|
{
"content_hash": "6115595da0dc0ce2ff2b64d209f196bd",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 181,
"avg_line_length": 29.4,
"alnum_prop": 0.5572678050812162,
"repo_name": "beholderrk/django-watermark",
"id": "3c472665c21e6162ac13550a3f371bacaa73ae65",
"size": "7203",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "watermarker/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20527"
}
],
"symlink_target": ""
}
|
from share.normalize import * # noqa
class AgentIdentifier(Parser):
    """Normalizes an agent identifier by parsing the context as an IRI."""
    uri = IRI(ctx)
class WorkIdentifier(Parser):
    """Normalizes a work identifier by parsing the context as an IRI."""
    uri = IRI(ctx)
class Person(Parser):
    """Normalizes one figshare author record into a Person."""
    name = ctx.full_name
    # Two identifiers per author: the ORCID id and a figshare author URL
    # built from the record's url_name and id fields.
    identifiers = Map(
        Delegate(AgentIdentifier),
        ctx.orcid_id,
        RunPython(lambda x: 'http://figshare.com/authors/{url_name}/{id}'.format(**x), ctx)
    )
class Creator(Parser):
    """Links a work to one of its authors via a Person agent."""
    agent = Delegate(Person, ctx)
class Tag(Parser):
    """A tag whose name is the context value itself."""
    name = ctx
class ThroughTags(Parser):
    """Join record attaching a Tag to the work being parsed."""
    tag = Delegate(Tag, ctx)
class CreativeWork(Parser):
    """Normalizes one figshare v2 article into a CreativeWork."""
    schema = RunPython('get_schema', ctx.defined_type)
    # figshare's numeric defined_type is used as a 1-based index into this
    # list (see get_schema below).
    FIGSHARE_TYPES = ['figure', 'media', 'dataset', 'fileset', 'poster', 'paper', 'presentation', 'thesis', 'code', 'metadata']
    title = ctx.title
    description = ctx.description
    # figshare exposes an is_active flag; inactive records are deleted works
    is_deleted = RunPython(lambda x: not x, ctx.is_active)
    date_published = ParseDate(ctx.published_date)
    date_updated = ParseDate(ctx.modified_date)
    free_to_read_type = IRI(ctx.license.url)
    related_agents = Map(Delegate(Creator), ctx.authors)
    identifiers = Map(Delegate(WorkIdentifier), ctx.doi, ctx.url, ctx.figshare_url)
    # Tags come from both the explicit tag list and the category titles.
    tags = Map(
        Delegate(ThroughTags),
        ctx.tags,
        Map(ctx.title, ctx.categories)
    )
    class Extra:
        # Raw source fields preserved verbatim for provenance/debugging.
        files = ctx.files
        version = ctx.version
        thumb = ctx.thumb
        embargo_date = ctx.embargo_date
        embargo_reason = ctx.embargo_reason
        embargo_type = ctx.embargo_type
        citation = ctx.citation
        defined_type = ctx.defined_type
    def get_schema(self, defined_type):
        """Map figshare's 1-based defined_type code to a schema name.

        NOTE(review): only five of the ten FIGSHARE_TYPES have an entry in
        this mapping; any other type raises KeyError — confirm that is the
        intended way to reject unsupported types.
        """
        return {
            'fileset': 'Project',
            'figure': 'CreativeWork',
            'poster': 'Poster',
            'code': 'Software',
            'dataset': 'DataSet',
        }[self.FIGSHARE_TYPES[defined_type - 1]]
|
{
"content_hash": "fe469e03e84b965019fedcbf09974fb2",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 127,
"avg_line_length": 25.64788732394366,
"alnum_prop": 0.6161449752883031,
"repo_name": "zamattiac/SHARE",
"id": "154d77b3f2c2ae0aaa65f81f540f41c11081c7f9",
"size": "1821",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "providers/com/figshare/v2/normalizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3690"
},
{
"name": "HTML",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "1517988"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
}
|
def getSecret(secret):
    """Return the first line of the secret file at *secret*.

    Wrapping single quotes are stripped from the value. The ``with``
    statement already closes the file on exit, so the explicit ``close()``
    the original carried was redundant and has been removed.
    """
    with open(secret, 'r') as f:
        return f.readline().strip('\'')
def train(args):
    """Run a model-training job on Watson Machine Learning (WML) v4.

    Reads credentials from files mounted under /app/secrets, downloads the
    model code from Cloud Object Storage (COS), stores it as a WML training
    library, wraps it in a pipeline, starts the training run, streams its
    logs and metrics, polls until the run reports 'completed', and writes
    the resulting run/training UIDs to the output paths given in ``args``.
    """
    from watson_machine_learning_client import WatsonMachineLearningAPIClient
    from minio import Minio
    from urllib.parse import urlsplit
    from pathlib import Path
    import os,time
    wml_train_code = args.train_code
    wml_execution_command = args.execution_command.strip('\'')
    # Optional CLI flags fall back to these defaults. (wml_runtime_name and
    # wml_author_name are currently unused below.)
    wml_framework_name = args.framework if args.framework else 'tensorflow'
    wml_framework_version = args.framework_version if args.framework_version else '1.15'
    wml_runtime_name = args.runtime if args.runtime else 'python'
    wml_runtime_version = args.runtime_version if args.runtime_version else '3.6'
    wml_run_definition = args.run_definition if args.run_definition else 'python-tensorflow-definition'
    wml_run_name = args.run_name if args.run_name else 'python-tensorflow-run'
    wml_author_name = args.author_name if args.author_name else 'default-author'
    wml_compute_name = args.compute_name if args.compute_name else 'k80'
    wml_compute_nodes = args.compute_nodes if args.compute_nodes else '1'
    # v4 runtime string looks like '1.15-py3.6'; the node count must be int.
    wml_runtime_version_v4 = wml_framework_version + '-py' + wml_runtime_version
    wml_compute_nodes_v4 = int(wml_compute_nodes)
    # retrieve credentials from the mounted secret files
    wml_url = getSecret("/app/secrets/wml_url")
    wml_apikey = getSecret("/app/secrets/wml_apikey")
    wml_instance_id = getSecret("/app/secrets/wml_instance_id")
    wml_data_source_type = getSecret("/app/secrets/wml_data_source_type")
    cos_endpoint = getSecret("/app/secrets/cos_endpoint")
    cos_endpoint_parts = urlsplit(cos_endpoint)
    # Minio wants a bare hostname while the WML data references below want a
    # full https URL; accept the endpoint secret in either form.
    if bool(cos_endpoint_parts.scheme):
        cos_endpoint_hostname = cos_endpoint_parts.hostname
    else:
        cos_endpoint_hostname = cos_endpoint
        cos_endpoint = 'https://' + cos_endpoint
    cos_access_key = getSecret("/app/secrets/cos_access_key")
    cos_secret_key = getSecret("/app/secrets/cos_secret_key")
    cos_input_bucket = getSecret("/app/secrets/cos_input_bucket")
    cos_output_bucket = getSecret("/app/secrets/cos_output_bucket")
    # download the model code from the COS input bucket
    model_code = os.path.join('/app', wml_train_code)
    cos = Minio(cos_endpoint_hostname,
                access_key=cos_access_key,
                secret_key=cos_secret_key,
                secure=True)
    cos.fget_object(cos_input_bucket, wml_train_code, model_code)
    # set up the WML client
    wml_credentials = {
        "url": wml_url,
        "instance_id": wml_instance_id,
        "apikey": wml_apikey
    }
    client = WatsonMachineLearningAPIClient(wml_credentials)
    # library metadata describing the model code
    lib_meta = {
        client.runtimes.LibraryMetaNames.NAME: wml_run_definition,
        client.runtimes.LibraryMetaNames.VERSION: wml_framework_version,
        client.runtimes.LibraryMetaNames.FILEPATH: model_code,
        client.runtimes.LibraryMetaNames.PLATFORM: {"name": wml_framework_name, "versions": [wml_framework_version]}
    }
    # check for an existing library with the same name
    library_details = client.runtimes.get_library_details()
    for library_detail in library_details['resources']:
        if library_detail['entity']['name'] == wml_run_definition:
            # Delete it first: a stored library's model_code cannot be
            # updated in place.
            uid = client.runtimes.get_library_uid(library_detail)
            client.repository.delete(uid)
            break
    custom_library_details = client.runtimes.store_library(lib_meta)
    custom_library_uid = client.runtimes.get_library_uid(custom_library_details)
    # create a pipeline with the model definitions included
    doc = {
        "doc_type": "pipeline",
        "version": "2.0",
        "primary_pipeline": wml_framework_name,
        "pipelines": [{
            "id": wml_framework_name,
            "runtime_ref": "hybrid",
            "nodes": [{
                "id": "training",
                "type": "model_node",
                "op": "dl_train",
                "runtime_ref": wml_run_name,
                "inputs": [],
                "outputs": [],
                "parameters": {
                    "name": "tf-mnist",
                    "description": wml_run_definition,
                    "command": wml_execution_command,
                    "training_lib_href": "/v4/libraries/"+custom_library_uid,
                    "compute": {
                        "name": wml_compute_name,
                        "nodes": wml_compute_nodes_v4
                    }
                }
            }]
        }],
        "runtimes": [{
            "id": wml_run_name,
            "name": wml_framework_name,
            "version": wml_runtime_version_v4
        }]
    }
    metadata = {
        client.repository.PipelineMetaNames.NAME: wml_run_name,
        client.repository.PipelineMetaNames.DOCUMENT: doc
    }
    pipeline_id = client.pipelines.get_uid(client.repository.store_pipeline(meta_props=metadata))
    client.pipelines.get_details(pipeline_id)
    # start the training run for v4: results go to the output bucket,
    # training data comes from the input bucket
    metadata = {
        client.training.ConfigurationMetaNames.TRAINING_RESULTS_REFERENCE: {
            "name": "training-results-reference_name",
            "connection": {
                "endpoint_url": cos_endpoint,
                "access_key_id": cos_access_key,
                "secret_access_key": cos_secret_key
            },
            "location": {
                "bucket": cos_output_bucket
            },
            "type": wml_data_source_type
        },
        client.training.ConfigurationMetaNames.TRAINING_DATA_REFERENCES:[{
            "name": "training_input_data",
            "type": wml_data_source_type,
            "connection": {
                "endpoint_url": cos_endpoint,
                "access_key_id": cos_access_key,
                "secret_access_key": cos_secret_key
            },
            "location": {
                "bucket": cos_input_bucket
            }
        }],
        client.training.ConfigurationMetaNames.PIPELINE_UID: pipeline_id
    }
    training_id = client.training.get_uid(client.training.run(meta_props=metadata))
    print("training_id", client.training.get_details(training_id))
    print("get status", client.training.get_status(training_id))
    # for v4 the run uid and the training uid are the same value
    run_details = client.training.get_details(training_id)
    run_uid = training_id
    # stream logs and metrics while the job runs
    client.training.monitor_logs(run_uid)
    client.training.monitor_metrics(run_uid)
    # poll until the run completes
    # NOTE(review): a run that ends in a terminal state other than
    # 'completed' (e.g. failed) would make this loop spin forever — confirm
    # whether that is intended.
    status = client.training.get_status(run_uid)
    print("status: ", status)
    while status['state'] != 'completed':
        time.sleep(20)
        status = client.training.get_status(run_uid)
        print(status)
    Path(args.output_run_uid_path).parent.mkdir(parents=True, exist_ok=True)
    Path(args.output_run_uid_path).write_text(run_uid)
    # Get training details
    training_details = client.training.get_details(run_uid)
    print("training_details", training_details)
    Path(args.output_training_uid_path).parent.mkdir(parents=True, exist_ok=True)
    Path(args.output_training_uid_path).write_text(run_uid)
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    # Required inputs: the code object to fetch and the command to run it.
    parser.add_argument('--train-code', type=str, required=True)
    parser.add_argument('--execution-command', type=str, required=True)
    # Optional string flags; train() substitutes defaults for missing ones.
    for flag in ('--framework', '--framework-version', '--runtime',
                 '--runtime-version', '--run-definition', '--run-name',
                 '--author-name'):
        parser.add_argument(flag, type=str)
    parser.add_argument('--config', type=str, default="secret_name")
    for flag in ('--compute-name', '--compute-nodes'):
        parser.add_argument(flag, type=str)
    parser.add_argument('--output-run-uid-path', type=str, default="/tmp/run_uid")
    parser.add_argument('--output-training-uid-path', type=str, default="/tmp/training_uid")
    args = parser.parse_args()

    # Check secret name is not empty
    if (not args.config):
        print("Secret for this pipeline is not properly created, exiting with status 1...")
        exit(1)
    train(args)
|
{
"content_hash": "d1dc9a293418f92e324cd7ec84e8ddee",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 116,
"avg_line_length": 40.66829268292683,
"alnum_prop": 0.6186877773779537,
"repo_name": "kubeflow/pipelines",
"id": "64c96f8a1ac2cde0c34458eb54f98de987e17a3e",
"size": "8925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/ibm-components/watson/train/src/wml-train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
}
|
from lxml import etree
import mock
from six.moves import urllib
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
import cinder.volume.drivers.netapp.dataontap.client.api as netapp_api
# Simple fake payloads and pre-built API objects for the NetApp Data ONTAP
# client unit tests.

# Raw volume-info XML; parsed into FAKE_NA_ELEMENT below.
FAKE_VOL_XML = b"""<volume-info xmlns='http://www.netapp.com/filer/admin'>
<name>open123</name>
<state>online</state>
<size-total>0</size-total>
<size-used>0</size-used>
<size-available>0</size-available>
<is-inconsistent>false</is-inconsistent>
<is-invalid>false</is-invalid>
</volume-info>"""
# Backslash continuations keep the serialized XML on a single line.
FAKE_XML1 = b"""<options>\
<test1>abc</test1>\
<test2>abc</test2>\
</options>"""
FAKE_XML2 = b"""<root><options>somecontent</options></root>"""
FAKE_NA_ELEMENT = netapp_api.NaElement(etree.XML(FAKE_VOL_XML))
FAKE_INVOKE_DATA = 'somecontent'
FAKE_XML_STR = 'abc'
FAKE_API_NAME = 'volume-get-iter'
FAKE_API_NAME_ELEMENT = netapp_api.NaElement(FAKE_API_NAME)
# NaServer fixtures at different API versions and vfiler/vserver tunings.
FAKE_NA_SERVER_STR = '127.0.0.1'
FAKE_NA_SERVER = netapp_api.NaServer(FAKE_NA_SERVER_STR)
FAKE_NA_SERVER_API_1_5 = netapp_api.NaServer(FAKE_NA_SERVER_STR)
FAKE_NA_SERVER_API_1_5.set_vfiler('filer')
FAKE_NA_SERVER_API_1_5.set_api_version(1, 5)
FAKE_NA_SERVER_API_1_14 = netapp_api.NaServer(FAKE_NA_SERVER_STR)
FAKE_NA_SERVER_API_1_14.set_vserver('server')
FAKE_NA_SERVER_API_1_14.set_api_version(1, 14)
FAKE_NA_SERVER_API_1_20 = netapp_api.NaServer(FAKE_NA_SERVER_STR)
FAKE_NA_SERVER_API_1_20.set_vfiler('filer')
FAKE_NA_SERVER_API_1_20.set_vserver('server')
FAKE_NA_SERVER_API_1_20.set_api_version(1, 20)
VOLUME_VSERVER_NAME = 'fake_vserver'
VOLUME_NAMES = ('volume1', 'volume2')
VOLUME_NAME = 'volume1'
# Fixture query and desired-attributes dicts for *-get-iter style calls.
FAKE_QUERY = {'volume-attributes': None}
FAKE_DES_ATTR = {'volume-attributes': ['volume-id-attributes',
                                       'volume-space-attributes',
                                       'volume-state-attributes',
                                       'volume-qos-attributes']}
# Expected mock.call list for ports 80/8088/443/8488 — presumably the ports
# the client probes; verify against the tests that consume it.
FAKE_CALL_ARGS_LIST = [mock.call(80), mock.call(8088), mock.call(443),
                       mock.call(8488)]
# Canned NaElement results covering error and success response shapes.
FAKE_RESULT_API_ERR_REASON = netapp_api.NaElement('result')
FAKE_RESULT_API_ERR_REASON.add_attr('errno', '000')
FAKE_RESULT_API_ERR_REASON.add_attr('reason', 'fake_reason')
FAKE_RESULT_API_ERRNO_INVALID = netapp_api.NaElement('result')
FAKE_RESULT_API_ERRNO_INVALID.add_attr('errno', '000')
FAKE_RESULT_API_ERRNO_VALID = netapp_api.NaElement('result')
FAKE_RESULT_API_ERRNO_VALID.add_attr('errno', '14956')
FAKE_RESULT_SUCCESS = netapp_api.NaElement('result')
FAKE_RESULT_SUCCESS.add_attr('status', 'passed')
FAKE_HTTP_OPENER = urllib.request.build_opener()
INITIATOR_IQN = 'iqn.2015-06.com.netapp:fake_iqn'
USER_NAME = 'fake_user'
PASSWORD = 'passw0rd'
ENCRYPTED_PASSWORD = 'B351F145DA527445'
# Pre-parsed etree fixtures for *-get-iter style API replies.

# A get-iter reply with zero records.
NO_RECORDS_RESPONSE = etree.XML("""
<results status="passed">
<num-records>0</num-records>
</results>
""")
# One-record reply naming VOLUME_NAMES[0] owned by VOLUME_VSERVER_NAME.
VOLUME_GET_NAME_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'volume': VOLUME_NAMES[0], 'vserver': VOLUME_VSERVER_NAME})
# Malformed reply: claims one record but carries no attributes-list.
INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES = etree.XML("""
<results status="passed">
<num-records>1</num-records>
<next-tag>fake_tag</next-tag>
</results>
""")
# Malformed reply: attributes-list present but no num-records element.
INVALID_GET_ITER_RESPONSE_NO_RECORDS = etree.XML("""
<results status="passed">
<attributes-list/>
<next-tag>fake_tag</next-tag>
</results>
""")
# Malformed reply: num-records without any attribute payload.
INVALID_RESPONSE = etree.XML("""
<results status="passed">
<num-records>1</num-records>
</results>
""")
# Two operational LIF addresses.
GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE = etree.XML("""
<results status="passed">
<num-records>2</num-records>
<attributes-list>
<net-interface-info>
<address>%(address1)s</address>
</net-interface-info>
<net-interface-info>
<address>%(address2)s</address>
</net-interface-info>
</attributes-list>
</results>
""" % {"address1": "1.2.3.4", "address2": "99.98.97.96"})
# A single QoS policy group record.
QOS_POLICY_GROUP_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<qos-policy-group-info>
<max-throughput>30KB/S</max-throughput>
<num-workloads>1</num-workloads>
<pgid>53</pgid>
<policy-group>fake_qos_policy_group_name</policy-group>
<policy-group-class>user_defined</policy-group-class>
<uuid>12496028-b641-11e5-abbd-123478563412</uuid>
<vserver>cinder-iscsi</vserver>
</qos-policy-group-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")
# Four-volume (vol0..vol3) volume-list-info reply; vol2 is offline so tests
# can exercise state filtering.
VOLUME_LIST_INFO_RESPONSE = etree.XML("""
<results status="passed">
<volumes>
<volume-info>
<name>vol0</name>
<block-type>64_bit</block-type>
<state>online</state>
<size-total>1441193750528</size-total>
<size-used>3161096192</size-used>
<size-available>1438032654336</size-available>
<percentage-used>0</percentage-used>
<owning-vfiler>vfiler0</owning-vfiler>
<containing-aggregate>aggr0</containing-aggregate>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
</volume-info>
<volume-info>
<name>vol1</name>
<block-type>64_bit</block-type>
<state>online</state>
<size-total>1441193750528</size-total>
<size-used>3161096192</size-used>
<size-available>1438032654336</size-available>
<percentage-used>0</percentage-used>
<owning-vfiler>vfiler0</owning-vfiler>
<containing-aggregate>aggr0</containing-aggregate>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
</volume-info>
<volume-info>
<name>vol2</name>
<block-type>64_bit</block-type>
<state>offline</state>
<size-total>1441193750528</size-total>
<size-used>3161096192</size-used>
<size-available>1438032654336</size-available>
<percentage-used>0</percentage-used>
<owning-vfiler>vfiler0</owning-vfiler>
<containing-aggregate>aggr0</containing-aggregate>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
</volume-info>
<volume-info>
<name>vol3</name>
<block-type>64_bit</block-type>
<state>online</state>
<size-total>1441193750528</size-total>
<size-used>3161096192</size-used>
<size-available>1438032654336</size-available>
<percentage-used>0</percentage-used>
<owning-vfiler>vfiler0</owning-vfiler>
<containing-aggregate>aggr0</containing-aggregate>
<space-reserve>volume</space-reserve>
<space-reserve-enabled>true</space-reserve-enabled>
<is-inconsistent>false</is-inconsistent>
<is-unrecoverable>false</is-unrecoverable>
<is-invalid>false</is-invalid>
</volume-info>
</volumes>
</results>
""")
# Snapshot lookup fixtures for both cluster-mode (attributes-list) and
# 7-mode (snapshots) response shapes, keyed off fake.SNAPSHOT.

# C-mode: snapshot present and not busy.
SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE = etree.XML("""
<results status="passed">
<attributes-list>
<snapshot-info>
<name>%(snapshot_name)s</name>
<busy>False</busy>
<volume>%(vol_name)s</volume>
<snapshot-instance-uuid>abcd-ef01-2345-6789</snapshot-instance-uuid>
</snapshot-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
    'snapshot_name': fake.SNAPSHOT['name'],
    'vol_name': fake.SNAPSHOT['volume_id'],
})
# C-mode: snapshot present but busy.
SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_CMODE = etree.XML("""
<results status="passed">
<attributes-list>
<snapshot-info>
<name>%(snapshot_name)s</name>
<busy>True</busy>
<volume>%(vol_name)s</volume>
</snapshot-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
    'snapshot_name': fake.SNAPSHOT['name'],
    'vol_name': fake.SNAPSHOT['volume_id'],
})
# 7-mode: snapshot present and not busy.
SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_7MODE = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>%(snapshot_name)s</name>
<busy>False</busy>
<volume>%(vol_name)s</volume>
<snapshot-instance-uuid>abcd-ef01-2345-6789</snapshot-instance-uuid>
</snapshot-info>
</snapshots>
</results>
""" % {
    'snapshot_name': fake.SNAPSHOT['name'],
    'vol_name': fake.SNAPSHOT['volume_id'],
})
# 7-mode: snapshot renamed with the 'deleted_cinder_' deletion marker.
SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>deleted_cinder_%(snapshot_name)s</name>
<busy>False</busy>
<volume>%(vol_name)s</volume>
<snapshot-instance-uuid>abcd-ef01-2345-6789</snapshot-instance-uuid>
</snapshot-info>
</snapshots>
</results>
""" % {
    'snapshot_name': fake.SNAPSHOT['name'],
    'vol_name': fake.SNAPSHOT['volume_id'],
})
# 7-mode: deletion-marked snapshot that is still busy.
# NOTE: the substitution dict also carries an unused 'snapshot_name' key —
# the template below only interpolates vol_name.
SNAPSHOT_INFO_MARKED_FOR_DELETE_SNAPSHOT_7MODE_BUSY = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>deleted_cinder_busy_snapshot</name>
<busy>True</busy>
<volume>%(vol_name)s</volume>
<snapshot-instance-uuid>abcd-ef01-2345-6789</snapshot-instance-uuid>
</snapshot-info>
</snapshots>
</results>
""" % {
    'snapshot_name': fake.SNAPSHOT['name'],
    'vol_name': fake.SNAPSHOT['volume_id'],
})
# 7-mode: snapshot present but busy.
SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_7MODE = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>%(snapshot_name)s</name>
<busy>True</busy>
<volume>%(vol_name)s</volume>
</snapshot-info>
</snapshots>
</results>
""" % {
    'snapshot_name': fake.SNAPSHOT['name'],
    'vol_name': fake.SNAPSHOT['volume_id'],
})
# 7-mode: reply listing only a different snapshot (lookup miss case).
SNAPSHOT_NOT_PRESENT_7MODE = etree.XML("""
<results status="passed">
<snapshots>
<snapshot-info>
<name>NOT_THE_RIGHT_SNAPSHOT</name>
<busy>false</busy>
<volume>%(vol_name)s</volume>
</snapshot-info>
</snapshots>
</results>
""" % {'vol_name': fake.SNAPSHOT['volume_id']})
# Fake cluster node and aggregate names.
NODE_NAME = 'fake_node1'
NODE_NAMES = ('fake_node1', 'fake_node2')
VOLUME_AGGREGATE_NAME = 'fake_aggr1'
VOLUME_AGGREGATE_NAMES = ('fake_aggr1', 'fake_aggr2')
# Canned 'aggr-get-iter' XML response listing both aggregates from
# VOLUME_AGGREGATE_NAMES, each with full raid/space/snapshot/ownership
# attribute sections (num-records is 2).
AGGR_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-64bit-upgrade-attributes>
<aggr-status-attributes>
<is-64-bit-upgrade-in-progress>false</is-64-bit-upgrade-in-progress>
</aggr-status-attributes>
</aggr-64bit-upgrade-attributes>
<aggr-fs-attributes>
<block-type>64_bit</block-type>
<fsid>1758646411</fsid>
<type>aggr</type>
</aggr-fs-attributes>
<aggr-inode-attributes>
<files-private-used>512</files-private-used>
<files-total>30384</files-total>
<files-used>96</files-used>
<inodefile-private-capacity>30384</inodefile-private-capacity>
<inodefile-public-capacity>30384</inodefile-public-capacity>
<maxfiles-available>30384</maxfiles-available>
<maxfiles-possible>243191</maxfiles-possible>
<maxfiles-used>96</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
</aggr-inode-attributes>
<aggr-ownership-attributes>
<home-id>4082368507</home-id>
<home-name>cluster3-01</home-name>
<owner-id>4082368507</owner-id>
<owner-name>cluster3-01</owner-name>
</aggr-ownership-attributes>
<aggr-performance-attributes>
<free-space-realloc>off</free-space-realloc>
<max-write-alloc-blocks>0</max-write-alloc-blocks>
</aggr-performance-attributes>
<aggr-raid-attributes>
<checksum-status>active</checksum-status>
<checksum-style>block</checksum-style>
<disk-count>3</disk-count>
<ha-policy>cfo</ha-policy>
<has-local-root>true</has-local-root>
<has-partner-root>false</has-partner-root>
<is-checksum-enabled>true</is-checksum-enabled>
<is-hybrid>false</is-hybrid>
<is-hybrid-enabled>false</is-hybrid-enabled>
<is-inconsistent>false</is-inconsistent>
<mirror-status>unmirrored</mirror-status>
<mount-state>online</mount-state>
<plex-count>1</plex-count>
<plexes>
<plex-attributes>
<is-online>true</is-online>
<is-resyncing>false</is-resyncing>
<plex-name>/%(aggr1)s/plex0</plex-name>
<plex-status>normal,active</plex-status>
<raidgroups>
<raidgroup-attributes>
<checksum-style>block</checksum-style>
<is-cache-tier>false</is-cache-tier>
<is-recomputing-parity>false</is-recomputing-parity>
<is-reconstructing>false</is-reconstructing>
<raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name>
<recomputing-parity-percentage>0</recomputing-parity-percentage>
<reconstruction-percentage>0</reconstruction-percentage>
</raidgroup-attributes>
</raidgroups>
<resyncing-percentage>0</resyncing-percentage>
</plex-attributes>
</plexes>
<raid-lost-write-state>on</raid-lost-write-state>
<raid-size>16</raid-size>
<raid-status>raid_dp, normal</raid-status>
<raid-type>raid_dp</raid-type>
<state>online</state>
</aggr-raid-attributes>
<aggr-snaplock-attributes>
<is-snaplock>false</is-snaplock>
</aggr-snaplock-attributes>
<aggr-snapshot-attributes>
<files-total>0</files-total>
<files-used>0</files-used>
<is-snapshot-auto-create-enabled>true</is-snapshot-auto-create-enabled>
<is-snapshot-auto-delete-enabled>true</is-snapshot-auto-delete-enabled>
<maxfiles-available>0</maxfiles-available>
<maxfiles-possible>0</maxfiles-possible>
<maxfiles-used>0</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
<percent-used-capacity>0</percent-used-capacity>
<size-available>0</size-available>
<size-total>0</size-total>
<size-used>0</size-used>
<snapshot-reserve-percent>0</snapshot-reserve-percent>
</aggr-snapshot-attributes>
<aggr-space-attributes>
<aggregate-metadata>245760</aggregate-metadata>
<hybrid-cache-size-total>0</hybrid-cache-size-total>
<percent-used-capacity>95</percent-used-capacity>
<size-available>45670400</size-available>
<size-total>943718400</size-total>
<size-used>898048000</size-used>
<total-reserved-space>0</total-reserved-space>
<used-including-snapshot-reserve>898048000</used-including-snapshot-reserve>
<volume-footprints>897802240</volume-footprints>
</aggr-space-attributes>
<aggr-volume-count-attributes>
<flexvol-count>1</flexvol-count>
<flexvol-count-collective>0</flexvol-count-collective>
<flexvol-count-striped>0</flexvol-count-striped>
</aggr-volume-count-attributes>
<aggregate-name>%(aggr1)s</aggregate-name>
<aggregate-uuid>15863632-ea49-49a8-9c88-2bd2d57c6d7a</aggregate-uuid>
<nodes>
<node-name>cluster3-01</node-name>
</nodes>
<striping-type>unknown</striping-type>
</aggr-attributes>
<aggr-attributes>
<aggr-64bit-upgrade-attributes>
<aggr-status-attributes>
<is-64-bit-upgrade-in-progress>false</is-64-bit-upgrade-in-progress>
</aggr-status-attributes>
</aggr-64bit-upgrade-attributes>
<aggr-fs-attributes>
<block-type>64_bit</block-type>
<fsid>706602229</fsid>
<type>aggr</type>
</aggr-fs-attributes>
<aggr-inode-attributes>
<files-private-used>528</files-private-used>
<files-total>31142</files-total>
<files-used>96</files-used>
<inodefile-private-capacity>31142</inodefile-private-capacity>
<inodefile-public-capacity>31142</inodefile-public-capacity>
<maxfiles-available>31142</maxfiles-available>
<maxfiles-possible>1945584</maxfiles-possible>
<maxfiles-used>96</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
</aggr-inode-attributes>
<aggr-ownership-attributes>
<home-id>4082368507</home-id>
<home-name>cluster3-01</home-name>
<owner-id>4082368507</owner-id>
<owner-name>cluster3-01</owner-name>
</aggr-ownership-attributes>
<aggr-performance-attributes>
<free-space-realloc>off</free-space-realloc>
<max-write-alloc-blocks>0</max-write-alloc-blocks>
</aggr-performance-attributes>
<aggr-raid-attributes>
<checksum-status>active</checksum-status>
<checksum-style>block</checksum-style>
<disk-count>10</disk-count>
<ha-policy>sfo</ha-policy>
<has-local-root>false</has-local-root>
<has-partner-root>false</has-partner-root>
<is-checksum-enabled>true</is-checksum-enabled>
<is-hybrid>false</is-hybrid>
<is-hybrid-enabled>false</is-hybrid-enabled>
<is-inconsistent>false</is-inconsistent>
<mirror-status>unmirrored</mirror-status>
<mount-state>online</mount-state>
<plex-count>1</plex-count>
<plexes>
<plex-attributes>
<is-online>true</is-online>
<is-resyncing>false</is-resyncing>
<plex-name>/%(aggr2)s/plex0</plex-name>
<plex-status>normal,active</plex-status>
<raidgroups>
<raidgroup-attributes>
<checksum-style>block</checksum-style>
<is-cache-tier>false</is-cache-tier>
<is-recomputing-parity>false</is-recomputing-parity>
<is-reconstructing>false</is-reconstructing>
<raidgroup-name>/%(aggr2)s/plex0/rg0</raidgroup-name>
<recomputing-parity-percentage>0</recomputing-parity-percentage>
<reconstruction-percentage>0</reconstruction-percentage>
</raidgroup-attributes>
<raidgroup-attributes>
<checksum-style>block</checksum-style>
<is-cache-tier>false</is-cache-tier>
<is-recomputing-parity>false</is-recomputing-parity>
<is-reconstructing>false</is-reconstructing>
<raidgroup-name>/%(aggr2)s/plex0/rg1</raidgroup-name>
<recomputing-parity-percentage>0</recomputing-parity-percentage>
<reconstruction-percentage>0</reconstruction-percentage>
</raidgroup-attributes>
</raidgroups>
<resyncing-percentage>0</resyncing-percentage>
</plex-attributes>
</plexes>
<raid-lost-write-state>on</raid-lost-write-state>
<raid-size>8</raid-size>
<raid-status>raid4, normal</raid-status>
<raid-type>raid4</raid-type>
<state>online</state>
</aggr-raid-attributes>
<aggr-snaplock-attributes>
<is-snaplock>false</is-snaplock>
</aggr-snaplock-attributes>
<aggr-snapshot-attributes>
<files-total>0</files-total>
<files-used>0</files-used>
<is-snapshot-auto-create-enabled>true</is-snapshot-auto-create-enabled>
<is-snapshot-auto-delete-enabled>true</is-snapshot-auto-delete-enabled>
<maxfiles-available>0</maxfiles-available>
<maxfiles-possible>0</maxfiles-possible>
<maxfiles-used>0</maxfiles-used>
<percent-inode-used-capacity>0</percent-inode-used-capacity>
<percent-used-capacity>0</percent-used-capacity>
<size-available>0</size-available>
<size-total>0</size-total>
<size-used>0</size-used>
<snapshot-reserve-percent>0</snapshot-reserve-percent>
</aggr-snapshot-attributes>
<aggr-space-attributes>
<aggregate-metadata>425984</aggregate-metadata>
<hybrid-cache-size-total>0</hybrid-cache-size-total>
<percent-used-capacity>15</percent-used-capacity>
<size-available>6448431104</size-available>
<size-total>7549747200</size-total>
<size-used>1101316096</size-used>
<total-reserved-space>0</total-reserved-space>
<used-including-snapshot-reserve>1101316096</used-including-snapshot-reserve>
<volume-footprints>1100890112</volume-footprints>
</aggr-space-attributes>
<aggr-volume-count-attributes>
<flexvol-count>2</flexvol-count>
<flexvol-count-collective>0</flexvol-count-collective>
<flexvol-count-striped>0</flexvol-count-striped>
</aggr-volume-count-attributes>
<aggregate-name>%(aggr2)s</aggregate-name>
<aggregate-uuid>2a741934-1aaf-42dd-93ca-aaf231be108a</aggregate-uuid>
<nodes>
<node-name>cluster3-01</node-name>
</nodes>
<striping-type>not_striped</striping-type>
</aggr-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
    'aggr1': VOLUME_AGGREGATE_NAMES[0],
    'aggr2': VOLUME_AGGREGATE_NAMES[1],
})
# Canned 'aggr-get-iter' response reduced to the space attributes
# (size-available/total/used) plus plex/raidgroup names for both aggregates.
AGGR_GET_SPACE_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-raid-attributes>
<plexes>
<plex-attributes>
<plex-name>/%(aggr1)s/plex0</plex-name>
<raidgroups>
<raidgroup-attributes>
<raidgroup-name>/%(aggr1)s/plex0/rg0</raidgroup-name>
</raidgroup-attributes>
</raidgroups>
</plex-attributes>
</plexes>
</aggr-raid-attributes>
<aggr-space-attributes>
<size-available>45670400</size-available>
<size-total>943718400</size-total>
<size-used>898048000</size-used>
</aggr-space-attributes>
<aggregate-name>%(aggr1)s</aggregate-name>
</aggr-attributes>
<aggr-attributes>
<aggr-raid-attributes>
<plexes>
<plex-attributes>
<plex-name>/%(aggr2)s/plex0</plex-name>
<raidgroups>
<raidgroup-attributes>
<raidgroup-name>/%(aggr2)s/plex0/rg0</raidgroup-name>
</raidgroup-attributes>
<raidgroup-attributes>
<raidgroup-name>/%(aggr2)s/plex0/rg1</raidgroup-name>
</raidgroup-attributes>
</raidgroups>
</plex-attributes>
</plexes>
</aggr-raid-attributes>
<aggr-space-attributes>
<size-available>4267659264</size-available>
<size-total>7549747200</size-total>
<size-used>3282087936</size-used>
</aggr-space-attributes>
<aggregate-name>%(aggr2)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
    'aggr1': VOLUME_AGGREGATE_NAMES[0],
    'aggr2': VOLUME_AGGREGATE_NAMES[1],
})
# Canned single-record response mapping one aggregate to its home node.
AGGR_GET_NODE_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-ownership-attributes>
<home-name>%(node)s</home-name>
</aggr-ownership-attributes>
<aggregate-name>%(aggr)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
    'aggr': VOLUME_AGGREGATE_NAME,
    'node': NODE_NAME,
})
# RAID type used in the SSC (storage service catalog) aggregate fixture.
AGGREGATE_RAID_TYPE = 'raid_dp'
# Canned 'aggr-get-iter' response restricted to SSC-relevant raid attributes
# (raid-type, is-hybrid) for a single aggregate.
AGGR_GET_ITER_SSC_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-raid-attributes>
<plexes>
<plex-attributes>
<plex-name>/%(aggr)s/plex0</plex-name>
<raidgroups>
<raidgroup-attributes>
<raidgroup-name>/%(aggr)s/plex0/rg0</raidgroup-name>
</raidgroup-attributes>
</raidgroups>
</plex-attributes>
</plexes>
<raid-type>%(raid)s</raid-type>
<is-hybrid>true</is-hybrid>
</aggr-raid-attributes>
<aggregate-name>%(aggr)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'aggr': VOLUME_AGGREGATE_NAME, 'raid': AGGREGATE_RAID_TYPE})
# Parsed-dict form expected when a client parses AGGR_GET_ITER_SSC_RESPONSE.
AGGR_INFO_SSC = {
    'name': VOLUME_AGGREGATE_NAME,
    'raid-type': AGGREGATE_RAID_TYPE,
    'is-hybrid': True,
}
# Capacity figures (bytes / percent) rendered into the capacity response below.
AGGR_SIZE_TOTAL = 107374182400
AGGR_SIZE_AVAILABLE = 59055800320
AGGR_USED_PERCENT = 45
# Canned 'aggr-get-iter' response carrying only capacity-related space
# attributes for a single aggregate.
AGGR_GET_ITER_CAPACITY_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<aggr-attributes>
<aggr-space-attributes>
<percent-used-capacity>%(used)s</percent-used-capacity>
<size-total>%(total_size)s</size-total>
<size-available>%(available_size)s</size-available>
</aggr-space-attributes>
<aggregate-name>%(aggr)s</aggregate-name>
</aggr-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
    'aggr': VOLUME_AGGREGATE_NAME,
    'used': AGGR_USED_PERCENT,
    'available_size': AGGR_SIZE_AVAILABLE,
    'total_size': AGGR_SIZE_TOTAL,
})
# Volume capacity figures (bytes) rendered into the response below.
VOLUME_SIZE_TOTAL = 19922944
VOLUME_SIZE_AVAILABLE = 19791872
# Canned 'volume-get-iter' response with only the volume space attributes.
VOLUME_GET_ITER_CAPACITY_RESPONSE = etree.XML("""
<results status="passed">
<num-records>1</num-records>
<attributes-list>
<volume-attributes>
<volume-space-attributes>
<size-available>%(available_size)s</size-available>
<size-total>%(total_size)s</size-total>
</volume-space-attributes>
</volume-attributes>
</attributes-list>
</results>
""" % {
    'available_size': VOLUME_SIZE_AVAILABLE,
    'total_size': VOLUME_SIZE_TOTAL,
})
# Canned 'volume-get-iter' response listing two volumes (VOLUME_NAMES) owned
# by the same vserver; only the id attributes are included.
VOLUME_GET_ITER_LIST_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(volume1)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
<volume-attributes>
<volume-id-attributes>
<name>%(volume2)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
    'volume1': VOLUME_NAMES[0],
    'volume2': VOLUME_NAMES[1],
    'vserver': VOLUME_VSERVER_NAME,
})
# Canned 'volume-get-iter' response with the attribute sections an SSC scan
# reads: id, mirror, qos, space, snapshot and language attributes.
VOLUME_GET_ITER_SSC_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<containing-aggregate-name>%(aggr)s</containing-aggregate-name>
<junction-path>/%(volume)s</junction-path>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
<type>rw</type>
</volume-id-attributes>
<volume-mirror-attributes>
<is-data-protection-mirror>false</is-data-protection-mirror>
<is-replica-volume>false</is-replica-volume>
</volume-mirror-attributes>
<volume-qos-attributes>
<policy-group-name>fake_qos_policy_group_name</policy-group-name>
</volume-qos-attributes>
<volume-space-attributes>
<is-space-guarantee-enabled>true</is-space-guarantee-enabled>
<space-guarantee>none</space-guarantee>
<percentage-snapshot-reserve>5</percentage-snapshot-reserve>
<size>12345</size>
</volume-space-attributes>
<volume-snapshot-attributes>
<snapshot-policy>default</snapshot-policy>
</volume-snapshot-attributes>
<volume-language-attributes>
<language-code>en_US</language-code>
</volume-language-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
    'aggr': VOLUME_AGGREGATE_NAMES[0],
    'volume': VOLUME_NAMES[0],
    'vserver': VOLUME_VSERVER_NAME,
})
# Parsed-dict form expected when a client parses VOLUME_GET_ITER_SSC_RESPONSE.
# Note numeric-looking fields ('percentage-snapshot-reserve', 'size') are
# kept as strings, matching the raw XML text.
VOLUME_INFO_SSC = {
    'name': VOLUME_NAMES[0],
    'vserver': VOLUME_VSERVER_NAME,
    'junction-path': '/%s' % VOLUME_NAMES[0],
    'aggregate': VOLUME_AGGREGATE_NAMES[0],
    'space-guarantee-enabled': True,
    'language': 'en_US',
    'percentage-snapshot-reserve': '5',
    'snapshot-policy': 'default',
    'type': 'rw',
    'size': '12345',
    'space-guarantee': 'none',
    'qos-policy-group': 'fake_qos_policy_group_name',
}
# Canned 'sis-get-iter' response: dedupe enabled, compression disabled, with
# logical data size/limit reported.
SIS_GET_ITER_SSC_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<sis-status-info>
<is-compression-enabled>false</is-compression-enabled>
<state>enabled</state>
<logical-data-size>211106232532992</logical-data-size>
<logical-data-limit>703687441776640</logical-data-limit>
</sis-status-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")
# Parsed-dict form expected from SIS_GET_ITER_SSC_RESPONSE ('dedupe' True
# corresponds to state 'enabled').
VOLUME_DEDUPE_INFO_SSC = {
    'compression': False,
    'dedupe': True,
    'logical-data-size': 211106232532992,
    'logical-data-limit': 703687441776640,
}
# Variant of the SIS response with sis disabled and no logical-data elements.
SIS_GET_ITER_SSC_NO_LOGICAL_DATA_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<sis-status-info>
<is-compression-enabled>false</is-compression-enabled>
<state>disabled</state>
</sis-status-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")
# Expected parse of the no-logical-data variant; size/limit fall back to
# defaults (0 and 1) when the XML elements are absent.
VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA = {
    'compression': False,
    'dedupe': False,
    'logical-data-size': 0,
    'logical-data-limit': 1,
}
# Canned clone-split-status response with unsplit clone count and size.
CLONE_SPLIT_STATUS_RESPONSE = etree.XML("""
<results status="passed">
<clone-split-info>
<unsplit-clone-count>1234</unsplit-clone-count>
<unsplit-size>316659348799488</unsplit-size>
</clone-split-info>
</results>
""")
# Parsed-dict form expected from CLONE_SPLIT_STATUS_RESPONSE.
VOLUME_CLONE_SPLIT_STATUS = {
    'unsplit-size': 316659348799488,
    'unsplit-clone-count': 1234,
}
# Variant with an empty clone-split-info element (no data reported).
CLONE_SPLIT_STATUS_NO_DATA_RESPONSE = etree.XML("""
<results status="passed">
<clone-split-info>
</clone-split-info>
</results>
""")
# Same shape as VOLUME_GET_ITER_SSC_RESPONSE but with an <encrypt>true</encrypt>
# element, exercising the encryption SSC attribute.
VOLUME_GET_ITER_ENCRYPTION_SSC_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<encrypt>true</encrypt>
<volume-id-attributes>
<containing-aggregate-name>%(aggr)s</containing-aggregate-name>
<junction-path>/%(volume)s</junction-path>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
<type>rw</type>
</volume-id-attributes>
<volume-mirror-attributes>
<is-data-protection-mirror>false</is-data-protection-mirror>
<is-replica-volume>false</is-replica-volume>
</volume-mirror-attributes>
<volume-qos-attributes>
<policy-group-name>fake_qos_policy_group_name</policy-group-name>
</volume-qos-attributes>
<volume-space-attributes>
<is-space-guarantee-enabled>true</is-space-guarantee-enabled>
<space-guarantee>none</space-guarantee>
<percentage-snapshot-reserve>5</percentage-snapshot-reserve>
<size>12345</size>
</volume-space-attributes>
<volume-snapshot-attributes>
<snapshot-policy>default</snapshot-policy>
</volume-snapshot-attributes>
<volume-language-attributes>
<language-code>en_US</language-code>
</volume-language-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
    'aggr': VOLUME_AGGREGATE_NAMES[0],
    'volume': VOLUME_NAMES[0],
    'vserver': VOLUME_VSERVER_NAME,
})
# Three canned pages of a paged 'storage-disk-get-iter' call. Pages 1 and 2
# each carry 10 records and a <next-tag> for continuation; page 3 carries the
# final 8 records and no next-tag.
STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1 = etree.XML("""
<results status="passed">
<attributes-list>
<storage-disk-info>
<disk-name>cluster3-01:v4.16</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.17</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.18</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.19</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.20</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.21</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.22</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.24</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.25</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.26</disk-name>
</storage-disk-info>
</attributes-list>
<next-tag>next_tag_1</next-tag>
<num-records>10</num-records>
</results>
""")
STORAGE_DISK_GET_ITER_RESPONSE_PAGE_2 = etree.XML("""
<results status="passed">
<attributes-list>
<storage-disk-info>
<disk-name>cluster3-01:v4.27</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.28</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.29</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v4.32</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.16</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.17</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.18</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.19</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.20</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.21</disk-name>
</storage-disk-info>
</attributes-list>
<next-tag>next_tag_2</next-tag>
<num-records>10</num-records>
</results>
""")
STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3 = etree.XML("""
<results status="passed">
<attributes-list>
<storage-disk-info>
<disk-name>cluster3-01:v5.22</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.24</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.25</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.26</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.27</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.28</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.29</disk-name>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.32</disk-name>
</storage-disk-info>
</attributes-list>
<num-records>8</num-records>
</results>
""")
# Effective disk types rendered into the disk-type response below.
AGGREGATE_DISK_TYPES = ['SATA', 'SSD']
# Canned 'storage-disk-get-iter' response reporting four disks: two of the
# first type and two of the second (duplicate disk-names are intentional
# fixture data).
STORAGE_DISK_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<storage-disk-info>
<disk-name>cluster3-01:v5.19</disk-name>
<disk-raid-info>
<effective-disk-type>%(type0)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.20</disk-name>
<disk-raid-info>
<effective-disk-type>%(type0)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.20</disk-name>
<disk-raid-info>
<effective-disk-type>%(type1)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
<storage-disk-info>
<disk-name>cluster3-01:v5.20</disk-name>
<disk-raid-info>
<effective-disk-type>%(type1)s</effective-disk-type>
</disk-raid-info>
</storage-disk-info>
</attributes-list>
<num-records>4</num-records>
</results>
""" % {
    'type0': AGGREGATE_DISK_TYPES[0],
    'type1': AGGREGATE_DISK_TYPES[1],
})
# Canned user-capability response: one object with one operation whose
# api-name field holds a comma-separated list of APIs.
SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<capability-info>
<object-name>object</object-name>
<operation-list>
<operation-info>
<api-name>api,api2,api3</api-name>
<name>operation</name>
</operation-info>
</operation-list>
</capability-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")
# Labels for the 'cp_phase_times' counter array, joined into the single
# <label-info> element of the counter-list response below.
PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS = [
    'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', 'IPU_DISK_ADD',
    'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI', 'P2V_FSINFO', 'P2V_DLOG1',
    'P2V_DLOG2', 'P2V_REFCOUNT', 'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM',
    'P2V_SNAP', 'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE',
    'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO',
    'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', 'P2A_HYABC', 'P2A_BM',
    'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', 'P2_FINISH', 'P3_WAIT',
    'P3V_VOLINFO', 'P3A_VOLINFO', 'P3_FINISH', 'P4_FINISH', 'P5_FINISH',
]
# Canned 'perf-object-counter-list-info' response for the wafl object with
# three counters: a simple rate counter, an array counter with per-element
# labels, and an array counter based on total_cp_msecs.
PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE = etree.XML("""
<results status="passed">
<counters>
<counter-info>
<desc>No. of times 8.3 names are accessed per second.</desc>
<name>access_8_3_names</name>
<privilege-level>diag</privilege-level>
<properties>rate</properties>
<unit>per_sec</unit>
</counter-info>
<counter-info>
<desc>Array of counts of different types of CPs</desc>
<labels>
<label-info>wafl_timer generated CP</label-info>
<label-info>snapshot generated CP</label-info>
<label-info>wafl_avail_bufs generated CP</label-info>
<label-info>dirty_blk_cnt generated CP</label-info>
<label-info>full NV-log generated CP,back-to-back CP</label-info>
<label-info>flush generated CP,sync generated CP</label-info>
<label-info>deferred back-to-back CP</label-info>
<label-info>low mbufs generated CP</label-info>
<label-info>low datavecs generated CP</label-info>
<label-info>nvlog replay takeover time limit CP</label-info>
</labels>
<name>cp_count</name>
<privilege-level>diag</privilege-level>
<properties>delta</properties>
<type>array</type>
<unit>none</unit>
</counter-info>
<counter-info>
<base-counter>total_cp_msecs</base-counter>
<desc>Array of percentage time spent in different phases of CP</desc>
<labels>
<label-info>%(labels)s</label-info>
</labels>
<name>cp_phase_times</name>
<privilege-level>diag</privilege-level>
<properties>percent</properties>
<type>array</type>
<unit>percent</unit>
</counter-info>
</counters>
</results>
""" % {'labels': ','.join(PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS)})
# Canned 'perf-object-get-instances' response (cluster mode): one
# avg_processor_busy counter per node, instances keyed by node uuid.
PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE = etree.XML("""
<results status="passed">
<instances>
<instance-data>
<counters>
<counter-data>
<name>avg_processor_busy</name>
<value>5674745133134</value>
</counter-data>
</counters>
<name>system</name>
<uuid>%(node1)s:kernel:system</uuid>
</instance-data>
<instance-data>
<counters>
<counter-data>
<name>avg_processor_busy</name>
<value>4077649009234</value>
</counter-data>
</counters>
<name>system</name>
<uuid>%(node2)s:kernel:system</uuid>
</instance-data>
</instances>
<timestamp>1453412013</timestamp>
</results>
""" % {'node1': NODE_NAMES[0], 'node2': NODE_NAMES[1]})
# 7-mode variant of the same call: a single instance without uuid.
PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE = etree.XML("""
<results status="passed">
<timestamp>1454146292</timestamp>
<instances>
<instance-data>
<name>system</name>
<counters>
<counter-data>
<name>avg_processor_busy</name>
<value>13215732322</value>
</counter-data>
</counters>
</instance-data>
</instances>
</results>""")
# Canned 'perf-object-instance-list-info-iter' response with one instance.
PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<instance-info>
<name>system</name>
<uuid>%(node)s:kernel:system</uuid>
</instance-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'node': NODE_NAME})
# Canned 'perf-object-instance-list-info' response listing two processors.
PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE = etree.XML("""
<results status="passed">
<instances>
<instance-info>
<name>processor0</name>
</instance-info>
<instance-info>
<name>processor1</name>
</instance-info>
</instances>
</results>""")
# Canned 'system-get-info' response describing a simulator node.
SYSTEM_GET_INFO_RESPONSE = etree.XML("""
<results status="passed">
<system-info>
<system-name>%(node)s</system-name>
<system-id>4082368508</system-id>
<system-model>SIMBOX</system-model>
<system-machine-type>SIMBOX</system-machine-type>
<vendor-id>NetApp</vendor-id>
<system-serial-number>4082368508</system-serial-number>
<board-speed>2593</board-speed>
<board-type>NetApp VSim</board-type>
<cpu-serial-number>999999</cpu-serial-number>
<number-of-processors>2</number-of-processors>
<memory-size>1599</memory-size>
<cpu-processor-id>0x40661</cpu-processor-id>
<cpu-microcode-version>15</cpu-microcode-version>
<maximum-aggregate-size>2199023255552</maximum-aggregate-size>
<maximum-flexible-volume-size>17592186044416</maximum-flexible-volume-size>
<maximum-flexible-volume-count>500</maximum-flexible-volume-count>
<supports-raid-array>true</supports-raid-array>
</system-info>
</results>
""" % {'node': NODE_NAME})
# Request element for 'iscsi-initiator-get-auth' (uses INITIATOR_IQN defined
# earlier in this module).
ISCSI_INITIATOR_GET_AUTH_ELEM = etree.XML("""
<iscsi-initiator-get-auth>
<initiator>%s</initiator>
</iscsi-initiator-get-auth>""" % INITIATOR_IQN)
# Canned failure (errno 13112) for iscsi-initiator-auth-list-info.
ISCSI_INITIATOR_AUTH_LIST_INFO_FAILURE = etree.XML("""
<results status="failed" errno="13112" reason="Initiator %s not found,
please use default authentication." />""" % INITIATOR_IQN)
# Fake cluster / vserver / snapmirror endpoint names used by the peering and
# replication fixtures below.
CLUSTER_NAME = 'fake_cluster'
REMOTE_CLUSTER_NAME = 'fake_cluster_2'
CLUSTER_ADDRESS_1 = 'fake_cluster_address'
CLUSTER_ADDRESS_2 = 'fake_cluster_address_2'
VSERVER_NAME = 'fake_vserver'
VSERVER_NAME_2 = 'fake_vserver_2'
ADMIN_VSERVER_NAME = 'fake_admin_vserver'
NODE_VSERVER_NAME = 'fake_node_vserver'
SM_SOURCE_VSERVER = 'fake_source_vserver'
SM_SOURCE_VOLUME = 'fake_source_volume'
SM_DEST_VSERVER = 'fake_destination_vserver'
SM_DEST_VOLUME = 'fake_destination_volume'
# Canned 'cluster-peer-get-iter' response: one available peer relationship
# with two active addresses and one peer address.
CLUSTER_PEER_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<cluster-peer-info>
<active-addresses>
<remote-inet-address>%(addr1)s</remote-inet-address>
<remote-inet-address>%(addr2)s</remote-inet-address>
</active-addresses>
<availability>available</availability>
<cluster-name>%(cluster)s</cluster-name>
<cluster-uuid>fake_uuid</cluster-uuid>
<peer-addresses>
<remote-inet-address>%(addr1)s</remote-inet-address>
</peer-addresses>
<remote-cluster-name>%(remote_cluster)s</remote-cluster-name>
<serial-number>fake_serial_number</serial-number>
<timeout>60</timeout>
</cluster-peer-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
    'addr1': CLUSTER_ADDRESS_1,
    'addr2': CLUSTER_ADDRESS_2,
    'cluster': CLUSTER_NAME,
    'remote_cluster': REMOTE_CLUSTER_NAME,
})
# Canned 'cluster-peer-policy-get' response.
CLUSTER_PEER_POLICY_GET_RESPONSE = etree.XML("""
<results status="passed">
<attributes>
<cluster-peer-policy>
<is-unauthenticated-access-permitted>false</is-unauthenticated-access-permitted>
<passphrase-minimum-length>8</passphrase-minimum-length>
</cluster-peer-policy>
</attributes>
</results>
""")
# Canned 'vserver-peer-get-iter' response: a peered snapmirror relationship
# between VSERVER_NAME and VSERVER_NAME_2.
VSERVER_PEER_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<vserver-peer-info>
<applications>
<vserver-peer-application>snapmirror</vserver-peer-application>
</applications>
<peer-cluster>%(cluster)s</peer-cluster>
<peer-state>peered</peer-state>
<peer-vserver>%(vserver2)s</peer-vserver>
<vserver>%(vserver1)s</vserver>
</vserver-peer-info>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
    'cluster': CLUSTER_NAME,
    'vserver1': VSERVER_NAME,
    'vserver2': VSERVER_NAME_2
})
# Canned 'snapmirror-get-iter' response: one healthy snapmirrored
# data_protection relationship between two volumes of the same vserver.
SNAPMIRROR_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapmirror-info>
<destination-location>%(vserver)s:%(volume2)s</destination-location>
<destination-volume>%(volume2)s</destination-volume>
<destination-volume-node>fake_destination_node</destination-volume-node>
<destination-vserver>%(vserver)s</destination-vserver>
<exported-snapshot>fake_snapshot</exported-snapshot>
<exported-snapshot-timestamp>1442701782</exported-snapshot-timestamp>
<is-constituent>false</is-constituent>
<is-healthy>true</is-healthy>
<lag-time>2187</lag-time>
<last-transfer-duration>109</last-transfer-duration>
<last-transfer-end-timestamp>1442701890</last-transfer-end-timestamp>
<last-transfer-from>test:manila</last-transfer-from>
<last-transfer-size>1171456</last-transfer-size>
<last-transfer-type>initialize</last-transfer-type>
<max-transfer-rate>0</max-transfer-rate>
<mirror-state>snapmirrored</mirror-state>
<newest-snapshot>fake_snapshot</newest-snapshot>
<newest-snapshot-timestamp>1442701782</newest-snapshot-timestamp>
<policy>DPDefault</policy>
<relationship-control-plane>v2</relationship-control-plane>
<relationship-id>ea8bfcc6-5f1d-11e5-8446-123478563412</relationship-id>
<relationship-status>idle</relationship-status>
<relationship-type>data_protection</relationship-type>
<schedule>daily</schedule>
<source-location>%(vserver)s:%(volume1)s</source-location>
<source-volume>%(volume1)s</source-volume>
<source-vserver>%(vserver)s</source-vserver>
<vserver>fake_destination_vserver</vserver>
</snapmirror-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
    'volume1': VOLUME_NAMES[0],
    'volume2': VOLUME_NAMES[1],
    'vserver': VOLUME_VSERVER_NAME,
})
# Variant returning only the subset of snapmirror fields a filtered query
# requests.
SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapmirror-info>
<destination-vserver>fake_destination_vserver</destination-vserver>
<destination-volume>fake_destination_volume</destination-volume>
<is-healthy>true</is-healthy>
<mirror-state>snapmirrored</mirror-state>
<schedule>daily</schedule>
<source-vserver>fake_source_vserver</source-vserver>
<source-volume>fake_source_volume</source-volume>
</snapmirror-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")
# Canned successful result for 'snapmirror-initialize'.
SNAPMIRROR_INITIALIZE_RESULT = etree.XML("""
<results status="passed">
<result-status>succeeded</result-status>
</results>
""")
# Canned 'vserver-get-iter' response listing a single data vserver.
VSERVER_DATA_LIST_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<vserver-info>
<vserver-name>%(vserver)s</vserver-name>
<vserver-type>data</vserver-type>
</vserver-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'vserver': VSERVER_NAME})
# Canned 'system-node-get-iter' response listing the single fake node.
SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<node-details-info>
<node>%s</node>
</node-details-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % NODE_NAME)
|
{
"content_hash": "0aabccce779aca9fe538ab5029b6e7cb",
"timestamp": "",
"source": "github",
"line_count": 1397,
"max_line_length": 88,
"avg_line_length": 35.27415891195419,
"alnum_prop": 0.6169893258654978,
"repo_name": "ge0rgi/cinder",
"id": "40bb4e022d64e94cf56899d6a99daea535cd9335",
"size": "49969",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable/ocata",
"path": "cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19963591"
}
],
"symlink_target": ""
}
|
import os
from pathlib import Path

from setuptools import Extension, find_namespace_packages, setup

# Directory containing this setup.py; README.md is resolved relative to it.
PROJECT_ROOT = Path(__file__).parent

# The README doubles as the PyPI long description.
long_description = (PROJECT_ROOT / "README.md").read_text(encoding="utf8")

# C extension providing the low-level profiling hook.
_extensions = [
    Extension(
        "pyinstrument.low_level.stat_profile",
        sources=["pyinstrument/low_level/stat_profile.c"],
    )
]

_classifiers = [
    "Environment :: Console",
    "Environment :: Web Environment",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: BSD License",
    "Operating System :: MacOS",
    "Operating System :: Microsoft :: Windows",
    "Operating System :: POSIX",
    "Topic :: Software Development :: Debuggers",
    "Topic :: Software Development :: Testing",
]

setup(
    name="pyinstrument",
    packages=find_namespace_packages(include=["pyinstrument*"]),
    version="4.4.0",
    ext_modules=_extensions,
    description="Call stack profiler for Python. Shows you why your code is slow!",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Joe Rickerby",
    author_email="joerick@mac.com",
    url="https://github.com/joerick/pyinstrument",
    keywords=["profiling", "profile", "profiler", "cpu", "time", "sampling"],
    install_requires=[],
    extras_require={"jupyter": ["ipython"]},
    include_package_data=True,
    python_requires=">=3.7",
    entry_points={"console_scripts": ["pyinstrument = pyinstrument.__main__:main"]},
    zip_safe=False,
    classifiers=_classifiers,
)
|
{
"content_hash": "42908aa48b92942e91f7e1fabc26d9dd",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 84,
"avg_line_length": 35.58139534883721,
"alnum_prop": 0.6385620915032679,
"repo_name": "joerick/pyinstrument",
"id": "0fc734cd9eb2b2ed984aa29884dfc72c3ceb192a",
"size": "1530",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "22285"
},
{
"name": "CSS",
"bytes": "235"
},
{
"name": "Dockerfile",
"bytes": "1274"
},
{
"name": "HTML",
"bytes": "696"
},
{
"name": "JavaScript",
"bytes": "207"
},
{
"name": "Python",
"bytes": "184685"
},
{
"name": "Shell",
"bytes": "1817"
},
{
"name": "Svelte",
"bytes": "9739"
},
{
"name": "TypeScript",
"bytes": "4711"
}
],
"symlink_target": ""
}
|
"""
The Maven Project module handles creating Maven Jenkins projects.
To create a Maven project, specify ``maven`` in the ``project-type``
attribute to the :ref:`Job` definition. It also requires a ``maven`` section
in the :ref:`Job` definition.
:Job Parameters:
* **root-module**:
* **group-id** (`str`): GroupId.
* **artifact-id** (`str`): ArtifactId.
* **root-pom** (`str`): The path to the pom.xml file. (defaults to pom.xml)
* **goals** (`str`): Goals to execute. (required)
* **maven-opts** (`str`): Java options to pass to maven (aka MAVEN_OPTS)
* **maven-name** (`str`): Installation of maven which should be used.
Not setting ``maven-name`` appears to use the first maven install
defined in the global jenkins config.
* **ignore-upstream-changes** (`bool`): Do not start a build whenever
a SNAPSHOT dependency is built or not. (defaults to true)
* **automatic-archiving** (`bool`): Activate automatic artifact archiving
(defaults to true).
Example::
job:
name: doc_job
project-type: maven
maven:
root-module:
group-id: org.example.docs
artifact-id: example-guide
root-pom: doc/src/pom.xml
goals: "clean generate-sources"
maven-opts: '-Dmyvar=/path/somewhere'
maven-name: Maven3
automatic-archiving: true
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
class Maven(jenkins_jobs.modules.base.Base):
    sequence = 0

    def root_xml(self, data):
        """Build the <maven2-moduleset> root element for a Maven job.

        Returns the bare root element when the job definition contains
        no ``maven`` section.
        """
        root = XML.Element('maven2-moduleset')
        if 'maven' not in data:
            return root
        maven = data['maven']

        if 'root-module' in maven:
            module = XML.SubElement(root, 'rootModule')
            XML.SubElement(module, 'groupId').text = \
                maven['root-module']['group-id']
            XML.SubElement(module, 'artifactId').text = \
                maven['root-module']['artifact-id']

        # 'goals' is required; a missing key raises KeyError by design.
        XML.SubElement(root, 'goals').text = maven['goals']

        # Optional simple string settings.
        for yaml_key, tag in (('maven-opts', 'mavenOpts'),
                              ('maven-name', 'mavenName')):
            value = maven.get(yaml_key)
            if value:
                XML.SubElement(root, tag).text = value

        # NB: 'ignoreUpstremChanges' is the tag Jenkins itself uses --
        # the misspelling is upstream's, do not "fix" it.
        XML.SubElement(root, 'ignoreUpstremChanges').text = str(
            maven.get('ignore-upstream-changes', True)).lower()
        XML.SubElement(root, 'rootPOM').text = \
            maven.get('root-pom', 'pom.xml')

        # Fixed defaults matching Jenkins' own serialization.
        for tag, text in (('aggregatorStyleBuild', 'true'),
                          ('incrementalBuild', 'false'),
                          ('perModuleEmail', 'true')):
            XML.SubElement(root, tag).text = text

        XML.SubElement(root, 'archivingDisabled').text = str(
            not maven.get('automatic-archiving', True)).lower()

        for tag, text in (('resolveDependencies', 'false'),
                          ('processPlugins', 'false'),
                          ('mavenValidationLevel', '-1'),
                          ('runHeadless', 'false')):
            XML.SubElement(root, tag).text = text

        # Empty placeholder elements expected by the Jenkins config.
        XML.SubElement(root, 'settingConfigId')
        XML.SubElement(root, 'globalSettingConfigId')

        post_steps = XML.SubElement(root, 'runPostStepsIfResult')
        XML.SubElement(post_steps, 'name').text = 'FAILURE'
        XML.SubElement(post_steps, 'ordinal').text = '2'
        XML.SubElement(post_steps, 'color').text = 'red'
        return root
|
{
"content_hash": "000660aa8daa9aa041edd4b94280aaa0",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 40.87640449438202,
"alnum_prop": 0.6283672347443651,
"repo_name": "gtest-org/test15",
"id": "005203060b708e5b6125ea01c99f2475e710e3d7",
"size": "4241",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "jenkins_jobs/modules/project_maven.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "410254"
}
],
"symlink_target": ""
}
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import os.path
from include.dataset_fnames import generate_station_data_fname, generate_data_fname
from include.feature_lists import numeric_features
def merge_numeric_features_with_time():
    """For each station, copy its last date column into the numeric file.

    Reads the per-station numeric and date CSVs of the 'test' sample,
    aligns the date rows to the numeric file's index, stores the date
    file's last column as a new 'time' column and rewrites the numeric
    CSV in place.
    """
    for i, station_id in enumerate(sorted(numeric_features)):
        print station_id,
        fname_numeric = generate_station_data_fname(station_id, sample_type='test', data_type='numeric', allow_nan_values=False)
        # NOTE(review): `features` is built but never used -- possibly
        # intended for a usecols= filter; confirm before removing.
        features = ['Id'] + numeric_features[station_id]
        station_df_numeric = pd.read_csv(fname_numeric, index_col=['Id'])
        fname_date = generate_station_data_fname(station_id, sample_type='test', data_type='date', missing_values=True, allow_nan_values=False)
        station_df_date = pd.read_csv(fname_date, index_col=['Id'])
        if (station_df_numeric.shape[0] == 0):
            # No numeric rows for this station in the sample: skip it.
            print 'Skipping'
            continue
        # Align date rows to the numeric file's row order/index.
        station_df_date = station_df_date.loc[station_df_numeric.index]
        last_feature_in_list = station_df_date.columns[-1]
        # station_df_date.rename(columns={last_feature_in_list: 'time'}, inplace=True)
        # station_df_numeric.loc[station_df_numeric.index, 'time'] = station_df_date['time']
        station_df_numeric['time'] = station_df_date[last_feature_in_list]
        station_df_numeric.to_csv(fname_numeric)

merge_numeric_features_with_time()
|
{
"content_hash": "f63a705bee15d4c53771907d0614c3c3",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 144,
"avg_line_length": 38.76315789473684,
"alnum_prop": 0.6707399864222675,
"repo_name": "zakkum42/Bosch",
"id": "5c823c2399ba8e264749ca963c7b912664433703",
"size": "1473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/01-features/numeric_features_with_time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11904112"
},
{
"name": "Python",
"bytes": "541304"
},
{
"name": "Shell",
"bytes": "1566"
},
{
"name": "sed",
"bytes": "112773"
}
],
"symlink_target": ""
}
|
"""
This module is a scratchpad for general development, testing & debugging
Well, even more so than pcmd.py. You best ignore p2cmd.py.
"""
import sys
import time
from pprint import pprint
from random import Random
from django.core.management import BaseCommand
from django.db import connection
from pexp.models import *
rnd = Random()
def show_queries():
    """Print all SQL queries Django captured so far, then reset the log.

    Uses ``connection.queries_log.clear()`` -- the reset style already
    used elsewhere in this module -- because on modern Django
    ``connection.queries`` is a read-only property, so assigning
    ``connection.queries = []`` would fail.
    """
    print()
    print("QUERIES:", len(connection.queries))
    pprint(connection.queries)
    print()
    connection.queries_log.clear()
def print_timing(func, message="", iterations=1):
    """Return a wrapper that times *func* and reports query counts.

    The wrapper calls *func* ``iterations`` times, then prints the total
    wall-clock time in milliseconds, the number of SQL queries Django
    recorded during the runs, and the iteration count. ``message`` is
    printed as a prefix before the function name.
    """
    def wrapper(*arg):
        timings = []
        connection.queries_log.clear()
        for _ in range(iterations):
            t1 = time.time()
            func(*arg)
            t2 = time.time()
            timings.append((t2 - t1) * 1000.0)
        # ``func.func_name`` was Python-2-only and raises AttributeError
        # on Python 3; ``__name__`` works everywhere.
        print(
            "%s%-19s: %.4f ms, %i queries (%i times)"
            % (message, func.__name__, sum(timings),
               len(connection.queries), iterations)
        )
        sys.stdout.flush()
    return wrapper
class Command(BaseCommand):
    # NOTE(review): handle_noargs() was removed in Django 1.10 in favour
    # of handle() -- confirm which Django version this scratchpad targets.
    help = ""

    def handle_noargs(self, **options):
        # Disabled scratch block: create one row per polymorphic model and
        # dump the queries issued by a base-manager queryset.
        if False:
            TestModelA.objects.all().delete()
            a = TestModelA.objects.create(field1="A1")
            b = TestModelB.objects.create(field1="B1", field2="B2")
            c = TestModelC.objects.create(field1="C1", field2="C2", field3="C3")
            connection.queries_log.clear()
            print(TestModelC.base_objects.all())
            show_queries()
        # Disabled scratch block: bulk-populate 1000 rows per model for
        # benchmarking the raw-SQL helpers below.
        if False:
            TestModelA.objects.all().delete()
            for i in range(1000):
                a = TestModelA.objects.create(field1=str(i % 100))
                b = TestModelB.objects.create(field1=str(i % 100), field2=str(i % 200))
                c = TestModelC.objects.create(
                    field1=str(i % 100), field2=str(i % 200), field3=str(i % 300)
                )
                if i % 100 == 0:
                    print(i)
        # Benchmark the two hand-written SQL variants, then stop.
        f = print_timing(poly_sql_query, iterations=1000)
        f()
        f = print_timing(poly_sql_query2, iterations=1000)
        f()
        return
        # NOTE: everything below is unreachable (after the return above);
        # kept as toggleable scratch code.
        NormalModelA.objects.all().delete()
        a = NormalModelA.objects.create(field1="A1")
        b = NormalModelB.objects.create(field1="B1", field2="B2")
        c = NormalModelC.objects.create(field1="C1", field2="C2", field3="C3")
        qs = TestModelA.objects.raw("SELECT * from pexp_testmodela")
        for o in list(qs):
            print(o)
def poly_sql_query():
    """Time a hand-written LEFT JOIN over the polymorphic test tables.

    The random field1 value is passed as a query parameter instead of
    being %-interpolated into the SQL string, letting the database
    driver handle quoting.
    """
    cursor = connection.cursor()
    cursor.execute(
        """
        SELECT id, pexp_testmodela.field1, pexp_testmodelb.field2, pexp_testmodelc.field3
        FROM pexp_testmodela
        LEFT OUTER JOIN pexp_testmodelb
        ON pexp_testmodela.id = pexp_testmodelb.testmodela_ptr_id
        LEFT OUTER JOIN pexp_testmodelc
        ON pexp_testmodelb.testmodela_ptr_id = pexp_testmodelc.testmodelb_ptr_id
        WHERE pexp_testmodela.field1=%s
        ORDER BY pexp_testmodela.id
        """,
        [rnd.randint(0, 100)],
    )
    # row=cursor.fetchone()
    return
def poly_sql_query2():
    """Time the single-table variant of the query for comparison.

    Same parameterization as poly_sql_query: the random value is bound
    via the driver rather than %-interpolated into the SQL text.
    """
    cursor = connection.cursor()
    cursor.execute(
        """
        SELECT id, pexp_testmodela.field1
        FROM pexp_testmodela
        WHERE pexp_testmodela.field1=%s
        ORDER BY pexp_testmodela.id
        """,
        [rnd.randint(0, 100)],
    )
    # row=cursor.fetchone()
    return
|
{
"content_hash": "46a5d6accdb3f7228a7000db3c4af8e1",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 89,
"avg_line_length": 29.016806722689076,
"alnum_prop": 0.5792064871126557,
"repo_name": "chrisglass/django_polymorphic",
"id": "b26026ae94bb84c966cb2861ebed3fd87efdb699",
"size": "3477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/pexp/management/commands/p2cmd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "872"
},
{
"name": "Python",
"bytes": "181136"
}
],
"symlink_target": ""
}
|
from django.http import Http404
from django.contrib.auth import get_user_model
from django.db.models import Count, Max, Prefetch
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from .models import Thread, Post, Category
from .serializers import ThreadListSerializer, ThreadCreateSerializer, PostSerializer, CategoryListSerializer, \
CategoryDetailSerializer, ThreadDetailSerializer, PostNewSerializer
User = get_user_model()
class CategoryListView(generics.ListAPIView):
    """
    Returns all Categories
    Along with the Counts of Threads and Posts in each category
    """
    # Annotating Counts over two different multi-valued relations joins
    # threads AND threads__posts, multiplying rows: without distinct=True
    # a category with 2 threads and 5 posts reports thread_count=5.
    # See Django docs: "Combining multiple aggregations".
    queryset = Category.objects.all().prefetch_related(
        'threads',
        'threads__posts'
    ).annotate(
        thread_count=Count('threads', distinct=True)
    ).annotate(
        latest=Max('threads__posts__created_date')
    ).annotate(
        post_count=Count('threads__posts', distinct=True)
    )
    serializer_class = CategoryListSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class CategoryDetailView(generics.RetrieveAPIView):
    """
    Returns a Single Category and all associated threads
    """
    # Threads are prefetched with a per-thread post count and the
    # timestamp of the latest post, so serialization issues no extra
    # queries per thread.
    _thread_qs = Thread.objects.prefetch_related('posts').annotate(
        post_count=Count('posts')
    ).annotate(
        latest=Max('posts__created_date')
    )
    queryset = Category.objects.all().prefetch_related(
        Prefetch('threads', queryset=_thread_qs)
    )
    serializer_class = CategoryDetailSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class ThreadNewView(generics.CreateAPIView):
    """
    Creates a new Thread and its first post
    """
    queryset = Thread.objects.all()
    serializer_class = ThreadCreateSerializer
    permission_classes = (permissions.IsAuthenticated, )

    def create(self, request, *args, **kwargs):
        """Create a thread plus its opening post in a single request.

        Expects ``post_content`` in the payload for the first post's
        text; answers 400 (instead of crashing with a KeyError -> 500)
        when it is missing.
        """
        request.data['author'] = request.user.id
        if 'post_content' not in request.data:
            return Response(
                {'post_content': ['This field is required.']},
                status=status.HTTP_400_BAD_REQUEST,
            )
        post_text = request.data.pop('post_content')
        # Save thread info
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        thread = serializer.save()
        # Create first post
        Post(author=request.user, text=post_text, thread=thread).save()
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class ThreadDetailView(generics.RetrieveAPIView):
    """
    Gets a single thread and all its associated posts
    """
    # Posts and their authors are prefetched so serializing the thread
    # does not issue one query per post/author.
    queryset = Thread.objects.prefetch_related('posts', 'posts__author').all()
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    serializer_class = ThreadDetailSerializer
class PostNewView(generics.CreateAPIView):
    """
    Creates a new post
    """
    queryset = Post.objects.all()
    serializer_class = PostNewSerializer
    permission_classes = (permissions.IsAuthenticated,)

    def create(self, request, *args, **kwargs):
        # Stamp the authenticated user as the post's author before
        # delegating to the standard CreateAPIView flow.
        request.data['author'] = request.user.id
        return super(PostNewView,self).create(request,*args,**kwargs)
|
{
"content_hash": "e5cb254dceba6be2db1dde168ddb8e33",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 112,
"avg_line_length": 33.67741935483871,
"alnum_prop": 0.6864623243933589,
"repo_name": "Axiologue/AxiologueAPI",
"id": "8c52849729719a4eda56c27ed31c4c9a82cd8754",
"size": "3132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forum/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8480"
},
{
"name": "JavaScript",
"bytes": "2510"
},
{
"name": "Python",
"bytes": "150246"
}
],
"symlink_target": ""
}
|
"""
Modules required to work with ironic_inspector:
https://pypi.python.org/pypi/ironic-inspector
"""
import eventlet
from futurist import periodics
from oslo_log import log as logging
from oslo_utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common import keystone
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conf import CONF
from ironic.drivers import base
LOG = logging.getLogger(__name__)
CONF.import_opt('auth_strategy', 'ironic.api.app')
client = importutils.try_import('ironic_inspector_client')
INSPECTOR_API_VERSION = (1, 0)
class Inspector(base.InspectInterface):
    """In-band inspection via ironic-inspector project."""

    @classmethod
    def create_if_enabled(cls, driver_name):
        """Create instance of Inspector if it's enabled.

        Reports log warning with given driver_name if it's not.

        :return: Inspector instance or None
        """
        if CONF.inspector.enabled:
            return cls()
        else:
            # Implicitly returns None so the caller can skip the interface.
            LOG.info(_LI("Inspection via ironic-inspector is disabled in "
                         "configuration for driver %s. To enable, change "
                         "[inspector] enabled = True."), driver_name)

    def __init__(self):
        # Refuse to load when the feature is disabled or the optional
        # client library is not installed.
        if not CONF.inspector.enabled:
            raise exception.DriverLoadError(
                _('ironic-inspector support is disabled'))
        if not client:
            raise exception.DriverLoadError(
                _('python-ironic-inspector-client Python module not found'))

    def get_properties(self):
        """Return the properties of the interface.

        :returns: dictionary of <property name>:<property description> entries.
        """
        return {}  # no properties

    def validate(self, task):
        """Validate the driver-specific inspection information.

        If invalid, raises an exception; otherwise returns None.

        :param task: a task from TaskManager.
        """
        # NOTE(deva): this is not callable if inspector is disabled
        # so don't raise an exception -- just pass.
        pass

    def inspect_hardware(self, task):
        """Inspect hardware to obtain the hardware properties.

        This particular implementation only starts inspection using
        ironic-inspector. Results will be checked in a periodic task.

        :param task: a task from TaskManager.
        :returns: states.INSPECTING
        """
        LOG.debug('Starting inspection for node %(uuid)s using '
                  'ironic-inspector', {'uuid': task.node.uuid})
        # NOTE(dtantsur): we're spawning a short-living green thread so that
        # we can release a lock as soon as possible and allow ironic-inspector
        # to operate on a node.
        eventlet.spawn_n(_start_inspection, task.node.uuid, task.context)
        return states.INSPECTING

    @periodics.periodic(spacing=CONF.inspector.status_check_period,
                        enabled=CONF.inspector.enabled)
    def _periodic_check_result(self, manager, context):
        """Periodic task checking results of inspection."""
        # Only nodes currently being inspected are of interest.
        filters = {'provision_state': states.INSPECTING}
        node_iter = manager.iter_nodes(filters=filters)
        for node_uuid, driver in node_iter:
            try:
                lock_purpose = 'checking hardware inspection status'
                # A shared lock suffices for the read-only check;
                # _check_status upgrades it only when an update is needed.
                with task_manager.acquire(context, node_uuid,
                                          shared=True,
                                          purpose=lock_purpose) as task:
                    _check_status(task)
            except (exception.NodeLocked, exception.NodeNotFound):
                # Node is busy or gone; it will be retried next period.
                continue
def _call_inspector(func, uuid, context):
    """Invoke an inspector client call with the shared keyword arguments.

    Adds the pinned API version and, when configured, the explicit
    service URL, plus the caller's auth token.
    """
    call_kwargs = {'api_version': INSPECTOR_API_VERSION}
    # NOTE(dtantsur): due to bug #1428652 None is not accepted for base_url.
    service_url = CONF.inspector.service_url
    if service_url:
        call_kwargs['base_url'] = service_url
    return func(uuid, auth_token=context.auth_token, **call_kwargs)
def _start_inspection(node_uuid, context):
    """Call to inspector to start inspection.

    Runs in a green thread spawned by Inspector.inspect_hardware; on
    failure the node is moved to the inspection-failed state.
    """
    context.ensure_thread_contain_context()
    try:
        _call_inspector(client.introspect, node_uuid, context)
    except Exception as exc:
        LOG.exception(_LE('Exception during contacting ironic-inspector '
                          'for inspection of node %(node)s: %(err)s'),
                      {'node': node_uuid, 'err': exc})
        # NOTE(dtantsur): if acquire fails our last option is to rely on
        # timeout
        lock_purpose = 'recording hardware inspection error'
        with task_manager.acquire(context, node_uuid,
                                  purpose=lock_purpose) as task:
            task.node.last_error = _('Failed to start inspection: %s') % exc
            task.process_event('fail')
    else:
        LOG.info(_LI('Node %s was sent to inspection to ironic-inspector'),
                 node_uuid)
def _check_status(task):
    """Check inspection status for node given by a task.

    Entered with a shared lock from the periodic task; upgrades to an
    exclusive lock only once inspection has finished or failed and the
    node actually needs updating.
    """
    node = task.node
    if node.provision_state != states.INSPECTING:
        return
    if not isinstance(task.driver.inspect, Inspector):
        # Some other inspect interface is responsible for this node.
        return
    LOG.debug('Calling to inspector to check status of node %s',
              task.node.uuid)
    # NOTE(dtantsur): periodic tasks do not have proper tokens in context
    if CONF.auth_strategy == 'keystone':
        task.context.auth_token = keystone.get_admin_auth_token()
    try:
        status = _call_inspector(client.get_status, node.uuid, task.context)
    except Exception:
        # NOTE(dtantsur): get_status should not normally raise
        # let's assume it's a transient failure and retry later
        LOG.exception(_LE('Unexpected exception while getting '
                          'inspection status for node %s, will retry later'),
                      node.uuid)
        return
    error = status.get('error')
    finished = status.get('finished')
    if not error and not finished:
        # Inspection still in progress; check again on the next run.
        return
    # If the inspection has finished or failed, we need to update the node, so
    # upgrade our lock to an exclusive one.
    task.upgrade_lock()
    node = task.node
    if error:
        LOG.error(_LE('Inspection failed for node %(uuid)s '
                      'with error: %(err)s'),
                  {'uuid': node.uuid, 'err': error})
        node.last_error = (_('ironic-inspector inspection failed: %s')
                           % error)
        task.process_event('fail')
    elif finished:
        LOG.info(_LI('Inspection finished successfully for node %s'),
                 node.uuid)
        task.process_event('done')
|
{
"content_hash": "6650169c626dcbc363d6affc59c9c720",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 79,
"avg_line_length": 36.32446808510638,
"alnum_prop": 0.6216137062527456,
"repo_name": "bacaldwell/ironic",
"id": "907ad21960ec28a06d942d5ce1e666ee308666c8",
"size": "7375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/drivers/modules/inspector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "4207766"
},
{
"name": "Shell",
"bytes": "69242"
}
],
"symlink_target": ""
}
|
"""
Content negotiation deals with selecting an appropriate renderer given the
incoming request. Typically this will be based on the request's Accept header.
"""
from __future__ import unicode_literals
from django.http import Http404
from rest_framework import HTTP_HEADER_ENCODING, exceptions
from rest_framework.settings import api_settings
from rest_framework.utils.mediatypes import order_by_precedence, media_type_matches
from rest_framework.utils.mediatypes import _MediaType
class BaseContentNegotiation(object):
    """Abstract base class: subclasses pick a parser for the request
    body and a renderer for the response."""

    def select_parser(self, request, parsers):
        """Return the parser that should handle the incoming request."""
        raise NotImplementedError('.select_parser() must be implemented')

    def select_renderer(self, request, renderers, format_suffix=None):
        """Return a two-tuple of (renderer, media type) for the response."""
        raise NotImplementedError('.select_renderer() must be implemented')
class DefaultContentNegotiation(BaseContentNegotiation):
    # Settings object used for URL override parameter names; kept as a
    # class attribute so it can be swapped in tests/subclasses.
    settings = api_settings

    def select_parser(self, request, parsers):
        """
        Given a list of parsers and a media type, return the appropriate
        parser to handle the incoming request.

        Returns None when no parser matches the request's content type.
        """
        for parser in parsers:
            if media_type_matches(parser.media_type, request.content_type):
                return parser
        return None

    def select_renderer(self, request, renderers, format_suffix=None):
        """
        Given a request and a list of renderers, return a two-tuple of:
        (renderer, media type).

        Raises NotAcceptable if no renderer satisfies the Accept header.
        """
        # Allow URL style format override.  eg. "?format=json
        format_query_param = self.settings.URL_FORMAT_OVERRIDE
        format = format_suffix or request.query_params.get(format_query_param)

        if format:
            renderers = self.filter_renderers(renderers, format)

        accepts = self.get_accept_list(request)

        # Check the acceptable media types against each renderer,
        # attempting more specific media types first
        # NB. The inner loop here isn't as bad as it first looks :)
        # Worst case is we're looping over len(accept_list) * len(self.renderers)
        for media_type_set in order_by_precedence(accepts):
            for renderer in renderers:
                for media_type in media_type_set:
                    if media_type_matches(renderer.media_type, media_type):
                        # Return the most specific media type as accepted.
                        media_type_wrapper = _MediaType(media_type)
                        if (
                            _MediaType(renderer.media_type).precedence >
                            media_type_wrapper.precedence
                        ):
                            # Eg client requests '*/*'
                            # Accepted media type is 'application/json'
                            full_media_type = ';'.join(
                                (renderer.media_type,) +
                                tuple('{0}={1}'.format(
                                    key, value.decode(HTTP_HEADER_ENCODING))
                                    for key, value in media_type_wrapper.params.items()))
                            return renderer, full_media_type
                        else:
                            # Eg client requests 'application/json; indent=8'
                            # Accepted media type is 'application/json; indent=8'
                            return renderer, media_type

        raise exceptions.NotAcceptable(available_renderers=renderers)

    def filter_renderers(self, renderers, format):
        """
        If there is a '.json' style format suffix, filter the renderers
        so that we only negotiation against those that accept that format.

        Raises Http404 when no renderer supports the requested format.
        """
        renderers = [renderer for renderer in renderers
                     if renderer.format == format]
        if not renderers:
            raise Http404
        return renderers

    def get_accept_list(self, request):
        """
        Given the incoming request, return a tokenised list of media
        type strings.

        Allows URL style accept override.  eg. "?accept=application/json"
        """
        header = request.META.get('HTTP_ACCEPT', '*/*')
        header = request.query_params.get(self.settings.URL_ACCEPT_OVERRIDE, header)
        return [token.strip() for token in header.split(',')]
|
{
"content_hash": "78ffa5033dc5ad78da15f66a0ab1a9d3",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 89,
"avg_line_length": 44.09278350515464,
"alnum_prop": 0.5997194295066636,
"repo_name": "hnarayanan/django-rest-framework",
"id": "663ec4c8a20f4c5c268a5b745c8606c879f2a188",
"size": "4277",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "rest_framework/negotiation.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "9950"
},
{
"name": "HTML",
"bytes": "44338"
},
{
"name": "JavaScript",
"bytes": "2546"
},
{
"name": "Python",
"bytes": "841538"
}
],
"symlink_target": ""
}
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    council_id = "WAE"
    addresses_name = "2021-03-01T10:16:29.232903/Democracy_Club__06May2021.tsv"
    stations_name = "2021-03-01T10:16:29.232903/Democracy_Club__06May2021.tsv"
    elections = ["2021-05-06"]
    csv_delimiter = "\t"

    # UPRNs with known-bad address data; these records are dropped.
    _SKIP_UPRNS = {
        "100061614139",  # KINGSLEY, CATTESHALL LANE, GODALMING
        "200001293683",  # 1 GREEN LANE VILLAS, GREEN LANE, CHURT, FARNHAM
        "100061610563",  # 75 UPPER HALE ROAD, FARNHAM
    }

    # Postcodes excluded from the import entirely.
    _SKIP_POSTCODES = {
        "GU9 9JT",
        "GU9 0HR",
        "GU10 2JT",
        "GU8 4BH",
        "GU7 3LG",
        "GU7 1LN",
    }

    def address_record_to_dict(self, record):
        """Drop known-bad records, otherwise defer to the base importer."""
        uprn = record.property_urn.strip().lstrip("0")
        if uprn in self._SKIP_UPRNS:
            return None
        if record.addressline6 in self._SKIP_POSTCODES:
            return None
        return super().address_record_to_dict(record)

    def station_record_to_dict(self, record):
        """Correct one polling place's address before delegating."""
        # Clock Barn Hall, Clock Barn Farm, Hambledon Road, Busbridge, Godalming
        if record.polling_place_id == "4959":
            record = record._replace(polling_place_address_3="Busbridge")
        return super().station_record_to_dict(record)
|
{
"content_hash": "b43172ad5e07ad1014d1e2431b9440c7",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 81,
"avg_line_length": 33.53846153846154,
"alnum_prop": 0.6062691131498471,
"repo_name": "DemocracyClub/UK-Polling-Stations",
"id": "83c8586d12919343be0f9c8f97f60aadb122225f",
"size": "1308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/data_importers/management/commands/import_waverley.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "32"
},
{
"name": "HTML",
"bytes": "85540"
},
{
"name": "JavaScript",
"bytes": "3399"
},
{
"name": "Procfile",
"bytes": "49"
},
{
"name": "Python",
"bytes": "1111337"
},
{
"name": "SCSS",
"bytes": "5742"
}
],
"symlink_target": ""
}
|
from mongo_connector.doc_managers.formatters import DefaultDocumentFormatter
class HzkgDocumentFormatter(DefaultDocumentFormatter):
    """
    original formatter:
    {
        "_id": ObjectId("57edbe3843ece042bb10ca9d"),
        "source": {
            "confidence": "0.6",
            "trackingId": "fd6d245b75096dfcf10a9905c377e28a0e53b103"
        },
        "claims": [
            {
                "p": "name",
                "o": "apple"
            },
            {
                "p": "date",
                "o": "2016-09-29"
            }
        ]
    }

    tranformed formatter:
    {
        "_id": ObjectId("57edbe3843ece042bb10ca9d"),
        "source_confidence": "0.6",
        "source_trackingId": "fd6d245b75096dfcf10a9905c377e28a0e53b103",
        "name": "apple",
        "date": "2016-09-29"
    }
    """

    def transform_element(self, key, value):
        # Yields one or more (key, value) pairs for a single field.
        # Lists of {"p": ..., "o": ...} claim dicts are flattened into
        # direct predicate -> object pairs.
        if isinstance(value, list):
            if len(value) > 0 and isinstance(value[0], dict):
                for podict in value:  # Field name [*] cannot contain '.'
                    yield podict["p"].replace(u'.', u'点'), podict["o"]
            else:  # list of string
                yield key, value
        elif isinstance(value, dict):
            # Recurse into nested documents; nested keys come back
            # prefixed with the parent key joined by '_'.
            formatted = self.format_document(value)
            for doc_key in formatted:
                yield "%s_%s" % (key, doc_key), formatted[doc_key]
        else:
            # We assume that transform_value will return a 'flat' value,
            # not a list or dict
            if key == u'名称':
                yield "name_suggest", { "input": [value] }  # autocomplete
            yield key, self.transform_value(value)

    def format_document(self, document):
        # Depth-first flattening of the whole document. Non-top-level
        # keys are emitted as "<dotted.path>_<key>".
        def flatten(doc, path):
            top_level = (len(path) == 0)
            if not top_level:
                path_string = ".".join(path)
            for k in doc:
                v = doc[k]
                if isinstance(v, dict):
                    path.append(k)
                    for inner_k, inner_v in flatten(v, path):
                        yield inner_k, inner_v
                    path.pop()
                else:
                    transformed = self.transform_element(k, v)
                    for new_k, new_v in transformed:
                        if top_level:
                            yield new_k, new_v
                        else:
                            yield "%s_%s" % (path_string, new_k), new_v
        return dict(flatten(document, []))
|
{
"content_hash": "1749e5dc6c166ad47a74c12b53586841",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 76,
"avg_line_length": 33.028169014084504,
"alnum_prop": 0.4997867803837953,
"repo_name": "LaoLiulaoliu/hzkgelastic2-doc-manager",
"id": "8a6c55bedc72691ae2e9e6bef073d9d4671e7e44",
"size": "2448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongo_connector/doc_managers/hzkgformatter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "49937"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
# globals
RNG = np.random.RandomState()
class SoftmaxLayer(object):
    """Multinomial logistic-regression output layer.

    Maps an input minibatch to class probabilities via
    softmax(x . W + b).
    """

    def __init__(self, x, in_dim, out_dim, layer_id):
        # Zero-initialised parameters; layer_id prefixes the shared
        # variable names for debugging.
        self.weights = theano.shared(value=np.zeros([in_dim, out_dim], dtype=theano.config.floatX),
                                     name=layer_id + 'weights',
                                     borrow=True
                                     )
        self.biases = theano.shared(value=np.zeros([out_dim], dtype=theano.config.floatX),
                                    name=layer_id + 'biases',
                                    borrow=True
                                    )
        # Parameter list in the order expected by gradient-update code.
        self.params = [self.weights, self.biases]
        self.input = x
        self.output = self.prob_y_given_x(x)
        # maybe put a switch here to check for nan/equivalent probs
        self.y_predict = T.argmax(self.output, axis=1)

    def prob_y_given_x(self, input_data):
        # Row-wise class probabilities; assumes input_data is
        # (batch, in_dim) -- TODO confirm with callers.
        return T.nnet.softmax(T.dot(input_data, self.weights) + self.biases)

    def negative_log_likelihood(self, labels):
        # Mean negative log-probability of the true label per example.
        return -T.mean(T.log(self.output)[T.arange(labels.shape[0]), labels])

    def errors(self, labels):
        # Fraction of examples whose argmax prediction differs from labels.
        return T.mean(T.neq(self.y_predict, labels))
class HiddenLayer(object):
    """Fully-connected layer: output = activation(x . W + b).

    When W is not supplied, weights are drawn uniformly from
    +/- sqrt(6 / (in_dim + out_dim)); pass W/b to share parameters.
    """

    def __init__(self, x, in_dim, out_dim, layer_id, W=None, b=None, activation=T.tanh):
        if W is None:
            W_values = np.asarray(RNG.uniform(low=-np.sqrt(6. / (in_dim + out_dim)),
                                              high=np.sqrt(6. / (in_dim + out_dim)),
                                              size=(in_dim, out_dim)),
                                  dtype=theano.config.floatX
                                  )
            W = theano.shared(value=W_values, name=layer_id + 'weights', borrow=True)
        if b is None:
            b_values = np.zeros((out_dim,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name=layer_id + 'biases', borrow=True)
        self.weights = W
        self.biases = b
        self.params = [self.weights, self.biases]
        self.input = x
        lin_out = T.dot(x, self.weights) + self.biases
        # activation=None yields a purely linear layer.
        self.output = lin_out if activation is None else activation(lin_out)
class ConvPoolLayer(object):
    """2D convolution followed by max-pooling and a tanh nonlinearity."""

    def __init__(self, x, filter_shape, image_shape, poolsize, layer_id):
        # Input feature-map counts must agree between image and filters.
        assert(image_shape[1] == filter_shape[1])
        self.input = x
        # Fan-in/fan-out for the uniform weight initialisation below.
        fan_in = np.prod(filter_shape[1:])
        fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize))
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        self.weights = theano.shared(
            np.asarray(
                RNG.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )
        # One bias per output feature map.
        b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.biases = theano.shared(value=b_values, borrow=True)
        conv_out = conv.conv2d(
            input=x,
            filters=self.weights,
            filter_shape=filter_shape,
            image_shape=image_shape
        )
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=poolsize,
            # todo put more options here
            ignore_border=True
        )
        # dimshuffle broadcasts the per-map biases across batch and
        # spatial dimensions.
        self.output = T.tanh(pooled_out + self.biases.dimshuffle('x', 0, 'x', 'x'))
        self.params = [self.weights, self.biases]
|
{
"content_hash": "75813a54e7338f3f0e240f9b6aba928f",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 99,
"avg_line_length": 35.524752475247524,
"alnum_prop": 0.5398550724637681,
"repo_name": "mitenjain/R3N",
"id": "18e6b9a1282a7ae68b5624a3df48d68a6f4f2e39",
"size": "3610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/layers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95719"
}
],
"symlink_target": ""
}
|
import inspect
from django.template.loader import render_to_string
from django.db import connection
import settings
from xformmanager.models import Metadata, FormDefModel, ElementDefModel
from reports.models import Case, SqlReport
from reports.util import get_whereclause
from shared import monitoring_report, Mother
'''Report file for custom Grameen reports'''
# see mvp.py for an explanation of how these are used.
# temporarily "privatizing" the name because grameen doesn't
# want this report to show up in the UI
def _monitoring(request):
    '''Safe Pregnancy Monitoring Report'''
    # NOTE: the docstring above doubles as the report's display title, so it
    # is kept verbatim.
    case_name = "Grameen Safe Pregnancies"
    try:
        safe_preg_case = Case.objects.get(name=case_name)
    except Case.DoesNotExist:
        return '''Sorry, it doesn't look like the forms that this report
                  depends on have been uploaded.'''
    return monitoring_report(request, safe_preg_case)
def _mother_summary(request):
    '''Individual Mother Summary'''
    # Intentionally private: only reachable from other reports that know the
    # case id to pass in, so it is never listed in the report UI.
    case_name = "Grameen Safe Pregnancies"
    try:
        preg_case = Case.objects.get(name=case_name)
    except Case.DoesNotExist:
        return '''Sorry, it doesn't look like the forms that this report
                  depends on have been uploaded.'''
    if "case_id" not in request.GET:
        return '''Sorry, you have to specify a mother using the case id
                  in the URL.'''
    case_id = request.GET["case_id"]
    case_data = preg_case.get_data_map_for_case(case_id)
    mom = Mother(preg_case, case_id, case_data)
    if mom.mother_name != request.GET["mother_name"]:
        return '''<p class="error">Sorry it appears that this id has been used by the CHW for
                  more than one mother.  Unfortunately, this means we can't
                  yet show you her data here.  Please remind your CHW's to
                  use unique case Ids!</p>
               '''
    # Every public attribute of the Mother object except the raw data map is
    # shown on the page, with underscores prettified for display.
    attr_names = [name for name in dir(mom)
                  if not name.startswith("_") and name != "data_map"]
    labels = [name.replace("_", " ") for name in attr_names]
    attr_pairs = zip(attr_names, labels)
    mom.hi_risk_reasons = _get_hi_risk_reason(mom)
    return render_to_string("custom/grameen/mother_details.html",
                            {"mother": mom, "attrs": attr_pairs,
                             # MEDIA_URL has to be passed explicitly here
                             "MEDIA_URL": settings.MEDIA_URL,
                             })
# Maps the registration card's blood-group value to its human-readable
# high-risk reason. Only the rhesus-negative groups are flagged; a mother has
# exactly one blood group, so at most one entry can ever apply.
_HI_RISK_BLOOD_GROUPS = {
    'onegative': "o-negative blood group",
    'anegative': "a-negative blood group",
    'abnegative': "ab-negative blood group",
    'bnegative': "b-negative blood group",
}

def _get_hi_risk_reason(mom):
    """Return a comma-separated string of the reasons this pregnancy is
    considered high-risk, or the empty string if none apply.

    `mom` is a Mother-like object exposing the registration form fields as
    attributes (ages/counts as numbers, yes-no questions as strings).
    """
    reasons = []
    if (mom.mother_age >= 35): reasons.append("35 or older")
    if (mom.mother_age <= 18): reasons.append("18 or younger")
    if (mom.mother_height == 'under_150'): reasons.append("mother height under 150cm")
    if (mom.previous_csection == 'yes'): reasons.append("previous c-section")
    if (mom.previous_newborn_death == 'yes'): reasons.append("previous newborn death")
    if (mom.previous_bleeding == 'yes'): reasons.append("previous bleeding")
    if (mom.previous_terminations >= 3): reasons.append("%s previous terminations" % mom.previous_terminations)
    if (mom.previous_pregnancies >= 5): reasons.append("%s previous pregnancies" % mom.previous_pregnancies)
    if (mom.heart_problems == 'yes'): reasons.append("heart problems")
    if (mom.diabetes == 'yes'): reasons.append("diabetes")
    if (mom.hip_problems == 'yes'): reasons.append("hip problems")
    if (mom.card_results_syphilis_result == 'positive'): reasons.append("positive for syphilis")
    if (mom.card_results_hepb_result == 'positive'): reasons.append("positive for hepb")
    if (mom.over_5_years == 'yes'): reasons.append("over 5 years since last pregnancy")
    if (mom.card_results_hb_test == 'below_normal'): reasons.append("low hb test")
    # Table lookup replaces four copy-pasted equality checks; the display
    # strings are unchanged.
    blood_group_reason = _HI_RISK_BLOOD_GROUPS.get(mom.card_results_blood_group)
    if blood_group_reason: reasons.append(blood_group_reason)
    return ", ".join(reasons)
def hi_risk_pregnancies(request):
    '''Hi-Risk Pregnancy Summary'''
    # Delegate to the shared helper, forcing the hi-risk flag on so the
    # summary only shows flagged pregnancies.
    query = request.GET.copy()
    query["sampledata_hi_risk"] = "yes"
    return _chw_submission_summary(request, query)
def chw_submission_details(request):
    '''Health Worker Submission Details'''
    # Thin wrapper: the docstring above is the report's display title; all
    # real work happens in the shared helper, driven by the raw GET params.
    return _chw_submission_summary(request, request.GET)
def _chw_submission_summary(request, params):
    """Render the CHW submission details table, filtered by `params`.

    `params` is a GET-style mapping; recognized keys are 'meta_username' and
    'sampledata_hi_risk' (passed into the SQL whereclause) plus 'follow'
    ('yes'/'no') which filters rows by whether their attachment has
    annotations.
    """
    # Private so it can be called from multiple reports with an extra
    # parameter. This was moved from a pure SqlReport to get the custom
    # annotation data in - an ugly/hacky hybrid approach that should be
    # cleaned up.
    # hard coded to our fixture.  bad bad!
    grameen_submission_details_id = 2
    # hard coded to our schema.  bad bad!
    form_def = ElementDefModel.objects.get(table_name="schema_intel_grameen_safe_motherhood_registration_v0_3").form
    report = SqlReport.objects.get(id=grameen_submission_details_id)
    cols = ('meta_username', 'sampledata_hi_risk')
    # Only whitelist the params that map to report columns.
    where_cols = dict([(key, val) for key, val in params.items() if key in cols])
    whereclause = get_whereclause(where_cols)
    # follow_filter: None = no filtering, True = only followed-up rows,
    # False = only rows without follow-up.
    follow_filter = None
    if "follow" in params:
        if params["follow"] == "yes":
            follow_filter = True
        elif params["follow"] == "no":
            follow_filter = False
    cols, data = report.get_data({"whereclause": whereclause})
    new_data = []
    for row in data:
        new_row_data = dict(zip(cols, row))
        row_id = new_row_data["Instance ID"]
        # A row is considered "followed up" when its attachment has at least
        # one annotation.
        meta = Metadata.objects.get(formdefmodel=form_def, raw_data=row_id)
        follow = meta.attachment.annotations.count() > 0
        if follow_filter is not None:
            if follow_filter and not follow:
                # filtering on true, but none found, don't include this
                continue
            elif not follow_filter and follow:
                # filtering on false, but found follows, don't include this
                continue
        new_row_data["Follow up?"] = "yes" if follow else "no"
        new_row_data["meta"] = meta
        new_row_data["attachment"] = meta.attachment
        new_data.append(new_row_data)
    # Only the first 6 columns are shown in the table header.
    cols = cols[:6]
    return render_to_string("custom/grameen/chw_submission_details.html",
                            {"MEDIA_URL": settings.MEDIA_URL, # MEDIA_URL has to be passed explicitly
                             "columns": cols,
                             "data": new_data})
|
{
"content_hash": "6cc553754c50bf1ec3d4b6d1f65ae25b",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 116,
"avg_line_length": 48.63013698630137,
"alnum_prop": 0.6305633802816901,
"repo_name": "commtrack/temp-aquatest",
"id": "5c969aff149993f20e2f62595d369bc82ce86cda",
"size": "7153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/reports/custom/grameen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "742874"
},
{
"name": "PHP",
"bytes": "2863"
},
{
"name": "Python",
"bytes": "3707591"
},
{
"name": "Shell",
"bytes": "490"
}
],
"symlink_target": ""
}
|
import os
import shutil
from collections import OrderedDict
from git import Repo
def get_raw_data():
    """Clone the CLDR data repositories into ../raw_data, pinned to a fixed
    CLDR release.

    Any existing raw_data directory is deleted first so the checkout always
    starts clean.
    """
    cldr_version = '31.0.1'
    raw_data_directory = "../raw_data"
    cldr_data = {
        'dates_full': {
            'url': 'https://github.com/unicode-cldr/cldr-dates-full.git',
            'dir': "{}/cldr_dates_full/".format(raw_data_directory)
        },
        'core': {
            'url': 'https://github.com/unicode-cldr/cldr-core.git',
            'dir': "{}/cldr_core/".format(raw_data_directory)
        },
        'rbnf': {
            'url': 'https://github.com/unicode-cldr/cldr-rbnf.git',
            'dir': "{}/cldr_rbnf/".format(raw_data_directory)
        },
    }
    if os.path.isdir(raw_data_directory):
        # remove current raw data so stale checkouts never linger
        shutil.rmtree(raw_data_directory)
    os.mkdir(raw_data_directory)
    for name, data in cldr_data.items():
        print('Cloning "{}" from: {}'.format(name, data['url']))
        repo = Repo.clone_from(data['url'], data['dir'], branch='master')
        # BUG FIX: 'co' is only a user-side git alias, not a real git
        # subcommand, so `repo.git.co(...)` fails on machines without that
        # alias configured; use the real 'checkout' subcommand to pin the
        # requested CLDR release.
        repo.git.checkout(cldr_version)
def get_dict_difference(parent_dict, child_dict):
    """Return an OrderedDict with only the parts of `child_dict` that differ
    from `parent_dict`.

    Lists become the sorted set difference, nested dicts are diffed
    recursively, and scalars are kept when they differ. Keys whose computed
    difference is empty/falsy are omitted entirely.
    """
    diff = OrderedDict()
    for key, child_value in child_dict.items():
        parent_value = parent_dict.get(key)
        if not parent_value:
            # Key missing (or falsy) in the parent: the child value is all new.
            delta = child_value
        elif isinstance(child_value, list):
            delta = sorted(set(child_value) - set(parent_value))
        elif isinstance(child_value, dict):
            delta = get_dict_difference(parent_value, child_value)
        else:
            delta = child_value if child_value != parent_value else None
        if delta:
            diff[key] = delta
    return diff
def combine_dicts(primary_dict, supplementary_dict):
    """Recursively merge two dicts into a new OrderedDict.

    For keys present in both: lists are concatenated (primary first), nested
    dicts are merged recursively, and any other value is taken from the
    supplementary dict. Key order is primary's keys first, then the
    supplementary-only keys in their original order.
    """
    merged = OrderedDict()
    for key, primary_value in primary_dict.items():
        if key not in supplementary_dict:
            merged[key] = primary_value
            continue
        supplementary_value = supplementary_dict[key]
        if isinstance(primary_value, list):
            merged[key] = primary_value + supplementary_value
        elif isinstance(primary_value, dict):
            merged[key] = combine_dicts(primary_value, supplementary_value)
        else:
            # Scalar conflict: the supplementary value wins.
            merged[key] = supplementary_value
    for key, supplementary_value in supplementary_dict.items():
        if key not in primary_dict:
            merged[key] = supplementary_value
    return merged
|
{
"content_hash": "8841212a68032322855b1fea51c1acfc",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 97,
"avg_line_length": 36.08450704225352,
"alnum_prop": 0.6030444964871194,
"repo_name": "scrapinghub/dateparser",
"id": "d43843892a67434b55db4fd00956e38f0752d73d",
"size": "2562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dateparser_scripts/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1564308"
}
],
"symlink_target": ""
}
|
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Macie Classic"
prefix = "macie"
class Action(BaseAction):
    """An IAM action for Amazon Macie Classic, namespaced with the
    ``macie:`` prefix."""

    def __init__(self, action: str = None) -> None:
        super().__init__(prefix, action)
class ARN(BaseARN):
    """An ARN for an Amazon Macie Classic resource (service ``macie``)."""

    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix, resource=resource, region=region, account=account
        )
# One Action constant per IAM action of the Macie service, mirroring the
# operation names AWS defines for the ``macie:`` prefix.
AcceptInvitation = Action("AcceptInvitation")
ArchiveFindings = Action("ArchiveFindings")
AssociateMemberAccount = Action("AssociateMemberAccount")
AssociateS3Resources = Action("AssociateS3Resources")
BatchGetCustomDataIdentifiers = Action("BatchGetCustomDataIdentifiers")
CreateClassificationJob = Action("CreateClassificationJob")
CreateCustomDataIdentifier = Action("CreateCustomDataIdentifier")
CreateFindingsFilter = Action("CreateFindingsFilter")
CreateInvitations = Action("CreateInvitations")
CreateMember = Action("CreateMember")
CreateSampleFindings = Action("CreateSampleFindings")
DeclineInvitations = Action("DeclineInvitations")
DeleteCustomDataIdentifier = Action("DeleteCustomDataIdentifier")
DeleteFindingsFilter = Action("DeleteFindingsFilter")
DeleteInvitations = Action("DeleteInvitations")
DeleteMember = Action("DeleteMember")
DescribeBuckets = Action("DescribeBuckets")
DescribeClassificationJob = Action("DescribeClassificationJob")
DescribeOrganizationConfiguration = Action("DescribeOrganizationConfiguration")
DisableMacie = Action("DisableMacie")
DisableOrganizationAdminAccount = Action("DisableOrganizationAdminAccount")
DisassociateFromMasterAccount = Action("DisassociateFromMasterAccount")
DisassociateMember = Action("DisassociateMember")
DisassociateMemberAccount = Action("DisassociateMemberAccount")
DisassociateS3Resources = Action("DisassociateS3Resources")
EnableMacie = Action("EnableMacie")
EnableOrganizationAdminAccount = Action("EnableOrganizationAdminAccount")
GetBucketStatistics = Action("GetBucketStatistics")
GetClassificationExportConfiguration = Action("GetClassificationExportConfiguration")
GetCustomDataIdentifier = Action("GetCustomDataIdentifier")
GetFindingStatistics = Action("GetFindingStatistics")
GetFindings = Action("GetFindings")
GetFindingsFilter = Action("GetFindingsFilter")
GetInvitationsCount = Action("GetInvitationsCount")
GetMacieSession = Action("GetMacieSession")
GetMasterAccount = Action("GetMasterAccount")
GetMember = Action("GetMember")
GetUsageStatistics = Action("GetUsageStatistics")
GetUsageTotals = Action("GetUsageTotals")
ListClassificationJobs = Action("ListClassificationJobs")
ListCustomDataIdentifiers = Action("ListCustomDataIdentifiers")
ListFindings = Action("ListFindings")
ListFindingsFilters = Action("ListFindingsFilters")
ListInvitations = Action("ListInvitations")
ListMemberAccounts = Action("ListMemberAccounts")
ListMembers = Action("ListMembers")
ListOrganizationAdminAccounts = Action("ListOrganizationAdminAccounts")
ListS3Resources = Action("ListS3Resources")
ListTagsForResources = Action("ListTagsForResources")
PutClassificationExportConfiguration = Action("PutClassificationExportConfiguration")
TagResource = Action("TagResource")
TestCustomDataIdentifier = Action("TestCustomDataIdentifier")
UnarchiveFindings = Action("UnarchiveFindings")
UntagResource = Action("UntagResource")
UpdateClassificationJob = Action("UpdateClassificationJob")
UpdateFindingsFilter = Action("UpdateFindingsFilter")
UpdateMemberSession = Action("UpdateMemberSession")
UpdateOrganizationConfiguration = Action("UpdateOrganizationConfiguration")
UpdateS3Resources = Action("UpdateS3Resources")
UpdateSession = Action("UpdateSession")
|
{
"content_hash": "727c79150d1c3fcc2ff5deda4849acae",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 88,
"avg_line_length": 46.721518987341774,
"alnum_prop": 0.8206448117041453,
"repo_name": "cloudtools/awacs",
"id": "f38b384a160f8b8c68067be620eff4ba46c58cfd",
"size": "3807",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "awacs/macie.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "963483"
}
],
"symlink_target": ""
}
|
from neutron.api.v2 import attributes as attr
from neutron.common.test_lib import test_config
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.db import securitygroups_db
from neutron.extensions import portsecurity as psec
from neutron.extensions import securitygroup as ext_sg
from neutron.manager import NeutronManager
from neutron import policy
from neutron.tests.unit import test_db_plugin
# Dotted import path of the test plugin below; installed into test_config by
# PortSecurityDBTestCase.setUp so the framework loads it as the core plugin.
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_portsecurity.'
                   'PortSecurityTestPlugin')
class PortSecurityTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    """Base test case that records whether the loaded plugin supports the
    security-group extension, so individual tests can skip accordingly."""
    def setUp(self, plugin=None):
        super(PortSecurityTestCase, self).setUp()
        # Check if a plugin supports security groups
        plugin_obj = NeutronManager.get_plugin()
        self._skip_security_group = ('security-group' not in
                                     plugin_obj.supported_extension_aliases)
    def tearDown(self):
        super(PortSecurityTestCase, self).tearDown()
        # reset so the flag never leaks between tests
        self._skip_security_group = None
class PortSecurityTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                             securitygroups_db.SecurityGroupDbMixin,
                             portsecurity_db.PortSecurityDbMixin):
    """Test plugin that implements necessary calls on create/delete port for
    associating ports with security groups and port security.
    """
    supported_extension_aliases = ["security-group", "port-security"]
    # Policy actions enforced before a caller may set port_security_enabled.
    port_security_enabled_create = "create_port:port_security_enabled"
    port_security_enabled_update = "update_port:port_security_enabled"
    def _enforce_set_auth(self, context, resource, action):
        # Delegates to the policy engine; raises on authorization failure.
        return policy.enforce(context, action, resource)
    def create_network(self, context, network):
        # Make sure the tenant's default security group exists before the
        # network row is created.
        tenant_id = self._get_tenant_id_for_create(context, network['network'])
        self._ensure_default_security_group(context, tenant_id)
        with context.session.begin(subtransactions=True):
            neutron_db = super(PortSecurityTestPlugin, self).create_network(
                context, network)
            neutron_db.update(network['network'])
            self._process_network_create_port_security(
                context, neutron_db)
            self._extend_network_port_security_dict(context, neutron_db)
        return neutron_db
    def update_network(self, context, id, network):
        with context.session.begin(subtransactions=True):
            neutron_db = super(PortSecurityTestPlugin, self).update_network(
                context, id, network)
            # only rewrite the binding when the request actually carries it
            if psec.PORTSECURITY in network['network']:
                self._update_network_security_binding(
                    context, id, network['network'][psec.PORTSECURITY])
            self._extend_network_port_security_dict(
                context, neutron_db)
        return neutron_db
    def get_network(self, context, id, fields=None):
        with context.session.begin(subtransactions=True):
            net = super(PortSecurityTestPlugin, self).get_network(
                context, id)
            self._extend_network_port_security_dict(context, net)
        return self._fields(net, fields)
    def create_port(self, context, port):
        # enforce policy only when the caller explicitly set the attribute
        if attr.is_attr_set(port['port'][psec.PORTSECURITY]):
            self._enforce_set_auth(context, port,
                                   self.port_security_enabled_create)
        p = port['port']
        with context.session.begin(subtransactions=True):
            p[ext_sg.SECURITYGROUPS] = self._get_security_groups_on_port(
                context, port)
            neutron_db = super(PortSecurityTestPlugin, self).create_port(
                context, port)
            p.update(neutron_db)
            (port_security, has_ip) = self._determine_port_security_and_has_ip(
                context, p)
            p[psec.PORTSECURITY] = port_security
            self._process_port_security_create(context, p)
            if (attr.is_attr_set(p.get(ext_sg.SECURITYGROUPS)) and
                not (port_security and has_ip)):
                raise psec.PortSecurityAndIPRequiredForSecurityGroups()
            # Port requires ip and port_security enabled for security group
            if has_ip and port_security:
                self._ensure_default_security_group_on_port(context, port)
            if (p.get(ext_sg.SECURITYGROUPS) and p[psec.PORTSECURITY]):
                self._process_port_create_security_group(
                    context, p, p[ext_sg.SECURITYGROUPS])
            self._extend_port_port_security_dict(context, p)
        return port['port']
    def update_port(self, context, id, port):
        self._enforce_set_auth(context, port,
                               self.port_security_enabled_update)
        delete_security_groups = self._check_update_deletes_security_groups(
            port)
        has_security_groups = self._check_update_has_security_groups(port)
        with context.session.begin(subtransactions=True):
            ret_port = super(PortSecurityTestPlugin, self).update_port(
                context, id, port)
            # copy values over - but not fixed_ips
            port['port'].pop('fixed_ips', None)
            ret_port.update(port['port'])
            # populate port_security setting
            if psec.PORTSECURITY not in ret_port:
                ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
                    context, id)
            has_ip = self._ip_on_port(ret_port)
            # checks if security groups were updated adding/modifying
            # security groups, port security is set and port has ip
            if (has_security_groups and (not ret_port[psec.PORTSECURITY]
                                         or not has_ip)):
                raise psec.PortSecurityAndIPRequiredForSecurityGroups()
            # Port security/IP was updated off. Need to check that no security
            # groups are on port.
            if (ret_port[psec.PORTSECURITY] is not True or not has_ip):
                if has_security_groups:
                    raise psec.PortSecurityAndIPRequiredForSecurityGroups()
                # get security groups on port
                filters = {'port_id': [id]}
                security_groups = (super(PortSecurityTestPlugin, self).
                                   _get_port_security_group_bindings(
                                       context, filters))
                if security_groups and not delete_security_groups:
                    raise psec.PortSecurityPortHasSecurityGroup()
            if (delete_security_groups or has_security_groups):
                # delete the port binding and read it with the new rules.
                self._delete_port_security_group_bindings(context, id)
                sgids = self._get_security_groups_on_port(context, port)
                # process port create sec groups needs port id
                port['id'] = id
                self._process_port_create_security_group(context,
                                                         ret_port, sgids)
            if psec.PORTSECURITY in port['port']:
                self._update_port_security_binding(
                    context, id, ret_port[psec.PORTSECURITY])
            self._extend_port_port_security_dict(context, ret_port)
        return ret_port
class PortSecurityDBTestCase(PortSecurityTestCase):
    """Runs the port-security tests against PortSecurityTestPlugin by
    temporarily installing it as the core plugin via test_config."""
    def setUp(self, plugin=None):
        test_config['plugin_name_v2'] = DB_PLUGIN_KLASS
        super(PortSecurityDBTestCase, self).setUp()
    def tearDown(self):
        # remove the override so other test cases get their own plugin
        del test_config['plugin_name_v2']
        super(PortSecurityDBTestCase, self).tearDown()
class TestPortSecurity(PortSecurityDBTestCase):
    """End-to-end API tests for the port-security extension: defaults,
    network-level inheritance, interaction with security groups, and policy
    enforcement on shared networks."""
    def test_create_network_with_portsecurity_mac(self):
        res = self._create_network('json', 'net1', True)
        net = self.deserialize('json', res)
        self.assertEqual(net['network'][psec.PORTSECURITY], True)
    def test_create_network_with_portsecurity_false(self):
        res = self._create_network('json', 'net1', True,
                                   arg_list=('port_security_enabled',),
                                   port_security_enabled=False)
        net = self.deserialize('json', res)
        self.assertEqual(net['network'][psec.PORTSECURITY], False)
    def test_updating_network_port_security(self):
        res = self._create_network('json', 'net1', True,
                                   port_security_enabled='True')
        net = self.deserialize('json', res)
        self.assertEqual(net['network'][psec.PORTSECURITY], True)
        update_net = {'network': {psec.PORTSECURITY: False}}
        req = self.new_update_request('networks', update_net,
                                      net['network']['id'])
        net = self.deserialize('json', req.get_response(self.api))
        self.assertEqual(net['network'][psec.PORTSECURITY], False)
        # re-read to confirm the new value was persisted, not just echoed
        req = self.new_show_request('networks', net['network']['id'])
        net = self.deserialize('json', req.get_response(self.api))
        self.assertEqual(net['network'][psec.PORTSECURITY], False)
    def test_create_port_default_true(self):
        with self.network() as net:
            res = self._create_port('json', net['network']['id'])
            port = self.deserialize('json', res)
            self.assertEqual(port['port'][psec.PORTSECURITY], True)
            self._delete('ports', port['port']['id'])
    def test_create_port_passing_true(self):
        res = self._create_network('json', 'net1', True,
                                   arg_list=('port_security_enabled',),
                                   port_security_enabled=True)
        net = self.deserialize('json', res)
        res = self._create_port('json', net['network']['id'])
        port = self.deserialize('json', res)
        self.assertEqual(port['port'][psec.PORTSECURITY], True)
        self._delete('ports', port['port']['id'])
    def test_create_port_on_port_security_false_network(self):
        # ports inherit port_security_enabled=False from their network
        res = self._create_network('json', 'net1', True,
                                   arg_list=('port_security_enabled',),
                                   port_security_enabled=False)
        net = self.deserialize('json', res)
        res = self._create_port('json', net['network']['id'])
        port = self.deserialize('json', res)
        self.assertEqual(port['port'][psec.PORTSECURITY], False)
        self._delete('ports', port['port']['id'])
    def test_create_port_security_overrides_network_value(self):
        res = self._create_network('json', 'net1', True,
                                   arg_list=('port_security_enabled',),
                                   port_security_enabled=False)
        net = self.deserialize('json', res)
        res = self._create_port('json', net['network']['id'],
                                arg_list=('port_security_enabled',),
                                port_security_enabled=True)
        port = self.deserialize('json', res)
        self.assertEqual(port['port'][psec.PORTSECURITY], True)
        self._delete('ports', port['port']['id'])
    def test_create_port_with_default_security_group(self):
        if self._skip_security_group:
            self.skipTest("Plugin does not support security groups")
        with self.network() as net:
            with self.subnet(network=net):
                res = self._create_port('json', net['network']['id'])
                port = self.deserialize('json', res)
                self.assertEqual(port['port'][psec.PORTSECURITY], True)
                self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1)
                self._delete('ports', port['port']['id'])
    def test_update_port_security_off_with_security_group(self):
        if self._skip_security_group:
            self.skipTest("Plugin does not support security groups")
        with self.network() as net:
            with self.subnet(network=net):
                res = self._create_port('json', net['network']['id'])
                port = self.deserialize('json', res)
                self.assertEqual(port['port'][psec.PORTSECURITY], True)
                update_port = {'port': {psec.PORTSECURITY: False}}
                req = self.new_update_request('ports', update_port,
                                              port['port']['id'])
                res = req.get_response(self.api)
                # 409: cannot disable port security while a security group
                # is still bound to the port
                self.assertEqual(res.status_int, 409)
                # remove security group on port
                update_port = {'port': {ext_sg.SECURITYGROUPS: None}}
                req = self.new_update_request('ports', update_port,
                                              port['port']['id'])
                self.deserialize('json', req.get_response(self.api))
                self._delete('ports', port['port']['id'])
    def test_update_port_remove_port_security_security_group(self):
        if self._skip_security_group:
            self.skipTest("Plugin does not support security groups")
        with self.network() as net:
            with self.subnet(network=net):
                res = self._create_port('json', net['network']['id'],
                                        arg_list=('port_security_enabled',),
                                        port_security_enabled=True)
                port = self.deserialize('json', res)
                self.assertEqual(port['port'][psec.PORTSECURITY], True)
                # remove security group on port
                update_port = {'port': {ext_sg.SECURITYGROUPS: None,
                                        psec.PORTSECURITY: False}}
                req = self.new_update_request('ports', update_port,
                                              port['port']['id'])
                port = self.deserialize('json', req.get_response(self.api))
                self.assertEqual(port['port'][psec.PORTSECURITY], False)
                self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 0)
                self._delete('ports', port['port']['id'])
    def test_update_port_remove_port_security_security_group_read(self):
        if self._skip_security_group:
            self.skipTest("Plugin does not support security groups")
        with self.network() as net:
            with self.subnet(network=net):
                res = self._create_port('json', net['network']['id'],
                                        arg_list=('port_security_enabled',),
                                        port_security_enabled=True)
                port = self.deserialize('json', res)
                self.assertEqual(port['port'][psec.PORTSECURITY], True)
                # remove security group on port
                update_port = {'port': {ext_sg.SECURITYGROUPS: None,
                                        psec.PORTSECURITY: False}}
                req = self.new_update_request('ports', update_port,
                                              port['port']['id'])
                self.deserialize('json', req.get_response(self.api))
                # re-enable port security and re-attach the original group
                sg_id = port['port'][ext_sg.SECURITYGROUPS]
                update_port = {'port': {ext_sg.SECURITYGROUPS: [sg_id[0]],
                                        psec.PORTSECURITY: True}}
                req = self.new_update_request('ports', update_port,
                                              port['port']['id'])
                port = self.deserialize('json', req.get_response(self.api))
                self.assertEqual(port['port'][psec.PORTSECURITY], True)
                self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1)
                self._delete('ports', port['port']['id'])
    def test_create_port_security_off_shared_network(self):
        # only the network owner may disable port security on a shared net
        with self.network(shared=True) as net:
            with self.subnet(network=net):
                res = self._create_port('json', net['network']['id'],
                                        arg_list=('port_security_enabled',),
                                        port_security_enabled=False,
                                        tenant_id='not_network_owner',
                                        set_context=True)
                self.deserialize('json', res)
                self.assertEqual(res.status_int, 403)
    def test_update_port_security_off_shared_network(self):
        with self.network(shared=True, do_delete=False) as net:
            with self.subnet(network=net, do_delete=False):
                res = self._create_port('json', net['network']['id'],
                                        tenant_id='not_network_owner',
                                        set_context=True)
                port = self.deserialize('json', res)
                # remove security group on port
                update_port = {'port': {ext_sg.SECURITYGROUPS: None,
                                        psec.PORTSECURITY: False}}
                req = self.new_update_request('ports', update_port,
                                              port['port']['id'])
                req.environ['neutron.context'] = context.Context(
                    '', 'not_network_owner')
                res = req.get_response(self.api)
                # TODO(salvatore-orlando): Expected error is 404 because
                # the current API controller always returns this error
                # code for any policy check failures on update.
                # It should be 404 when the caller cannot access the whole
                # resource, and 403 when it cannot access a single attribute
                self.assertEqual(res.status_int, 404)
{
"content_hash": "ebaf24e8bb90bf25c0f9b8d4a764f6a9",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 79,
"avg_line_length": 49.40449438202247,
"alnum_prop": 0.5638503525130771,
"repo_name": "CiscoSystems/quantum",
"id": "695fb01bb2a52e763f76325437b748ce19b0ac43",
"size": "18180",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/test_extension_portsecurity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "4563108"
},
{
"name": "Shell",
"bytes": "9109"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
import asposecellscloud
from asposecellscloud.CellsApi import CellsApi
from asposecellscloud.CellsApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
# Example script (Python 2): calculate a worksheet formula via the
# Aspose.Cells cloud API, using Aspose cloud storage for the input file.
apiKey = "XXXXX" # specify App Key
appSid = "XXXXX" # specify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
# Instantiate Aspose Storage API SDK (third arg enables debug mode)
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
# Instantiate Aspose Cells API SDK
api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True)
cellsApi = CellsApi(api_client);
# Input workbook, worksheet and the formula to evaluate
filename = "Sample_Test_Book.xls"
sheetName = "Sheet1"
formula = "SUM(A5:A10)"
# Upload the local workbook to Aspose cloud storage first
storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
    # Invoke Aspose.Cells Cloud SDK API to calculate the formula in a worksheet
    response = cellsApi.GetWorkSheetCalculateFormula(name=filename, sheetName=sheetName, formula=formula)
    if response.Status == "OK":
        print "Result :: " + str(response.Value.Value)
except ApiException as ex:
    # Surface the API error details rather than failing silently
    print "ApiException:"
    print "Code:" + str(ex.code)
    print "Message:" + ex.message
|
{
"content_hash": "58a26e12472855019f2f9e7b547d7f47",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 105,
"avg_line_length": 33.26315789473684,
"alnum_prop": 0.7507911392405063,
"repo_name": "asposecells/Aspose_Cells_Cloud",
"id": "1cbd1eeccc37b5c1684c28dca9fa651876b29fe4",
"size": "1264",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Examples/Python/Examples/CalculateFormulaInWorksheet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "C#",
"bytes": "897367"
},
{
"name": "HTML",
"bytes": "110"
},
{
"name": "Java",
"bytes": "900042"
},
{
"name": "JavaScript",
"bytes": "664643"
},
{
"name": "Objective-C",
"bytes": "1142444"
},
{
"name": "PHP",
"bytes": "626745"
},
{
"name": "Python",
"bytes": "833397"
},
{
"name": "Ruby",
"bytes": "799033"
}
],
"symlink_target": ""
}
|
import numpy as np
from .img import rgb2lum
from .const import Path
from .os import open_file
from .imprt import preset_import
from .log import get_logger
logger = get_logger()
def compute_ci(data, level=0.95):
    r"""Computes confidence interval.

    Args:
        data (list(float)): Samples.
        level (float, optional): Confidence level. Defaults to :math:`0.95`.

    Returns:
        float: One-sided interval (i.e., mean :math:`\pm` this number).
    """
    from scipy import stats

    samples = np.array(data).astype(float)
    n_samples = len(samples)
    # Standard error of the mean, scaled by the two-sided Student-t quantile
    # for the requested confidence level.
    std_err = stats.sem(samples)
    t_quantile = stats.t.ppf((1 + level) / 2., n_samples - 1)
    return std_err * t_quantile
class Base():
    """The base metric.

    Attributes:
        dtype (numpy.dtype): Data type, with which data dynamic range is
            derived.
        drange (float): Dynamic range, i.e., difference between the maximum and
            minimum allowed.
    """
    def __init__(self, dtype):
        """
        Args:
            dtype (str or numpy.dtype): Data type, from which dynamic range will
                be derived.
        """
        self.dtype = np.dtype(dtype)
        kind = self.dtype.kind
        if kind == 'f':
            # Floats carry no intrinsic range, so assume [0, 1].
            self.drange = 1.
            logger.warning(
                "Input type is float, so assuming dynamic range to be 1")
        elif kind == 'u':
            # Unsigned integers span the full representable interval.
            type_info = np.iinfo(self.dtype)
            self.drange = float(type_info.max - type_info.min)
        else:
            raise NotImplementedError(kind)
    def _assert_type(self, im):
        # Reject inputs whose dtype disagrees with what this metric expects.
        assert im.dtype == self.dtype, (
            "Input data type ({in_dtype}) different from what was "
            "specified ({dtype})").format(in_dtype=im.dtype, dtype=self.dtype)
    def _assert_drange(self, im):
        observed = im.max() - im.min()
        assert self.drange >= observed, (
            "The actual dynamic range ({actual}) is larger than what was "
            "derived from the data type ({derived})").format(
                actual=observed, derived=self.drange)
    @staticmethod
    def _assert_same_shape(im1, im2):
        assert im1.shape == im2.shape, \
            "The two images are not even of the same shape"
    @staticmethod
    def _ensure_3d(im):
        # Normalize the input to H-by-W-by-C with C in {1, 3}.
        n_dims = im.ndim
        if n_dims not in (2, 3):
            raise ValueError(
                "Input must be 2D (H-by-W) or 3D (H-by-W-by-C), but is %dD"
                % n_dims)
        if n_dims == 2:
            return np.expand_dims(im, -1)
        n_channels = im.shape[2]
        assert n_channels in (1, 3), (
            "If 3D, input must have either 1 or 3 channels, but has %d"
        ) % n_channels
        return im
    def __call__(self, im1, im2, **kwargs):
        """
        Args:
            im1 (numpy.ndarray): An image of shape H-by-W, H-by-W-by-1,
                or H-by-W-by-3.
            im2

        Returns:
            float: The metric computed.
        """
        # Subclasses implement the actual metric.
        raise NotImplementedError
class PSNR(Base):
    """Peak Signal-to-Noise Ratio (PSNR) in dB (higher is better).

    If the inputs are RGB, they are first converted to luma (or relative
    luminance, if the inputs are not gamma-corrected). PSNR is computed
    on the luma.
    """
    def __call__(self, im1, im2, mask=None):
        """
        Args:
            im1
            im2
            mask (numpy.ndarray, optional): An H-by-W logical array indicating
                pixels that contribute to the computation.

        Returns:
            float: PSNR in dB.
        """
        self._assert_type(im1)
        self._assert_type(im2)
        # Cast to an unbounded type before doing arithmetic.
        im1 = self._ensure_3d(im1.astype(float))
        im2 = self._ensure_3d(im2.astype(float))
        self._assert_same_shape(im1, im2)
        self._assert_drange(im1)
        self._assert_drange(im2)
        if im1.shape[2] == 3:
            # RGB to luma so the metric runs on a single channel.
            im1 = np.expand_dims(rgb2lum(im1), -1)
            im2 = np.expand_dims(rgb2lum(im2), -1)
        # Both inputs are H-by-W-by-1 from here on.
        if mask is None:
            mask = np.ones(im1.shape)
        elif mask.ndim == 2:
            mask = np.expand_dims(mask, -1)
        assert mask.shape == im1.shape, (
            "Mask must be of shape {input_shape}, but is of shape "
            "{mask_shape}").format(input_shape=im1.shape, mask_shape=mask.shape)
        mask = mask.astype(bool)  # in case it is not logical yet
        sq_err = np.square(im1[mask] - im2[mask])
        mean_sq_err = np.sum(sq_err) / np.sum(mask)
        # Peak signal (squared dynamic range) over noise (MSE), in decibels.
        return 10 * np.log10((self.drange ** 2) / mean_sq_err)
class SSIM(Base):
    r"""The (multi-scale) Structural Similarity Index (SSIM) :math:`\in [0,1]`
    (higher is better).

    If the inputs are RGB, they are first converted to luma (or relative
    luminance, if the inputs are not gamma-corrected). SSIM is computed
    on the luma.
    """
    def __call__(self, im1, im2, multiscale=False):
        """
        Args:
            im1 (numpy.ndarray): An image of shape H-by-W, H-by-W-by-1,
                or H-by-W-by-3.
            im2
            multiscale (bool, optional): Whether to compute MS-SSIM.

        Returns:
            float: SSIM computed (higher is better).
        """
        tf = preset_import('tf', assert_success=True)
        self._assert_type(im1)
        self._assert_type(im2)
        # Cast to an unbounded type before any arithmetic.
        im1 = self._ensure_3d(im1.astype(float))
        im2 = self._ensure_3d(im2.astype(float))
        self._assert_same_shape(im1, im2)
        self._assert_drange(im1)
        self._assert_drange(im2)
        # Reduce RGB to a single luma channel.
        if im1.shape[2] == 3:
            im1 = np.expand_dims(rgb2lum(im1), -1)
            im2 = np.expand_dims(rgb2lum(im2), -1)
        # Both inputs are HxWx1 from here on.
        compute = tf.image.ssim_multiscale if multiscale else tf.image.ssim
        score = compute(
            tf.convert_to_tensor(im1), tf.convert_to_tensor(im2),
            max_val=self.drange)
        return score.numpy()
class LPIPS(Base):
    r"""The Learned Perceptual Image Patch Similarity (LPIPS) metric (lower is
    better).

    Project page: https://richzhang.github.io/PerceptualSimilarity/

    Note:
        This implementation assumes the minimum value allowed is :math:`0`, so
        data dynamic range becomes the maximum value allowed.

    Attributes:
        dtype (numpy.dtype): Data type, with which data dynamic range is
            derived.
        drange (float): Dynamic range, i.e., difference between the maximum and
            minimum allowed.
        lpips_func (tf.function): The LPIPS network packed into a function.
    """
    def __init__(self, dtype, weight_pb=None):
        """
        Args:
            dtype (str or numpy.dtype): Data type, from which maximum allowed
                will be derived.
            weight_pb (str, optional): Path to the network weight protobuf.
                Defaults to the bundled ``net-lin_alex_v0.1.pb``.
        """
        super().__init__(dtype)
        tf = preset_import('tf', assert_success=True)
        if weight_pb is None:
            weight_pb = Path.lpips_weights
        # Pack LPIPS network into a tf function
        graph_def = tf.compat.v1.GraphDef()
        with open_file(weight_pb, 'rb') as h:
            graph_def.ParseFromString(h.read())
        # '0:0' and '1:0' are taken as the graph's two image inputs, and
        # 'Reshape_10:0' as its output tensor -- these names are specific to
        # the bundled protobuf; verify them if supplying custom weights.
        self.lpips_func = tf.function(self._wrap_frozen_graph(
            graph_def, inputs=['0:0', '1:0'], outputs='Reshape_10:0'))
    @staticmethod
    def _wrap_frozen_graph(graph_def, inputs, outputs):
        """Imports a frozen GraphDef and prunes it into a callable that maps
        the named input tensors to the named output tensor(s)."""
        tf = preset_import('tf', assert_success=True)
        def _imports_graph_def():
            # Import under an empty name scope so tensor names stay as-is.
            tf.compat.v1.import_graph_def(graph_def, name="")
        wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
        import_graph = wrapped_import.graph
        return wrapped_import.prune(
            tf.nest.map_structure(import_graph.as_graph_element, inputs),
            tf.nest.map_structure(import_graph.as_graph_element, outputs))
    def __call__(self, im1, im2):
        """
        Args:
            im1 (numpy.ndarray): An image of shape H-by-W, H-by-W-by-1,
                or H-by-W-by-3.
            im2

        Returns:
            float: LPIPS computed (lower is better).
        """
        tf = preset_import('tf', assert_success=True)
        self._assert_type(im1)
        self._assert_type(im2)
        im1 = im1.astype(float)  # must be cast to an unbounded type
        im2 = im2.astype(float)
        im1 = self._ensure_3d(im1)
        im2 = self._ensure_3d(im2)
        self._assert_same_shape(im1, im2)
        self._assert_drange(im1)
        self._assert_drange(im2)
        # Single-channel inputs are replicated to three channels before being
        # fed to the network.
        if im1.shape[2] == 1:
            im1 = np.dstack([im1] * 3)
            im2 = np.dstack([im2] * 3)
        # Guaranteed to be HxWx3 now
        maxv = self.drange + 0  # NOTE: assumes the minimum value allowed is 0
        # Map pixel values from [0, maxv] to [-1, 1] and add a batch axis.
        im1t = tf.convert_to_tensor(
            np.expand_dims(im1, axis=0), dtype=float) / maxv * 2 - 1
        im2t = tf.convert_to_tensor(
            np.expand_dims(im2, axis=0), dtype=float) / maxv * 2 - 1
        # Now 1xHxWx3 and all values in [-1, 1]
        lpips = self.lpips_func(
            tf.transpose(im1t, [0, 3, 1, 2]),  # to 1x3xHxW (channels first)
            tf.transpose(im2t, [0, 3, 1, 2])
        ).numpy().squeeze()[()]
        return lpips
|
{
"content_hash": "2e4bbe93be0c9d724d2f9fb607ac29e9",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 80,
"avg_line_length": 33.79710144927536,
"alnum_prop": 0.5596054888507719,
"repo_name": "google/nerfactor",
"id": "f877e021198ecb0a3d2a96c3d318c29188a27324",
"size": "9364",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "third_party/xiuminglib/xiuminglib/metric.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "301538"
},
{
"name": "Shell",
"bytes": "9603"
}
],
"symlink_target": ""
}
|
import six
import logging
from aleph.authz import get_public_roles
from aleph.util import dict_list
from aleph.model import Role
from aleph.datasets.query import DBQuery, CSVQuery
log = logging.getLogger(__name__)
class Dataset(object):
    """A dataset describes one set of data to be loaded."""

    def __init__(self, name, data):
        self.name = six.text_type(name)
        self.data = data
        self.label = data.get('label', name)
        self.info_url = data.get('info_url')
        self.category = data.get('category')
        self.roles = []
        self.entities_count = None
        self.public = False
        for role_ref in dict_list(data, 'roles', 'role'):
            role_id = Role.load_id(role_ref)
            if role_id is None:
                log.warning("Could not find role: %s", role_ref)
            else:
                self.roles.append(role_id)
            # A dataset is public if any of its roles is a public role.
            if role_id in get_public_roles():
                self.public = True
        if not len(self.roles):
            raise ValueError("No roles for dataset: %s" % self.name)
        self._queries = dict_list(data, 'queries', 'query')

    @property
    def countries(self):
        # This is cached only once for each run-time, basically as a really
        # stupid cache. Perhaps configuring countries explicitly, or giving
        # this into a memoization tool that timeouts every N hours would be
        # a good idea.
        if not hasattr(self, '_countries'):
            from aleph.search.entities import get_dataset_countries
            self._countries = get_dataset_countries(self.name)
        return self._countries

    @property
    def queries(self):
        """Yields a query object for each configured query spec."""
        for spec in self._queries:
            is_db = 'database' in spec or 'databases' in spec
            query_cls = DBQuery if is_db else CSVQuery
            yield query_cls(self, spec)

    def to_dict(self):
        return {
            'name': self.name,
            'label': self.label,
            'info_url': self.info_url,
            'roles': self.roles,
            'public': self.public,
            'category': self.category,
            'countries': self.countries,
            'entities_count': self.entities_count
        }

    def __repr__(self):
        return '<Dataset(%r, %r)>' % (self.name, self.label)
class DatasetSet(object):
    """The collection of all configured datasets."""

    def __init__(self, datasets):
        configured = datasets.get('datasets', {})
        self.datasets = [Dataset(name, cfg)
                         for name, cfg in configured.items()]

    def get(self, name):
        """Returns the dataset with the given name, or raises NameError."""
        match = next((ds for ds in self.datasets if ds.name == name), None)
        if match is None:
            raise NameError("No such dataset: %s" % name)
        return match

    def __iter__(self):
        return iter(self.datasets)

    def __repr__(self):
        return '<DatasetSet(%r)>' % self.datasets
|
{
"content_hash": "55740f7226c1117ea55ec028c0b41744",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 75,
"avg_line_length": 31.12087912087912,
"alnum_prop": 0.5723870056497176,
"repo_name": "OpenGazettes/aleph",
"id": "064e7010abbc84054a491e7a14b1d02f12062db5",
"size": "2832",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aleph/datasets/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16691"
},
{
"name": "HTML",
"bytes": "129730"
},
{
"name": "JavaScript",
"bytes": "113910"
},
{
"name": "Makefile",
"bytes": "1445"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "449779"
},
{
"name": "Shell",
"bytes": "821"
}
],
"symlink_target": ""
}
|
import feedparser
from flask import Response
import json
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.exceptions import NotFound, BadRequest
from server.database import helpers
from server.database.models import Entry
from server.import_opml import create_db_from_opml
def halify_entry_list(entry_list, total, feed=None, next_page=None,
                      predicate="all"):
    """Wraps a list of entries in a HAL-style envelope.

    When *feed* is given, the envelope is built on top of that feed dict
    (mutating it in place) and links point at the feed's entry collection;
    otherwise a fresh dict with generic entry links is returned.
    """
    if feed is not None:
        envelope = feed
        href = "/api/feed/{}/{}".format(feed["id"], predicate)
    else:
        envelope = {}
        href = "/api/entry/{}".format(predicate)
    envelope["total"] = total
    links = {"self": {"href": href}}
    if next_page is not None:
        links["next"] = {"href": "{}?page={}".format(href, next_page)}
    envelope["_links"] = links
    envelope["_embedded"] = {"entries": entry_list}
    return envelope
def get_all_feeds():
    """Returns a HAL document listing every stored feed."""
    serialized = [feed.serialize()
                  for feed in helpers.query_all_feeds().all()]
    document = {
        "_links": {
            "self": {"href": "/feeds/"},
            "find": {"href": "/feeds{?id}", "templated": True},
        },
        "_embedded": {"feeds": serialized},
    }
    return Response(json.dumps(document), mimetype="application/json")
def post_feed(url):
    """Parses *url* as a feed, stores it, and returns it with a Location
    header pointing at the new resource."""
    parsed = feedparser.parse(url).feed
    if parsed == {}:
        # feedparser yields an empty feed when nothing could be parsed.
        raise NotFound
    feed = helpers.add_feed(parsed.get("title", "No title"), url).serialize()
    location = feed["_links"]["self"]["href"]
    return Response(json.dumps(feed), mimetype="application/json",
                    headers={"location": location})
def get_entries(predicate="all", page=1, **kwargs):
    """Returns one page of entries, optionally scoped to a feed (via
    ``feed_id`` in *kwargs*) and filtered by *predicate*."""
    feed = None
    if "feed_id" in kwargs:
        try:
            feed = helpers.query_feed_by_id(kwargs["feed_id"]).serialize()
        except NoResultFound:
            raise NotFound
    # Map each predicate onto the filter kwargs it implies.
    predicate_filters = {
        "all": {},
        "read": {"read": True},
        "unread": {"read": False},
        "marked": {"marked": True},
    }
    if predicate not in predicate_filters:
        raise BadRequest
    kwargs.update(predicate_filters[predicate])
    query, total, last = helpers.query_entries_paged(page, **kwargs)
    next_page = None if last else page + 1
    serialized = [entry.serialize() for entry in query]
    envelope = halify_entry_list(serialized, total, feed=feed,
                                 next_page=next_page, predicate=predicate)
    return Response(json.dumps(envelope), mimetype="application/json")
def get_entry(entry_id):
    """Returns one entry embedded in its parent feed's HAL document."""
    entry = helpers.query_entry_by_id(entry_id)
    parent = helpers.query_feed_by_id(entry.feed_id).serialize()
    parent["_embedded"] = {"entry": entry.serialize()}
    return Response(json.dumps(parent), mimetype="application/json")
def refresh_feed(feed_id):
    """Re-fetches a feed's URL and stores any entries found there."""
    try:
        stored = helpers.query_feed_by_id(feed_id).serialize()
        fetched = feedparser.parse(stored["url"])
        batch = [parse_entry(feed_id, e) for e in fetched.entries]
        helpers.add_entries(feed_id, batch)
        # Re-read so the response reflects the newly added entries.
        refreshed = helpers.query_feed_by_id(feed_id).serialize()
        return Response(json.dumps(refreshed), mimetype="application/json")
    except NoResultFound:
        raise NotFound
def delete_feed(feed_id):
    """Deletes a feed and returns the remaining feed list."""
    try:
        doomed = helpers.query_feed_by_id(feed_id)
    except NoResultFound:
        raise NotFound
    helpers.delete_feed(doomed)
    return get_all_feeds()
def parse_entry(feed_id, feed_entry):
    """Converts a feedparser entry into an Entry model instance, filling
    defaults for any missing fields (summary falls back to the title)."""
    title = feed_entry.get("title", "No title")
    return Entry(
        title,
        feed_entry.get("id", "No ID"),
        feed_entry.get("link", "/page_not_found.html"),
        feed_entry.get("summary", title),
        feed_id,
        pub_date=feed_entry.get("published_parsed", None),
    )
def toggle_status(entry_id, data):
    """Updates an entry's read/marked flags from a request payload.

    Raises BadRequest when neither flag is present, NotFound for an
    unknown entry id.
    """
    updates = {key: data[key] for key in ("read", "marked") if key in data}
    if not updates:
        raise BadRequest
    try:
        entry = helpers.query_entry_by_id(entry_id)
        helpers.update_entry_status(entry.feed_id, entry_id, updates)
        return get_entry(entry_id)
    except NoResultFound:
        raise NotFound
# TODO: should have a better response here
def mark_all_read():
    """Flags every stored entry as read."""
    helpers.update_all_entries({"read": True})
    payload = json.dumps({"read": True})
    return Response(payload, mimetype="application/json")
def import_opml(f):
    """Creates feeds from an uploaded OPML file.

    Args:
        f: An uploaded file object with a ``mimetype`` attribute.

    Raises:
        BadRequest: If the upload does not declare the OPML mimetype.
    """
    if f.mimetype != "text/x-opml+xml":
        # Reject anything not declared as OPML. (A leftover debug
        # print of the mimetype was removed here.)
        raise BadRequest
    create_db_from_opml(f)
    return get_all_feeds()
|
{
"content_hash": "9d2332b995f3af675ada8cebf5c1f4ee",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 76,
"avg_line_length": 32.43150684931507,
"alnum_prop": 0.5947201689545935,
"repo_name": "flacerdk/smoke-signal",
"id": "848eff10148c21e54c52cfb28a41f111ff359c61",
"size": "4772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/main/methods.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3638"
},
{
"name": "HTML",
"bytes": "1680"
},
{
"name": "JavaScript",
"bytes": "36667"
},
{
"name": "Python",
"bytes": "36540"
}
],
"symlink_target": ""
}
|
import uuid
from openstack_dashboard.test import helpers as test
from openstack_dashboard.utils.filters import get_int_or_uuid
class UtilsFilterTests(test.TestCase):
    """Exercises get_int_or_uuid with valid and invalid inputs."""

    def test_accept_valid_integer(self):
        self.assertEqual(100, get_int_or_uuid(100))

    def test_accept_valid_integer_string(self):
        result = get_int_or_uuid('100')
        self.assertEqual(100, result)

    def test_accept_valid_uuid(self):
        generated = str(uuid.uuid4())
        self.assertEqual(generated, get_int_or_uuid(generated))

    def test_reject_random_string(self):
        self.assertRaises(ValueError, get_int_or_uuid, '55WbJTpJDf')
|
{
"content_hash": "fbb97cb9285ec53c8c59771c4f9b58bf",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 61,
"avg_line_length": 28.48,
"alnum_prop": 0.6460674157303371,
"repo_name": "tuskar/tuskar-ui",
"id": "2096b26766689f9e7b303fc8e5beb82658b53cb5",
"size": "1397",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/test/tests/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "159761"
},
{
"name": "JavaScript",
"bytes": "467747"
},
{
"name": "Python",
"bytes": "2393436"
},
{
"name": "Shell",
"bytes": "12884"
}
],
"symlink_target": ""
}
|
"""Student (Model) query functions.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from soc.logic.models import role
from soc.logic.models import program as program_logic
import soc.models.role
import soc.models.student
class Logic(role.Logic):
  """Logic methods for the Student model.
  """

  def __init__(self, model=soc.models.student.Student,
               base_model=soc.models.role.Role, scope_logic=program_logic,
               role_name='student', disallow_last_resign=False):
    """Defines the name, key_name and model for this entity.
    """
    super(Logic, self).__init__(
        model=model, base_model=base_model, scope_logic=scope_logic,
        role_name=role_name,
        disallow_last_resign=disallow_last_resign)


# Module-level singleton through which callers use this logic.
logic = Logic()
|
{
"content_hash": "c0752749dd35c5ce26bc461fa5d188bd",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 74,
"avg_line_length": 26.8125,
"alnum_prop": 0.6072261072261073,
"repo_name": "SRabbelier/Melange",
"id": "67ccc9ce750b1fe5f47613115fedcf7887af11ee",
"size": "1468",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/soc/logic/models/student.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
}
|
from decimal import Decimal as D
from django.test import TestCase
from nose.plugins.attrib import attr
from oscar.apps.shipping.models import OrderAndItemCharges, WeightBased
from oscar.core.compat import get_user_model
from oscar.test import factories
User = get_user_model()
@attr('shipping')
class TestOrderAndItemCharges(TestCase):
    """A method combining a per-order fee with a per-item fee."""

    def setUp(self):
        self.method = OrderAndItemCharges(
            price_per_order=D('5.00'), price_per_item=D('1.00'))

    def test_tax_is_known(self):
        empty = factories.create_basket(empty=True)
        self.assertTrue(self.method.calculate(empty).is_tax_known)

    def test_returns_order_level_charge_for_empty_basket(self):
        empty = factories.create_basket(empty=True)
        self.assertEqual(D('5.00'), self.method.calculate(empty).incl_tax)

    def test_single_item_basket(self):
        basket = factories.create_basket(empty=False)
        expected = D('5.00') + D('1.00')
        self.assertEqual(expected, self.method.calculate(basket).incl_tax)

    def test_single_item_basket_that_doesnt_require_shipping(self):
        # Create a product that doesn't require shipping
        record = factories.create_stockrecord()
        product_class = record.product.product_class
        product_class.requires_shipping = False
        product_class.save()
        basket = factories.create_basket(empty=True)
        basket.add_product(record.product)
        self.assertEqual(D('5.00'), self.method.calculate(basket).incl_tax)

    def test_multi_item_basket(self):
        basket = factories.create_basket(empty=True)
        record = factories.create_stockrecord()
        basket.add_product(record.product, 7)
        expected = D('5.00') + 7 * D('1.00')
        self.assertEqual(expected, self.method.calculate(basket).incl_tax)
@attr('shipping')
class ZeroFreeThresholdTest(TestCase):
    """A zero free-shipping threshold makes every basket ship free."""

    def setUp(self):
        self.method = OrderAndItemCharges(
            price_per_order=D('10.00'), free_shipping_threshold=D('0.00'))
        self.basket = factories.create_basket(empty=True)

    def test_free_shipping_with_empty_basket(self):
        charge = self.method.calculate(self.basket)
        self.assertEqual(D('0.00'), charge.incl_tax)

    def test_free_shipping_with_nonempty_basket(self):
        record = factories.create_stockrecord(price_excl_tax=D('5.00'))
        self.basket.add_product(record.product)
        charge = self.method.calculate(self.basket)
        self.assertEqual(D('0.00'), charge.incl_tax)
@attr('shipping')
class TestNonZeroFreeThreshold(TestCase):
    """Shipping is charged below the threshold and free at or above it."""

    def setUp(self):
        self.method = OrderAndItemCharges(
            price_per_order=D('10.00'), free_shipping_threshold=D('20.00'))
        self.basket = factories.create_basket(empty=True)

    def _add_items(self, quantity):
        # Each item costs 5.00 excl tax.
        record = factories.create_stockrecord(price_excl_tax=D('5.00'))
        self.basket.add_product(record.product, quantity=quantity)

    def test_basket_below_threshold(self):
        self._add_items(1)
        charge = self.method.calculate(self.basket)
        self.assertEqual(D('10.00'), charge.incl_tax)

    def test_basket_on_threshold(self):
        self._add_items(4)
        charge = self.method.calculate(self.basket)
        self.assertEqual(D('0.00'), charge.incl_tax)

    def test_basket_above_threshold(self):
        self._add_items(8)
        charge = self.method.calculate(self.basket)
        self.assertEqual(D('0.00'), charge.incl_tax)
@attr('shipping')
class WeightBasedMethodTests(TestCase):
    """Band lookup and charge calculation for weight-based shipping."""

    def setUp(self):
        self.standard = WeightBased.objects.create(name='Standard')
        self.express = WeightBased.objects.create(name='Express')

    def test_get_band_for_lower_weight(self):
        created = self.standard.bands.create(upper_limit=1, charge=D('4.00'))
        found = self.standard.get_band_for_weight(0.5)
        self.assertEqual(created.id, found.id)

    def test_get_band_for_higher_weight(self):
        self.standard.bands.create(upper_limit=1, charge=D('4.00'))
        self.assertIsNone(self.standard.get_band_for_weight(1.5))

    def test_get_band_for_matching_weight(self):
        created = self.standard.bands.create(upper_limit=1, charge=D('4.00'))
        found = self.standard.get_band_for_weight(1)
        self.assertEqual(created.id, found.id)

    def test_weight_to_is_upper_bound(self):
        band = self.standard.bands.create(upper_limit=1, charge=D('4.00'))
        self.assertEqual(1, band.weight_to)

    def test_weight_from_for_single_band(self):
        band = self.standard.bands.create(upper_limit=1, charge=D('4.00'))
        self.assertEqual(0, band.weight_from)

    def test_weight_from_for_multiple_bands(self):
        # A band belonging to another method must not shift this band's
        # lower bound.
        self.standard.bands.create(upper_limit=1, charge=D('4.00'))
        express_band = self.express.bands.create(
            upper_limit=2, charge=D('8.00'))
        self.assertEqual(0, express_band.weight_from)

    def test_get_band_for_series_of_bands(self):
        for limit, charge in ((1, '4.00'), (2, '8.00'), (3, '12.00')):
            self.standard.bands.create(upper_limit=limit, charge=D(charge))
        for weight, expected in ((0.5, '4.00'), (1.5, '8.00'),
                                 (2.5, '12.00')):
            band = self.standard.get_band_for_weight(weight)
            self.assertEqual(D(expected), band.charge)

    def test_get_band_for_series_of_bands_from_different_methods(self):
        self.express.bands.create(upper_limit=2, charge=D('8.00'))
        self.standard.bands.create(upper_limit=1, charge=D('4.00'))
        self.standard.bands.create(upper_limit=3, charge=D('12.00'))
        band = self.standard.get_band_for_weight(2.5)
        self.assertEqual(D('12.00'), band.charge)

    def test_for_smoke_with_basket_charge(self):
        basket = factories.create_basket(empty=True)
        charge = self.standard.calculate(basket)
        self.assertEqual(D('0.00'), charge.incl_tax)
        self.assertTrue(charge.is_tax_known)

    def test_simple_shipping_cost_scenario_handled_correctly(self):
        basket = factories.BasketFactory()
        attr_value = factories.ProductAttributeValueFactory(value_float=2.5)
        basket.add_product(attr_value.product)
        self.standard.bands.create(upper_limit=3, charge=D('3.00'))
        charge = self.standard.calculate(basket)
        self.assertEqual(D('3.00'), charge.excl_tax)

    def test_overflow_shipping_cost_scenario_handled_correctly(self):
        # Weight 2.5 lies above the top band (2); the expected charge is
        # 1.00 + 2.00.
        basket = factories.BasketFactory()
        attr_value = factories.ProductAttributeValueFactory(value_float=2.5)
        basket.add_product(attr_value.product)
        self.standard.bands.create(upper_limit=1, charge=D('1.00'))
        self.standard.bands.create(upper_limit=2, charge=D('2.00'))
        charge = self.standard.calculate(basket)
        self.assertEqual(D('1.00') + D('2.00'), charge.excl_tax)
|
{
"content_hash": "ac1b42dcd5a814a0eaa87b228d3bc7d7",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 83,
"avg_line_length": 38.68783068783069,
"alnum_prop": 0.6686269146608315,
"repo_name": "kapt/django-oscar",
"id": "c67565fcbac7428dfa7921a168140bb8b09c5dc5",
"size": "7312",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/integration/shipping/model_method_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1013938"
},
{
"name": "JavaScript",
"bytes": "926045"
},
{
"name": "Python",
"bytes": "5840384"
},
{
"name": "Shell",
"bytes": "6015"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
}
|
from pycoin.tx import Tx, pay_to, Spendable
from pycoin.serialize import b2h, b2h_rev, h2b, h2b_rev
from pycoin.merkle import merkle
from pycoin.encoding import double_sha256
from pycoin.convention import tx_fee, satoshi_to_mbtc
from pycoin.networks import address_prefix_for_netcode
from pycoin.services import spendables_for_address, get_tx_db
import json
import multisigcore
import multisigcore.oracle
import sys
from . import cosign
def full_leaf_path(account_path, leaf_path):
	"""Joins an account path and a leaf path into one /-prefixed path."""
	parts = (account_path.strip('/'), leaf_path.strip('/'))
	return '/{}/{}'.format(*parts)
class Batch(object):
@classmethod
def from_file(cls, file_path):
with open(file_path, 'r') as fp:
data = json.load(fp)
header = data['header']
if header['merkle_root'] is None:
raise ValueError('header.merkle_root always has to be present in loaded batch files')
return cls(
original_master_xpubs=header['original_master_xpubs'], destination_master_xpubs=header['destination_master_xpubs'],
merkle_root=header['merkle_root'], total_out=header['total_out'], checksum=header['checksum'],
batchable_txs=[BatchableTx.from_dict(tx) for tx in data['txs']],
)
def __init__(self, original_master_xpubs, destination_master_xpubs, batchable_txs, merkle_root=None, total_out=None, total_fee=None, checksum=None):
self.original_master_xpubs = original_master_xpubs
self.destination_master_xpubs = destination_master_xpubs
self.batchable_txs = batchable_txs
self.merkle_root = merkle_root or self.build_merkle_root() # set merkle_root when creating a new batch
self.total_out = total_out or sum([batchable_tx.total_out() for batchable_tx in batchable_txs])
self.checksum = checksum or -1 # todo - checksum
def build_merkle_root(self):
# shouldn't get called without transactions
if not len(self.batchable_txs):
return None
else:
return b2h_rev(merkle(sorted([tx.hash() for tx in self.batchable_txs]), double_sha256))
def to_file(self, file_path):
data = {
'header': {
'original_master_xpubs': self.original_master_xpubs, 'destination_master_xpubs': self.destination_master_xpubs,
'merkle_root': self.merkle_root, 'total_out': self.total_out, 'checksum': self.checksum
},
'txs': [batchable_tx.as_dict() for batchable_tx in self.batchable_txs],
}
with open(file_path, 'w') as fp:
print "save %s" % file_path
json.dump(data, fp)
def validate(self, provider=None):
if self.merkle_root != self.build_merkle_root():
raise ValueError("calculated merkle_root %s does not match stated merkle_root %s from header" % (self.build_merkle_root(), self.merkle_root))
self.total_out = 0
if provider is not None:
print "Doing full, online validation."
self.total_in = 0
self.total_fee = 0
else:
print "Doing limited, offline validation."
print "Validating %d transactions" % len(self.batchable_txs)
for tx_index, tx in enumerate(self.batchable_txs):
print "\n\nValidating tx#%d - %s" % (tx_index+1, tx.id())
print "- Total out", tx.total_out()
self.total_out += tx.total_out()
if provider:
print "Fetching %d UTXO..." % len(tx.txs_in)
for idx, tx_in in enumerate(tx.txs_in):
unspent_tx = provider.get_tx(tx_in.previous_hash)
tx.unspents.append(unspent_tx.txs_out[tx_in.previous_index])
print "- Total in", tx.total_in()
self.total_in += tx.total_in()
print "- Fee", tx.fee()
self.total_fee += tx.fee()
print "- Transaction Size", len(tx.as_hex())
print "- Recommended Fee for Size ", tx_fee.recommended_fee_for_tx(tx)
if tx.fee() > 100000 and tx.fee() > 2 * tx_fee.recommended_fee_for_tx(tx):
raise ValueError("Very high fee in transaction %s" % tx.id())
print "- Fee Percent", (tx.fee() * 100.00 / tx.total_out())
print "- Bad Signatures", tx.bad_signature_count(), "of", len(tx.txs_in)
def sign(self, master_private_key): # todo - test to see if this needs to be cached to FS when signing 100k txs
for tx_i, batchable_tx in enumerate(self.batchable_txs):
try:
cosign(batchable_tx, keys=[master_private_key.subkey_for_path(path.strip('/')) for path in batchable_tx.input_paths])
print 'signed: %s' % batchable_tx.id()
except Exception as err:
print '! could not sign tx %s, skipping' % batchable_tx.id(), err
self.merkle_root = self.build_merkle_root()
def broadcast(self, provider): # todo - broadcasting status will need to be cached to FS + checking blockchain until all txs pushed
for batchable_tx in self.batchable_txs:
try:
provider.send_tx(batchable_tx)
print 'broadcasted %s %s' % (batchable_tx.id(), batchable_tx.as_hex())
except Exception, err:
sys.stderr.write("! tx %s failed to propagate [%s] (%s)\n" %(batchable_tx.id(), batchable_tx.as_hex(), str(err)))
def __repr__(self):
return "Batch(%s)" % str(self.merkle_root)
def __eq__(self, other): # todo - think through more
"""used for extensive validation - compare received copy with self-created copy"""
return self.merkle_root == other.merkle_root and self.original_master_xpubs == other.master_xpubs and self.total_out == other.total_out and \
self.total_fee == other.total_fee and self.checksum == other.checksum and len(self.batchable_txs) == len(other.batchable_txs)
class BatchableTx(Tx):
	"""A transaction annotated with the BIP32 paths of its inputs/outputs."""

	@classmethod
	def from_dict(cls, data):
		"""Rebuilds a BatchableTx from a dict produced by as_dict()."""
		batchable_tx = cls.tx_from_hex(data['bytes'])
		batchable_tx.output_paths = data['output_paths']
		batchable_tx.input_paths = data['input_paths']
		return batchable_tx

	@classmethod
	def from_tx(cls, tx, output_paths, backup_account_path):
		"""Wraps a plain Tx, resolving its leaf paths against the backup
		account path."""
		batchable_tx = cls(tx.version, tx.txs_in, tx.txs_out, tx.lock_time, tx.unspents)
		batchable_tx.input_paths = [full_leaf_path(backup_account_path, leaf_path) for leaf_path in tx.input_chain_paths()]
		batchable_tx.output_paths = [full_leaf_path(backup_account_path, leaf_path) for leaf_path in output_paths]
		return batchable_tx

	def as_dict(self):
		"""Serializes to a dict; 'bytes' includes unspents.

		Fix: a dead local (`hex = self.as_hex()`) that shadowed the builtin
		`hex` and whose value was never used has been removed.
		"""
		return {  # todo - make it more similar to multisigcore.hierarchy.AccountTx
			'bytes': self.as_hex(include_unspents=True),
			'input_paths': self.input_paths,
			'output_paths': self.output_paths,
		}
|
{
"content_hash": "969a359f1e1b0e73bedfa99b1da39267",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 149,
"avg_line_length": 41.897260273972606,
"alnum_prop": 0.6988719960765081,
"repo_name": "bit-oasis/multisig-recovery",
"id": "268b5b272a169ad243c6b370d65c2c88f42e49d5",
"size": "6117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multisigrecovery/batch.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "38539"
}
],
"symlink_target": ""
}
|
# Example: symbolic expansion of (a + b)**5 with sympy.
# NOTE: uses Python 2 print-statement syntax.
import sys
sys.path.append("..")  # allow running from the examples/ directory
import sympy
a=sympy.Symbol('a')
b=sympy.Symbol('b')
e=(a+b)**5
# Show the expression before and after expansion.
print e
print e.expand()
|
{
"content_hash": "12bc5f7eb475b6b13383c0f9d9cbdf3e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 21,
"avg_line_length": 13.666666666666666,
"alnum_prop": 0.6585365853658537,
"repo_name": "certik/sympy-oldcore",
"id": "8ce8a2d87546ce26c86f719139295a4d36687c9d",
"size": "123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/expansion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import json
import os
from eg import config
from eg import substitute
from eg import util
from mock import Mock
from mock import patch
# Paths to the `pwd` example fixture files (unsqueezed vs. squeezed
# variants) used by the tests below.
PATH_UNSQUEEZED_FILE = os.path.join(
    'test',
    'assets',
    'pwd_unsqueezed.md'
)
PATH_SQUEEZED_FILE = os.path.join(
    'test',
    'assets',
    'pwd_squeezed.md'
)
def _create_config(
        examples_dir=None, custom_dir=None, color_config=None,
        use_color=True, pager_cmd=None, editor_cmd=None,
        squeeze=False, subs=None):
    """
    Create a config.Config object with default values for expediency in
    testing.
    """
    return config.Config(
        examples_dir=examples_dir,
        custom_dir=custom_dir,
        color_config=color_config,
        use_color=use_color,
        pager_cmd=pager_cmd,
        editor_cmd=editor_cmd,
        squeeze=squeeze,
        subs=subs)
@patch('os.walk')
def test_get_file_paths_for_program_with_single(mock_walk):
    """A single matching file in a flat directory is found."""
    examples_dir = '/Users/tyrion'
    target = 'cp' + util.EXAMPLE_FILE_SUFFIX
    mock_walk.return_value = [
        [examples_dir, [], [target, 'cp.txt', 'other_file.md']],
    ]
    paths = util.get_file_paths_for_program('cp', examples_dir)
    assert paths == ['/Users/tyrion/cp.md']
    mock_walk.assert_called_once_with(examples_dir)
@patch('os.walk')
def test_get_file_paths_for_program_with_nested(mock_walk):
    """Matching files are collected from every nested directory."""
    root = '/Users/tyrion'
    target = 'cp.md'
    mock_walk.return_value = [
        [root, ['dirA', 'dirB'], [target, 'cp.txt', 'other_file.md']],
        [root + '/dirA', ['dirA-child'], [target, 'bad.md']],
        [root + '/dirA/dirA-child', [], ['bad.md', target, 'wtf.md']],
        [root + '/dirB', [], ['foo.md', target]],
    ]
    actual = util.get_file_paths_for_program('cp', root)
    assert actual == [
        '/Users/tyrion/cp.md',
        '/Users/tyrion/dirA/cp.md',
        '/Users/tyrion/dirA/dirA-child/cp.md',
        '/Users/tyrion/dirB/cp.md',
    ]
    mock_walk.assert_called_once_with(root)
@patch('os.walk')
def test_get_file_paths_for_program_with_none(mock_walk):
    """An empty directory walk yields no paths."""
    mock_walk.return_value = []
    assert util.get_file_paths_for_program('cp', '/Users/tyrion') == []
    mock_walk.assert_called_once_with('/Users/tyrion')
@patch('os.walk')
def test_get_file_paths_for_program_with_no_dir(mock_walk):
    """A missing examples directory produces an empty result."""
    result = util.get_file_paths_for_program('cp', None)
    assert result == []
# Note: @patch decorators apply bottom-up, so the bottom-most decorator
# (get_resolved_program) corresponds to the first mock parameter.
@patch('eg.util.page_string')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_resolved_program')
def test_handle_program_no_entries(
    mock_resolve_program,
    mock_get_contents,
    mock_format,
    mock_page_string,
):
    """
    We should do the right thing if there are no entries for a given program.
    """
    program = 'cp'
    # Default config has no example/custom dirs, so the (unpatched) path
    # lookup presumably finds nothing -- TODO confirm against util.
    test_config = _create_config()
    mock_resolve_program.return_value = program
    util.handle_program(program, test_config)
    mock_resolve_program.assert_called_once_with(
        program,
        test_config
    )
    # We should have aborted and not called any of the
    # other methods.
    assert mock_get_contents.call_count == 0
    assert mock_format.call_count == 0
    assert mock_page_string.call_count == 0
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_file_paths_for_program')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.page_string')
def test_handle_program_finds_paths_and_calls_open_pager_no_alias(
    mock_page,
    mock_format,
    mock_get_paths,
    mock_get_contents,
    mock_resolve,
):
    """
    If there are entries for the program, handle_program needs to get the
    paths, get the contents, format the contents, and page the resulting
    string.
    """
    program = 'mv'
    examples_dir = 'test-eg-dir'
    custom_dir = 'test-custom-dir'
    color_config = None
    use_color = False
    pager_cmd = 'foo bar'
    squeeze = False
    subs = ['foo', 'bar']
    file_contents = 'I am the contents of mv.md.'
    formatted_contents = 'and I am the formatted contents of mv.md.'
    test_config = _create_config(
        examples_dir=examples_dir,
        custom_dir=custom_dir,
        color_config=color_config,
        use_color=use_color,
        pager_cmd=pager_cmd,
        squeeze=squeeze,
        subs=subs
    )
    default_paths = ['test-eg-dir/mv.md', 'test-eg-dir/foo/mv.md']
    custom_paths = ['test-custom-dir/mv.md', 'test-custom-dir/bar.md']
    # side_effect callable: return the right path list depending on which
    # directory handle_program asks about, and fail loudly otherwise.
    def return_correct_path(*args, **kwargs):
        program_param = args[0]
        dir_param = args[1]
        if program_param != program:
            raise NameError('expected ' + program + ', got ' + program_param)
        if dir_param == examples_dir:
            return default_paths
        elif dir_param == custom_dir:
            return custom_paths
        else:
            raise NameError(
                'got ' +
                dir_param +
                ', expected ' +
                examples_dir +
                ' or ' +
                custom_dir)
    mock_format.return_value = formatted_contents
    mock_get_paths.side_effect=return_correct_path
    mock_get_contents.return_value = file_contents
    mock_resolve.return_value = program
    util.handle_program(program, test_config)
    mock_resolve.assert_called_once_with(
        program,
        test_config
    )
    mock_get_paths.assert_any_call(
        program,
        examples_dir
    )
    mock_get_paths.assert_any_call(
        program,
        custom_dir,
    )
    # Custom paths come first so user examples take precedence over defaults.
    mock_get_contents.assert_called_once_with(
        custom_paths[0],
        custom_paths[1],
        default_paths[0],
        default_paths[1],
    )
    mock_format.assert_called_once_with(
        file_contents,
        use_color=test_config.use_color,
        color_config=test_config.color_config,
        squeeze=test_config.squeeze,
        subs=test_config.subs
    )
    mock_page.assert_called_once_with(
        formatted_contents,
        test_config.pager_cmd
    )
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_contents_from_files')
@patch('eg.util.get_file_paths_for_program')
@patch('eg.util.get_formatted_contents')
@patch('eg.util.page_string')
def test_handle_program_finds_paths_and_calls_open_pager_with_alias(
    mock_page,
    mock_format,
    mock_get_paths,
    mock_get_contents,
    mock_resolve,
):
    """
    If there are entries for the program, handle_program needs to get the
    paths, get the contents, format the contents, and page the resulting
    string.

    Same flow as the no-alias test above, but the requested program is an
    alias: all downstream lookups must use the resolved name, not the alias.
    """
    alias_for_program = 'link'
    resolved_program = 'ln'
    examples_dir = 'test-eg-dir'
    custom_dir = 'test-custom-dir'
    color_config = None
    use_color = False
    pager_cmd = 'foo bar'
    squeeze = False
    subs = ['foo', 'bar']
    file_contents = 'I am the contents of ln.md.'
    formatted_contents = 'and I am the formatted contents of ln.md.'
    test_config = _create_config(
        examples_dir=examples_dir,
        custom_dir=custom_dir,
        color_config=color_config,
        use_color=use_color,
        pager_cmd=pager_cmd,
        squeeze=squeeze,
        subs=subs
    )
    default_paths = ['test-eg-dir/ln.md']
    custom_paths = ['test-custom-dir/ln.md']
    # Paths must be requested for the *resolved* program name.
    def return_correct_path(*args, **kwargs):
        program_param = args[0]
        dir_param = args[1]
        if program_param != resolved_program:
            raise NameError(
                'expected ' +
                resolved_program +
                ', got ' +
                program_param
            )
        if dir_param == examples_dir:
            return default_paths
        elif dir_param == custom_dir:
            return custom_paths
        else:
            raise NameError(
                'got ' +
                dir_param +
                ', expected ' +
                examples_dir +
                ' or ' +
                custom_dir)
    mock_format.return_value = formatted_contents
    mock_get_paths.side_effect = return_correct_path
    mock_get_contents.return_value = file_contents
    mock_resolve.return_value = resolved_program
    util.handle_program(
        alias_for_program,
        test_config
    )
    mock_resolve.assert_called_once_with(
        alias_for_program,
        test_config
    )
    mock_get_paths.assert_any_call(
        resolved_program,
        examples_dir
    )
    mock_get_paths.assert_any_call(
        resolved_program,
        custom_dir,
    )
    # Custom examples precede default examples.
    mock_get_contents.assert_called_once_with(
        custom_paths[0],
        default_paths[0]
    )
    mock_format.assert_called_once_with(
        file_contents,
        use_color=test_config.use_color,
        color_config=test_config.color_config,
        squeeze=test_config.squeeze,
        subs=test_config.subs
    )
    mock_page.assert_called_once_with(
        formatted_contents,
        test_config.pager_cmd
    )
def test_get_list_of_all_supported_commands(tmpdir):
    """
    Build a real on-disk examples/custom tree (via pytest's tmpdir fixture)
    and check the annotated, sorted listing: '*' marks entries present in
    both dirs, '+' marks custom-only entries, and 'alias -> target' lines
    reflect the alias dict.
    """
    dir_example = tmpdir.mkdir('examples')
    dir_custom = tmpdir.mkdir('custom')
    # NOTE(review): this local 'config' shadows the imported config module
    # within this test; harmless here but easy to misread.
    config = _create_config(
        examples_dir=str(dir_example),
        custom_dir=str(dir_custom),
    )
    expected = [
        'a-only-default',
        'b-both *',
        'c-only-custom +',
        'd-only-custom-nested +',
        'e-only-default-nested',
        'f-default-custom-nested',
        'g-both-different-levels *',
        't-a-only-default-alias -> a-only-default',
        'u-b-both-alias -> b-both *',
        'v-c-only-custom-alias -> c-only-custom +'
    ]
    aliases = {
        't-a-only-default-alias': 'a-only-default',
        'u-b-both-alias': 'b-both',
        'v-c-only-custom-alias': 'c-only-custom'
    }
    # Make the directory structure we expect.
    dir_example_nested = dir_example.mkdir('default-nested')
    dir_custom_nested = dir_custom.mkdir('custom-nested')
    dir_example.join('a-only-default.md').write('foo')
    dir_example.join('b-both.md').write('foo')
    dir_custom.join('b-both.md').write('foo')
    dir_custom.join('c-only-custom.md').write('foo')
    dir_custom_nested.join('d-only-custom-nested.md').write('foo')
    dir_example_nested.join('e-only-default-nested.md').write('foo')
    dir_example_nested.join('f-default-custom-nested.md').write('foo')
    dir_example.join('g-both-different-levels.md').write('foo')
    dir_custom_nested.join('g-both-different-levels.md').write('foo')
    # Use the 'with' context manager rather than the @decorator, because the
    # tmpdir fixture doesn't play nice with the decorator.
    with patch('eg.util.get_alias_dict') as mock_get_alias:
        mock_get_alias.return_value = aliases
        actual = util.get_list_of_all_supported_commands(config)
        assert actual == expected
        mock_get_alias.assert_called_once_with(config)
def test_list_supported_programs_fails_gracefully_if_no_dirs():
    """With neither examples_dir nor custom_dir set, the listing is empty."""
    test_config = _create_config()
    actual = util.get_list_of_all_supported_commands(test_config)
    target = []
    assert actual == target
# The next four tests all delegate to _helper_assert_about_pager below;
# the third argument is True when the fallback pydoc.pager should be used.
def test_calls_pipepager_if_not_less():
    """
    We're special casing less a bit, as it is the default value, so if a custom
    command has been set that is NOT less, we should call pipepager straight
    away.
    """
    _helper_assert_about_pager('page me plz', 'cat', False)
def test_calls_fallback_pager_if_none():
    """
    If pager_cmd is None, we should just use the fallback pager.
    """
    _helper_assert_about_pager('page me plz', None, True)
def test_calls_pipepager_if_less():
    """
    We should call pipepager if we ask to use less and less is installed on the
    machine.
    """
    _helper_assert_about_pager('a fancy value to page', 'less -R', False)
def test_calls_fallback_if_cmd_is_flag_string():
    """
    We are using a flag string to indicate if we should use the fallback pager.
    """
    _helper_assert_about_pager(
        'page via fallback',
        util.FLAG_FALLBACK,
        True
    )
# Decorators apply bottom-up: pipepager (bottom) is injected first, then
# pydoc.pager as default_pager.
@patch('pydoc.pager')
@patch('pydoc.pipepager')
def _helper_assert_about_pager(
    str_to_page,
    pager_cmd,
    use_fallback,
    pipepager,
    default_pager,
):
    """
    Help with asserting about pager.
    str_to_page: what you're paging
    pager_cmd: the string you're passing to pipepager (or None)
    use_fallback: false if we should actually use pydoc.pipepager, true if we
        instead are going to fallback to pydoc.pager
    """
    util.page_string(str_to_page, pager_cmd)
    if use_fallback:
        default_pager.assert_called_once_with(str_to_page)
        assert pipepager.call_count == 0
    else:
        assert default_pager.call_count == 0
        pipepager.assert_called_once_with(
            str_to_page,
            cmd=pager_cmd
        )
@patch('eg.util.pydoc.pipepager', side_effect=KeyboardInterrupt)
def test_page_string_excepts_keyboard_interrupt_if_not_less(pipepager_mock):
    """
    Do not fail when user hits ctrl-c while in pager.
    """
    try:
        util.page_string('page me plz', 'cat')
    except KeyboardInterrupt:
        raise AssertionError('Should not have got this far')
    pipepager_mock.assert_called_once_with('page me plz', cmd='cat')
@patch('eg.util.pydoc.pager', side_effect=KeyboardInterrupt)
def test_page_string_excepts_keyboard_interrupt_if_none(pager_mock):
    """
    Do not fail when user hits ctrl-c while in pipepager.

    (pager_cmd=None takes the fallback pydoc.pager path.)
    """
    try:
        util.page_string('page me plz', None)
    except KeyboardInterrupt:
        raise AssertionError('Should not have got this far')
    pager_mock.assert_called_once_with('page me plz')
# The next three tests delegate to _helper_assert_file_contents below,
# which mocks the per-file reads; contents are simply concatenated.
def test_get_contents_from_files_handles_none():
    """
    Empty string if no files.
    """
    _helper_assert_file_contents(
        [],
        ''
    )
def test_get_contents_from_files_handles_one():
    """A single file's contents are returned unchanged."""
    file_infos = [
        {
            'path': 'test/path',
            'contents': 'contents of file'
        }
    ]
    combined_contents = 'contents of file'
    _helper_assert_file_contents(
        file_infos,
        combined_contents
    )
def test_get_contents_from_files_handles_multiple():
    """Multiple files are concatenated in order, with no separator added."""
    file_infos = [
        {
            'path': 'path/1',
            'contents': 'foo\n'
        },
        {
            'path': 'path/2/foo',
            'contents': 'bar\n'
        },
        {
            'path': 'another/path',
            'contents': 'baz'
        }
    ]
    combined_contents = 'foo\nbar\nbaz'
    _helper_assert_file_contents(
        file_infos,
        combined_contents
    )
@patch('eg.util._get_contents_of_file')
def _helper_assert_file_contents(
    file_infos,
    target_contents,
    get_contents_mock,
):
    """
    Helper method to assert things about the get_contents_from_files method.
    Does not actually hit the disk.
    file_infos: array of { path, contents } dicts representing files. Array so
        that we can assert proper order calling
    target_contents: the final combined contents that should be returned by the
        get_contents_from_files method.
    """
    # This method will be used by the mock framework to return the right file
    # contents based on the file name.
    def return_file_contents(*args, **kwargs):
        for file_info in file_infos:
            if file_info['path'] == args[0]:
                return file_info['contents']
        raise TypeError('did not find path in test obj')
    get_contents_mock.side_effect = return_file_contents
    paths = [el['path'] for el in file_infos]
    actual = util.get_contents_from_files(*paths)
    assert actual == target_contents
# Decorators apply bottom-up: get_substituted_contents (bottom) is injected
# first as sub_method, then squeeze_method, then color_method.
@patch('eg.util.get_colorized_contents')
@patch('eg.util.get_squeezed_contents')
@patch('eg.util.get_substituted_contents')
def _helper_assert_formatted_contents(
    starting_contents,
    use_color,
    color_config,
    squeeze,
    subs,
    colorized_contents,
    squeezed_contents,
    subbed_contents,
    formatted_result,
    sub_method,
    squeeze_method,
    color_method,
):
    """
    Helper method to assist in asserting things about the
    get_formatted_contents method.
    starting_contents: the starting string that we are working with
    use_color: True if we should use color
    color_config: the color config to be passed to get_colorized_contents
    squeeze: True if we should squeeze
    subs: the list of Substitutions that we should pass to
        get_substituted_contents
    colorized_contents: the result of get_colorized_contents
    squeezed_contents: the result of get_squeezed_contents
    subbed_contents: the result of subbed_contents
    formatted_result: the final, formatted string that should be returned
    """
    sub_method.return_value = subbed_contents
    squeeze_method.return_value = squeezed_contents
    color_method.return_value = colorized_contents
    actual = util.get_formatted_contents(
        starting_contents,
        use_color,
        color_config,
        squeeze,
        subs
    )
    # We'll update the contents as they get formatted to make sure
    # we pass the right thing to the various methods.
    # Expected pipeline order: colorize, then squeeze, then substitute.
    contents_thus_far = starting_contents
    if use_color:
        color_method.assert_called_once_with(
            contents_thus_far,
            color_config
        )
        contents_thus_far = colorized_contents
    else:
        assert color_method.call_count == 0
    if squeeze:
        squeeze_method.assert_called_once_with(contents_thus_far)
        contents_thus_far = squeezed_contents
    else:
        assert squeeze_method.call_count == 0
    if subs:
        sub_method.assert_called_once_with(
            contents_thus_far,
            subs
        )
        contents_thus_far = subbed_contents
    else:
        assert sub_method.call_count == 0
    assert actual == formatted_result
# These five tests drive _helper_assert_formatted_contents with different
# combinations of use_color / squeeze / subs; the last positional argument
# is always the expected final result.
def test_get_formatted_contents_does_not_format_methods_if_all_falsey():
    """
    We should invoke none of the formatter methods if the flags are false and
    subs is not truthy.
    """
    starting_contents = 'this is where we start'
    _helper_assert_formatted_contents(
        starting_contents,
        False,
        'some color config',
        False,
        None,
        'this was colored',
        'this was squeezed',
        'these contents were subbed',
        starting_contents
    )
def test_get_formatted_contents_calls_colorize_if_use_color():
    """
    Colorize the contents if use_color = True.
    """
    starting_contents = 'this is where we start'
    colorized_contents = 'COLORIZED: this is where we start'
    _helper_assert_formatted_contents(
        starting_contents,
        True,
        'some color config',
        False,
        None,
        colorized_contents,
        'this was squeezed',
        'these contents were subbed',
        colorized_contents
    )
def test_get_formatted_contents_squeezes():
    """If squeeze, we need to squeeze."""
    starting_contents = 'this is where we start'
    squeezed_contents = 'this is the result of a squeezing'
    _helper_assert_formatted_contents(
        starting_contents,
        False,
        'some color config',
        True,
        None,
        'this was colored',
        squeezed_contents,
        'these contents were subbed',
        squeezed_contents
    )
def test_get_formatted_contents_subsitutes():
    """If subs is truthy, get_substituted contents should be called."""
    starting_contents = 'this is where we start'
    subbed_contents = 'substituted like a teacher'
    _helper_assert_formatted_contents(
        starting_contents,
        False,
        'some color config',
        False,
        ['truthy', 'list'],
        'this was colored',
        'this was squeezed',
        subbed_contents,
        subbed_contents
    )
def test_perform_all_formatting():
    """
    When use_color, squeeze, and subs are all truthy, all the formatting
    should be applied in that order.
    """
    starting_contents = 'the starting point for grand formatting'
    subbed_contents = 'subbed is the last thing called so should be the result'
    _helper_assert_formatted_contents(
        starting_contents,
        True,
        'some color config',
        True,
        ['truthy', 'list'],
        'this was colored',
        'this was squeezed',
        subbed_contents,
        subbed_contents
    )
def _get_file_as_string(path):
    """Return the entire contents of the file at ``path`` as a string."""
    with open(path, 'r') as handle:
        contents = handle.read()
    return contents
def test_get_squeezed_contents_correctly_squeezes():
    """
    Our squeeze method should follow our convention, which is to remove the
    blank line between a description and an example, to keep two blank lines
    between sections, and otherwise have only single blank lines.
    """
    # PATH_UNSQUEEZED_FILE / PATH_SQUEEZED_FILE are fixture paths defined
    # elsewhere in this test module (outside this excerpt).
    unsqueezed = _get_file_as_string(PATH_UNSQUEEZED_FILE)
    # the target squeezed output is a reference implementation in
    # pwd_squeezed.md.
    target = _get_file_as_string(PATH_SQUEEZED_FILE)
    actual = util.get_squeezed_contents(unsqueezed)
    assert actual == target
def test_get_substituted_contents_handles_empty_subs():
    """Nothing should be formatted if there are no substitutions."""
    raw_contents = 'this should not be subbed'
    actual = util.get_substituted_contents(raw_contents, [])
    assert actual == raw_contents
def test_get_substituted_contents_substitutes_calls_correct_methods():
    """
    The get_substituted_contents method calls things in the correct order.

    Each Substitution's apply_and_get_result should receive the output of
    the previous one (a left fold over the subs list).
    """
    sub_one = Mock(auto_spec=substitute.Substitution)
    sub_one_result = 'result of sub one'
    sub_one.apply_and_get_result.return_value = sub_one_result
    sub_two = Mock(auto_spec=substitute.Substitution)
    sub_two_result = 'result of sub two'
    sub_two.apply_and_get_result.return_value = sub_two_result
    starting_contents = 'the string we should be substituting into'
    target = sub_two_result
    subs = [sub_one, sub_two]
    actual = util.get_substituted_contents(starting_contents, subs)
    sub_one.apply_and_get_result.assert_called_once_with(starting_contents)
    sub_two.apply_and_get_result.assert_called_once_with(sub_one_result)
    assert actual == target
def test_get_substituted_contents_substitutes_correctly():
    """
    Basic test to make sure Substitutions can get applied correctly.
    """
    # Substitution(pattern, replacement, is_multiline) -- presumably; the
    # third argument appears to toggle multiline matching (TODO confirm).
    sub_one = substitute.Substitution('foo', 'bar', False)
    sub_two = substitute.Substitution('bar\n\n', 'baz\n', True)
    start = 'foo\n\n something else\n\n bar\n\n'
    target = 'baz\n something else\n\n baz\n'
    subs = [sub_one, sub_two]
    actual = util.get_substituted_contents(start, subs)
    assert actual == target
@patch('eg.color.EgColorizer')
def test_get_colorized_contents_calls_methods(patched_colorizer_class):
    """
    We should call the correct methods on the EgColorizer objects when we color
    a file.
    """
    raw_contents = 'these are uncolored contents'
    colored_contents = 'COLORED: ' + raw_contents
    color_config = 'some color config'
    # The actual instance created by these calls is stored at return_value.
    colorizer_instance = patched_colorizer_class.return_value
    colorizer_instance.colorize_text.return_value = colored_contents
    actual = util.get_colorized_contents(raw_contents, color_config)
    assert actual == colored_contents
    colorizer_instance.colorize_text.assert_called_once_with(raw_contents)
@patch('eg.util.get_alias_dict')
def _helper_assert_get_resolved_program(
    program,
    resolved_program,
    config_obj,
    alias_dict,
    mock_dict,
):
    """
    Assert that resolving ``program`` against ``alias_dict`` yields
    ``resolved_program``.
    program: the program to resolved for as an alias
    resolved_program: the result of the resolution.
    config_obj: the config_obj to use toe resolve the alias path
    alias_dict: the dict of aliases to be returned
    """
    mock_dict.return_value = alias_dict
    actual = util.get_resolved_program(program, config_obj)
    assert actual == resolved_program
    mock_dict.assert_called_once_with(config_obj)
def test_get_resolved_program_no_alias():
    """
    A program that is not an alias should return itself.
    """
    alias_dict = {
        'link': 'ln',
        'nc': 'netcat'
    }
    config_obj = 'a config'
    # BUG FIX: this test previously passed ('link', 'ln'), which exercises
    # alias *resolution*, contradicting its name and docstring. 'cp' is not
    # in alias_dict, so it must resolve to itself.
    _helper_assert_get_resolved_program('cp', 'cp', config_obj, alias_dict)
def test_get_resolved_program_is_alias():
    """
    A program that is an alias should return the resolved value.
    """
    alias_dict = {
        'link': 'ln',
        'nc': 'netcat'
    }
    config_obj = 'some new config'
    # BUG FIX: this test previously passed ('cp', 'cp'), the non-alias case.
    # 'link' is an alias for 'ln' and must resolve to 'ln'.
    _helper_assert_get_resolved_program('link', 'ln', config_obj, alias_dict)
def test_get_alias_dict_returns_contents_of_correct_file():
    """
    get_alias_dict should read data from the file at the default path.
    """
    alias_dict = {
        'link': 'ln',
        'nc': 'netcat'
    }
    config_obj = _create_config(
        examples_dir='path/to/examples/dir',
    )
    alias_file_path = 'path/to/alias/file'
    alias_dict_str = json.dumps(alias_dict)
    # Final True: the alias file exists, so its contents should be parsed.
    _helper_assert_get_alias_dict(
        alias_dict_str,
        alias_dict,
        config_obj,
        alias_file_path,
        True
    )
def test_get_alias_dict_fails_gracefully_if_not_file():
    """
    Since users can specify a directory for examples that might not contain the
    aliases file, we want to fail gracefully if the file doesn't exist.
    """
    contents_of_alias_dict_file = 'should never be reached'
    config_obj = _create_config(
        examples_dir='path/to/examples/dir',
    )
    alias_file_path = 'path/to/the/alias/file'
    # Final False: the alias file is absent, so an empty dict is expected.
    _helper_assert_get_alias_dict(
        contents_of_alias_dict_file,
        {},
        config_obj,
        alias_file_path,
        False
    )
# Decorators apply bottom-up: os.path.isfile (bottom) is injected first.
@patch('eg.util._get_contents_of_file')
@patch('eg.util._get_alias_file_path')
@patch('os.path.isfile')
def _helper_assert_get_alias_dict(
    contents_of_alias_dict_file,
    target_alias_dict,
    config_obj,
    alias_file_path,
    alias_file_path_is_file,
    mock_is_file,
    mock_get_alias_file_path,
    mock_get_contents,
):
    """
    contents_of_alias_dict_file: the string contents of the file storing the
        dictionary of aliases
    target_alias_dict: the target result of get_alias_dict
    config_obj: the Config object
    alias_file_path: the path to be returned by _get_alias_file_path
    alias_file_path_is_file: True if the alias path is a file, else False
    """
    mock_is_file.return_value = alias_file_path_is_file
    mock_get_alias_file_path.return_value = alias_file_path
    mock_get_contents.return_value = contents_of_alias_dict_file
    actual = util.get_alias_dict(config_obj)
    assert actual == target_alias_dict
    mock_get_alias_file_path.assert_called_once_with(config_obj)
    mock_is_file.assert_called_once_with(alias_file_path)
    # The file must only be read when it actually exists.
    if alias_file_path_is_file:
        mock_get_contents.assert_called_once_with(alias_file_path)
    else:
        assert mock_get_contents.call_count == 0
@patch('os.path.join')
def test_get_alias_file_path(mock_join):
    """
    _get_alias_file_path should just join the example dir and the alias file
    name, to make sure we look in the right place for the file.
    """
    config_obj = _create_config(
        examples_dir='handy/dandy/examples/dir',
    )
    join_result = 'joined path'
    mock_join.return_value = join_result
    actual = util._get_alias_file_path(config_obj)
    assert actual == join_result
    mock_join.assert_called_once_with(
        config_obj.examples_dir,
        util.ALIAS_FILE_NAME
    )
def test_is_example_file_true_if_has_suffix():
    """
    Should be true if ends in EXAMPLE_FILE_SUFFIX.
    """
    file_name = 'find.md'
    actual = util._is_example_file(file_name)
    # Idiom fix: comparing to True/False with == is un-Pythonic (PEP 8,
    # flake8 E712); assert truthiness directly.
    assert actual
def test_is_example_file_false_if_not_suffix():
    """
    Should be false if the file does not end in EXAMPLE_FILE_SUFFIX.
    """
    # Renamed from test_is_example_file_true_if_not_suffix: the old name
    # said "true" while the docstring and assertion check for False.
    file_name = 'aliases.json'
    actual = util._is_example_file(file_name)
    assert not actual
def test_can_parse_alias_file():
    """
    Make sure aliases.json file can be parsed.
    This is to make sure an edit doesn't accidentally corrupt it.

    Note: this test reads the real packaged aliases file from disk, unlike
    the mocked tests above.
    """
    # We'll have to hardcode this.
    alias_file_path = os.path.join(
        config.DEFAULT_EXAMPLES_DIR,
        util.ALIAS_FILE_NAME
    )
    alias_file_contents = util._get_contents_of_file(alias_file_path)
    alias_dict = json.loads(alias_file_contents)
    # We'll check that link goes to ln, as we know that one will be present.
    assert alias_dict['link'] == 'ln'
# Decorators apply bottom-up: subprocess.call (bottom) is injected first.
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_correct_with_custom_dir(
    mock_call,
    mock_get_paths,
    mock_get_program,
    mock_inform,
    mock_exists,
):
    """
    We should resolve aliases, get the custom file path, and call subprocess.
    """
    program = 'du'
    resolved_program = 'alias for du'
    config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
    paths = ['path/to/custom/du.md', 'foo.md']
    mock_get_program.return_value = resolved_program
    mock_get_paths.return_value = paths
    mock_exists.return_value = True
    util.edit_custom_examples(program, config)
    mock_get_program.assert_called_once_with(program, config)
    mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
    # Only the first matching path is opened in the editor.
    mock_call.assert_called_once_with([config.editor_cmd, paths[0]])
    assert mock_inform.call_count == 0
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_creates_file_if_none_exist(
    mock_call,
    mock_get_paths,
    mock_get_program,
    mock_inform,
    mock_exists,
):
    """With no existing example files, a new <resolved>.md path is edited."""
    program = 'du'
    resolved_program = 'alias-for-du'
    config = _create_config(custom_dir='path/to/custom', editor_cmd='nano')
    paths = []
    mock_get_program.return_value = resolved_program
    mock_get_paths.return_value = paths
    mock_exists.return_value = True
    util.edit_custom_examples(program, config)
    mock_get_program.assert_called_once_with(program, config)
    mock_get_paths.assert_called_once_with(resolved_program, config.custom_dir)
    mock_call.assert_called_once_with(
        [config.editor_cmd, 'path/to/custom/alias-for-du.md'])
    assert mock_inform.call_count == 0
@patch('os.path.exists')
@patch('eg.util._inform_cannot_edit_no_custom_dir')
@patch('eg.util.get_resolved_program')
@patch('eg.util.get_file_paths_for_program')
@patch('subprocess.call')
def test_edit_custom_examples_informs_if_no_custom_dir(
    mock_call,
    mock_get_paths,
    mock_get_program,
    mock_inform,
    mock_exists,
):
    """
    We should inform the user if they are trying to edit with no custom dir.
    This should be true if it is not set and if the path does not exist.
    """
    program = 'awk'
    # First with no custom dir set.
    config = _create_config(editor_cmd='vi -e')
    mock_exists.return_value = True
    util.edit_custom_examples(program, config)
    assert mock_inform.call_count == 1
    # And now with it set but a nonexistent path.
    config = _create_config(custom_dir='/path/to/custom', editor_cmd='vi -e')
    mock_exists.return_value = False
    util.edit_custom_examples(program, config)
    assert mock_inform.call_count == 2
    assert mock_call.call_count == 0
    assert mock_get_paths.call_count == 0
    assert mock_get_program.call_count == 0
|
{
"content_hash": "af66375711eef95037dc215bdabcb857",
"timestamp": "",
"source": "github",
"line_count": 1131,
"max_line_length": 79,
"avg_line_length": 28.261715296198055,
"alnum_prop": 0.6323050932298836,
"repo_name": "scorphus/eg",
"id": "d8743971ba681c7d60eda6356538b2e7c98a6660",
"size": "31964",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/util_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "114912"
}
],
"symlink_target": ""
}
|
"""File data sources for traits GUIs."""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD-3-Clause
import os
import os.path as op
import numpy as np
from traits.api import (Any, HasTraits, HasPrivateTraits, cached_property,
on_trait_change, Array, Bool, Button, DelegatesTo,
Directory, Enum, Event, File, Instance, Int, List,
Property, Str, ArrayOrNone, BaseFile)
from traitsui.api import View, Item, VGroup
from pyface.api import DirectoryDialog, OK, ProgressDialog, error, information
from ._viewer import _DIG_SOURCE_WIDTH
from ..bem import read_bem_surfaces
from ..io.constants import FIFF
from ..io import read_info, read_fiducials, read_raw
from ..io._read_raw import supported
from ..io.meas_info import _empty_info
from ..io.open import fiff_open, dir_tree_find
from ..surface import read_surface, complete_surface_info
from ..coreg import (_is_mri_subject, _mri_subject_has_bem,
create_default_subject)
from ..utils import get_config, set_config
from ..viz._3d import _fiducial_coords
from ..channels import read_dig_fif
# File-dialog wildcard patterns for fiducials and head<->MRI trans files.
fid_wildcard = "*.fif"
trans_wildcard = "*.fif"
# for wx backend:
# fid_wildcard = "Fiducials FIFF file (*.fif)|*.fif"
# trans_wildcard = "Trans File (*.fif)|*.fif"
def _expand_path(p):
    """Expand ``~`` and environment variables in *p* and make it absolute."""
    expanded = op.expanduser(p)
    expanded = op.expandvars(expanded)
    return op.abspath(expanded)
def get_fs_home():
    """Get the FREESURFER_HOME directory.
    Returns
    -------
    fs_home : None | str
        The FREESURFER_HOME path or None if the user cancels.
    Notes
    -----
    If FREESURFER_HOME can't be found, the user is prompted with a file dialog.
    If specified successfully, the resulting path is stored with
    mne.set_config().
    """
    # Delegates to the generic root-directory prompt loop with the
    # FreeSurfer-specific sanity check.
    return _get_root_home('FREESURFER_HOME', 'freesurfer', _fs_home_problem)
def _get_root_home(cfg, name, check_fun):
    """Prompt (via GUI dialogs) until a valid root directory is selected.

    Parameters
    ----------
    cfg : str
        The config key / environment variable name (e.g. 'FREESURFER_HOME').
    name : str
        Human-readable name of the installation, used in dialog text.
    check_fun : callable
        Takes a candidate path and returns a problem description (str) or
        None if the path is acceptable.

    Returns
    -------
    root : str | None
        The validated directory, or None if the user cancels the dialog.
    """
    root = get_config(cfg)
    problem = check_fun(root)
    # Keep prompting until the path validates or the user cancels.
    while problem:
        info = ("Please select the %s directory. This is the root "
                "directory of the %s installation." % (cfg, name))
        msg = '\n\n'.join((problem, info))
        information(None, msg, "Select the %s Directory" % cfg)
        msg = "Please select the %s Directory" % cfg
        dlg = DirectoryDialog(message=msg, new_directory=False)
        if dlg.open() == OK:
            root = dlg.path
            problem = check_fun(root)
            if problem is None:
                # Persist the validated path for future sessions; do not
                # modify the process environment here.
                set_config(cfg, root, set_env=False)
        else:
            return None
    return root
def set_fs_home():
    """Set the FREESURFER_HOME environment variable.

    Returns
    -------
    success : bool
        True if the environment variable could be set, False if
        FREESURFER_HOME could not be found.

    Notes
    -----
    If FREESURFER_HOME can't be found, the user is prompted with a file
    dialog. If specified successfully, the resulting path is stored with
    mne.set_config().
    """
    # Guard clause instead of if/else: bail out early on failure.
    fs_home = get_fs_home()
    if fs_home is None:
        return False
    os.environ['FREESURFER_HOME'] = fs_home
    return True
def _fs_home_problem(fs_home):
    """Check FREESURFER_HOME path.
    Return str describing problem or None if the path is okay.
    """
    # Flat guard clauses; an explicit final return None replaces the
    # implicit fall-through of the original if/elif/else chain.
    if fs_home is None:
        return "FREESURFER_HOME is not set."
    if not op.exists(fs_home):
        return "FREESURFER_HOME (%s) does not exist." % fs_home
    # A valid FreeSurfer install always ships the fsaverage subject.
    if not op.exists(op.join(fs_home, 'subjects', 'fsaverage')):
        return ("FREESURFER_HOME (%s) does not contain the fsaverage "
                "subject." % fs_home)
    return None
def _mne_root_problem(mne_root):
    """Check MNE_ROOT path.
    Return str describing problem or None if the path is okay.
    """
    # Flat guard clauses; an explicit final return None replaces the
    # implicit fall-through of the original if/elif/else chain.
    if mne_root is None:
        return "MNE_ROOT is not set."
    if not op.exists(mne_root):
        return "MNE_ROOT (%s) does not exist." % mne_root
    # A complete MNE-C install ships share/mne/mne_analyze.
    if not op.exists(op.join(mne_root, 'share', 'mne', 'mne_analyze')):
        return ("MNE_ROOT (%s) is missing files. If this is your MNE "
                "installation, consider reinstalling." % mne_root)
    return None
class FileOrDir(File):
    """Subclass File because *.mff files are actually directories."""
    def validate(self, object, name, value):
        """Validate that a specified value is valid for this trait."""
        value = os.fspath(value)
        # Deliberately skip BaseFile's validation (which would reject
        # directories) by calling the grandparent implementation via
        # super(BaseFile, self).
        validated_value = super(BaseFile, self).validate(object, name, value)
        if not self.exists:
            return validated_value
        elif op.exists(value):
            # Accept any existing filesystem entry, including directories
            # such as *.mff bundles.
            return validated_value
        self.error(object, name, value)
class Surf(HasTraits):
    """Expose a surface similar to the ones used elsewhere in MNE."""
    # rr: vertex positions (n, 3); nn: vertex normals (n, 3);
    # tris: triangle vertex indices (n_tri, 3).
    # NOTE(review): the default value for tris is a float array here, while
    # SurfaceSource._surf_default uses np.empty((0, 3), int) -- confirm
    # whether the dtype inconsistency matters downstream.
    rr = Array(shape=(None, 3), value=np.empty((0, 3)))
    nn = Array(shape=(None, 3), value=np.empty((0, 3)))
    tris = Array(shape=(None, 3), value=np.empty((0, 3)))
class SurfaceSource(HasTraits):
    """Expose points and tris of a file storing a surface.

    Parameters
    ----------
    file : File
        Path to a *-bem.fif file or a surface containing a Freesurfer surface.

    Attributes
    ----------
    pts : Array, shape = (n_pts, 3)
        Point coordinates.
    tris : Array, shape = (n_tri, 3)
        Triangles.

    Notes
    -----
    tri is always updated after pts, so in case downstream objects depend on
    both, they should sync to a change in tris.
    """

    file = File(exists=True, filter=['*.fif', '*.*'])
    surf = Instance(Surf)

    @on_trait_change('file')
    def read_file(self):
        """Read the file."""
        if op.exists(self.file):
            if self.file.endswith('.fif'):
                bem = read_bem_surfaces(
                    self.file, on_defects='warn', verbose=False
                )[0]
            else:
                try:
                    # Freesurfer surface: coordinates are in mm, convert to m.
                    bem = read_surface(self.file, return_dict=True)[2]
                    bem['rr'] *= 1e-3
                    complete_surface_info(bem, copy=False)
                except Exception:
                    error(parent=None,
                          message="Error loading surface from %s (see "
                          "Terminal for details)." % self.file,
                          title="Error Loading Surface")
                    self.reset_traits(['file'])
                    raise
            self.surf = Surf(rr=bem['rr'], tris=bem['tris'], nn=bem['nn'])
        else:
            # BUG FIX: the traits default initializer is named
            # ``_surf_default`` (traits' ``_<trait>_default`` convention);
            # the previous ``self._default_surf()`` call referenced a method
            # that does not exist and would raise AttributeError whenever
            # the file path was cleared or invalid.
            self.surf = self._surf_default()

    def _surf_default(self):
        # Empty surface used until a valid file is loaded.
        return Surf(rr=np.empty((0, 3)),
                    tris=np.empty((0, 3), int), nn=np.empty((0, 3)))
class FiducialsSource(HasTraits):
    """Expose points of a given fiducials fif file.
    Parameters
    ----------
    file : File
        Path to a fif file with fiducials (*.fif).
    Attributes
    ----------
    points : Array, shape = (n_points, 3)
        Fiducials file points.
    """
    file = File(filter=[fid_wildcard])
    # fname: basename of the selected file, recomputed when file changes.
    fname = Property(depends_on='file')
    # points: fiducial coordinates read from the file, or the MNI fallback.
    points = Property(ArrayOrNone, depends_on='file')
    # mni_points: fallback coordinates (e.g. MNI estimates) used when no
    # file is set; may be None.
    mni_points = ArrayOrNone(float, shape=(3, 3))
    def _get_fname(self):
        return op.basename(self.file)
    @cached_property
    def _get_points(self):
        if not op.exists(self.file):
            return self.mni_points  # can be None
        try:
            return _fiducial_coords(*read_fiducials(self.file))
        except Exception as err:
            # Surface the failure in a GUI dialog, clear the bad path, and
            # re-raise so callers can react.
            error(None, "Error reading fiducials from %s: %s (See terminal "
                  "for more information)" % (self.fname, str(err)),
                  "Error Reading Fiducials")
            self.reset_traits(['file'])
            raise
class DigSource(HasPrivateTraits):
    """Expose digitization information from a file.

    Parameters
    ----------
    file : File
        Path to the BEM file (*.fif).

    Attributes
    ----------
    fid : Array, shape = (3, 3)
        Each row contains the coordinates for one fiducial point, in the order
        Nasion, RAP, LAP. If no file is set all values are 0.
    """

    file = FileOrDir(exists=True,
                     filter=[' '.join([f'*{ext}' for ext in supported])])
    inst_fname = Property(Str, depends_on='file')
    inst_dir = Property(depends_on='file')
    _info = Property(depends_on='file')
    points_filter = Any(desc="Index to select a subset of the head shape "
                        "points")
    n_omitted = Property(Int, depends_on=['points_filter'])

    # head shape
    _hsp_points = Property(depends_on='_info',
                           desc="Head shape points in the file (n x 3 array)")
    points = Property(depends_on=['_hsp_points', 'points_filter'],
                      desc="Head shape points selected by the filter (n x 3 "
                      "array)")

    # fiducials
    lpa = Property(depends_on='_info',
                   desc="LPA coordinates (1 x 3 array)")
    nasion = Property(depends_on='_info',
                      desc="Nasion coordinates (1 x 3 array)")
    rpa = Property(depends_on='_info',
                   desc="RPA coordinates (1 x 3 array)")

    # EEG
    eeg_points = Property(depends_on='_info',
                          desc="EEG sensor coordinates (N x 3 array)")
    hpi_points = Property(depends_on='_info',
                          desc='HPI coil coordinates (N x 3 array)')
    view = View(Item('file', width=_DIG_SOURCE_WIDTH, tooltip='FIF file '
                     '(Raw, Epochs, Evoked, or DigMontage)', show_label=False))

    @cached_property
    def _get_n_omitted(self):
        """Number of head-shape points excluded by ``points_filter``."""
        if self.points_filter is None:
            return 0
        else:
            return np.sum(self.points_filter == False)  # noqa: E712

    @cached_property
    def _get__info(self):
        """Read measurement info (or digitization-only info) from ``file``.

        Returns None — after resetting the ``file`` trait and showing a GUI
        error dialog — when the file lacks digitization info or the three
        cardinal points cannot be obtained.
        """
        if not self.file:
            return
        elif self.file.endswith(('.fif', '.fif.gz')):
            info = None
            fid, tree, _ = fiff_open(self.file)
            fid.close()
            if len(dir_tree_find(tree, FIFF.FIFFB_MEAS_INFO)) > 0:
                info = read_info(self.file, verbose=False)
            elif len(dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)) > 0:
                # digitization-only file: wrap the dig points in empty info
                info = _empty_info(1)
                info['dig'] = read_dig_fif(fname=self.file).dig
                info._unlocked = False
        else:
            info = read_raw(self.file).info
        # check that digitizer info is present
        if info is None or info['dig'] is None:
            error(None, "The selected file does not contain digitization "
                        "information. Please select a different file.",
                  "Error Reading Digitization File")
            self.reset_traits(['file'])
            return
        # check that all fiducial points are present
        point_kinds = {d['kind'] for d in info['dig']}
        missing = [key for key in ('LPA', 'Nasion', 'RPA') if
                   getattr(FIFF, f'FIFFV_POINT_{key.upper()}') not in
                   point_kinds]
        if missing:
            points = _fiducial_coords(info['dig'])
            # BUG FIX: this was ``len(points == 3)``, i.e. the length of an
            # elementwise comparison array — truthy for ANY non-empty array.
            # The intent is clearly "exactly three fiducial points found".
            if len(points) == 3:
                _append_fiducials(info['dig'], *points.T)
            else:
                error(None, "The selected digitization file does not contain "
                            f"all cardinal points (missing: {', '.join(missing)}). "
                            "Please select a different file.",
                      "Error Reading Digitization File")
                self.reset_traits(['file'])
                return
        return info

    @cached_property
    def _get_inst_dir(self):
        return op.dirname(self.file)

    @cached_property
    def _get_inst_fname(self):
        # '-' placeholder when no file is selected
        if self.file:
            return op.basename(self.file)
        else:
            return '-'

    @cached_property
    def _get__hsp_points(self):
        """All extra (head-shape) digitization points from the file."""
        if not self._info or not self._info['dig']:
            return np.empty((0, 3))
        points = np.array([d['r'] for d in self._info['dig']
                           if d['kind'] == FIFF.FIFFV_POINT_EXTRA])
        points = np.empty((0, 3)) if len(points) == 0 else points
        return points

    @cached_property
    def _get_points(self):
        """Head-shape points after applying ``points_filter`` (if any)."""
        if self.points_filter is None:
            return self._hsp_points
        else:
            return self._hsp_points[self.points_filter]

    def _cardinal_point(self, ident):
        """Coordinates for a cardinal point (zeros when unavailable)."""
        if not self._info or not self._info['dig']:
            return np.zeros((1, 3))
        for d in self._info['dig']:
            if d['kind'] == FIFF.FIFFV_POINT_CARDINAL and d['ident'] == ident:
                return d['r'][None, :]
        return np.zeros((1, 3))

    @cached_property
    def _get_nasion(self):
        return self._cardinal_point(FIFF.FIFFV_POINT_NASION)

    @cached_property
    def _get_lpa(self):
        return self._cardinal_point(FIFF.FIFFV_POINT_LPA)

    @cached_property
    def _get_rpa(self):
        return self._cardinal_point(FIFF.FIFFV_POINT_RPA)

    @cached_property
    def _get_eeg_points(self):
        """EEG sensor positions already in head coordinates."""
        if not self._info or not self._info['dig']:
            return np.empty((0, 3))
        out = [d['r'] for d in self._info['dig'] if
               d['kind'] == FIFF.FIFFV_POINT_EEG and
               d['coord_frame'] == FIFF.FIFFV_COORD_HEAD]
        out = np.empty((0, 3)) if len(out) == 0 else np.array(out)
        return out

    @cached_property
    def _get_hpi_points(self):
        """HPI coil positions already in head coordinates."""
        if not self._info or not self._info['dig']:
            return np.zeros((0, 3))
        out = [d['r'] for d in self._info['dig'] if
               d['kind'] == FIFF.FIFFV_POINT_HPI and
               d['coord_frame'] == FIFF.FIFFV_COORD_HEAD]
        out = np.empty((0, 3)) if len(out) == 0 else np.array(out)
        return out

    def _file_changed(self):
        # a new file invalidates any previously applied head-shape filter
        self.reset_traits(('points_filter',))
def _append_fiducials(dig, lpa, nasion, rpa):
    """Append LPA, nasion, and RPA as cardinal points to a dig list."""
    cardinal_points = ((FIFF.FIFFV_POINT_LPA, lpa),
                       (FIFF.FIFFV_POINT_NASION, nasion),
                       (FIFF.FIFFV_POINT_RPA, rpa))
    for ident, coords in cardinal_points:
        dig.append({'coord_frame': FIFF.FIFFV_COORD_HEAD,
                    'ident': ident,
                    'kind': FIFF.FIFFV_POINT_CARDINAL,
                    'r': coords})
class MRISubjectSource(HasPrivateTraits):
    """Find subjects in SUBJECTS_DIR and select one.

    Parameters
    ----------
    subjects_dir : directory
        SUBJECTS_DIR.
    subject : str
        Subject, corresponding to a folder in SUBJECTS_DIR.
    """

    refresh = Event(desc="Refresh the subject list based on the directory "
                    "structure of subjects_dir.")
    # settings
    subjects_dir = Directory(exists=True)
    subjects = Property(List(Str), depends_on=['subjects_dir', 'refresh'])
    subject = Enum(values='subjects')
    # info
    can_create_fsaverage = Property(Bool, depends_on=['subjects_dir',
                                                      'subjects'])
    subject_has_bem = Property(Bool, depends_on=['subjects_dir', 'subject'],
                               desc="whether the subject has a file matching "
                               "the bem file name pattern")
    bem_pattern = Property(depends_on='mri_dir')

    @cached_property
    def _get_can_create_fsaverage(self):
        # fsaverage can only be created once, into an existing directory
        if not op.exists(self.subjects_dir) or 'fsaverage' in self.subjects:
            return False
        return True

    @cached_property
    def _get_mri_dir(self):
        """Directory of the selected subject (None when unset)."""
        if not self.subject:
            return
        elif not self.subjects_dir:
            return
        else:
            return op.join(self.subjects_dir, self.subject)

    @cached_property
    def _get_subjects(self):
        """List valid MRI subjects in ``subjects_dir`` ([''] when none)."""
        sdir = self.subjects_dir
        is_dir = sdir and op.isdir(sdir)
        if is_dir:
            dir_content = os.listdir(sdir)
            subjects = [s for s in dir_content if _is_mri_subject(s, sdir)]
            if len(subjects) == 0:
                subjects.append('')
        else:
            subjects = ['']
        return sorted(subjects)

    @cached_property
    def _get_subject_has_bem(self):
        if not self.subject:
            return False
        return _mri_subject_has_bem(self.subject, self.subjects_dir)

    def create_fsaverage(self):  # noqa: D102
        """Copy the fsaverage files into ``subjects_dir`` and select them.

        Raises
        ------
        RuntimeError
            If no subjects_dir is set or FreeSurfer is not available.
        """
        if not self.subjects_dir:
            raise RuntimeError(
                "No subjects directory is selected. Please specify "
                "subjects_dir first.")
        fs_home = get_fs_home()
        if fs_home is None:
            raise RuntimeError(
                "FreeSurfer contains files that are needed for copying the "
                "fsaverage brain. Please install FreeSurfer and try again.")
        create_default_subject(fs_home=fs_home, update=True,
                               subjects_dir=self.subjects_dir)
        self.refresh = True
        self.subject = 'fsaverage'

    @on_trait_change('subjects_dir')
    def _emit_subject(self):
        # This silliness is the only way I could figure out to get the
        # on_trait_change('subject_panel.subject') in CoregFrame to work!
        self.subject = self.subject
class SubjectSelectorPanel(HasPrivateTraits):
    """Subject selector panel."""

    # underlying data model; traits below simply delegate to it
    model = Instance(MRISubjectSource)

    can_create_fsaverage = DelegatesTo('model')
    subjects_dir = DelegatesTo('model')
    subject = DelegatesTo('model')
    subjects = DelegatesTo('model')

    create_fsaverage = Button(
        u"fsaverage⇨SUBJECTS_DIR",
        desc="whether to copy the files for the fsaverage subject to the "
             "subjects directory. This button is disabled if "
             "fsaverage already exists in the selected subjects directory.")

    view = View(VGroup(Item('subjects_dir', width=_DIG_SOURCE_WIDTH,
                            tooltip='Subject MRI structurals (SUBJECTS_DIR)'),
                       Item('subject', width=_DIG_SOURCE_WIDTH,
                            tooltip='Subject to use within SUBJECTS_DIR'),
                       Item('create_fsaverage',
                            enabled_when='can_create_fsaverage',
                            width=_DIG_SOURCE_WIDTH),
                       show_labels=False))

    def _create_fsaverage_fired(self):
        """Copy fsaverage while showing a progress dialog; errors re-raise."""
        # progress dialog with indefinite progress bar
        title = "Creating FsAverage ..."
        message = "Copying fsaverage files ..."
        prog = ProgressDialog(title=title, message=message)
        prog.open()
        prog.update(0)
        try:
            self.model.create_fsaverage()
        except Exception as err:
            error(None, str(err), "Error Creating FsAverage")
            raise
        finally:
            prog.close()

    def _subjects_dir_changed(self, old, new):
        # warn when the chosen directory contains no usable MRI subjects
        if new and self.subjects == ['']:
            information(None, "The directory selected as subjects-directory "
                        "(%s) does not contain any valid MRI subjects. If "
                        "this is not expected make sure all MRI subjects have "
                        "head surface model files which "
                        "can be created by running:\n\n    $ mne "
                        "make_scalp_surfaces" % self.subjects_dir,
                        "No Subjects Found")
|
{
"content_hash": "a3d2584e9260f1b54d3e24f1ee818cde",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 79,
"avg_line_length": 33.845486111111114,
"alnum_prop": 0.5629648627853295,
"repo_name": "bloyl/mne-python",
"id": "16d82059e52c8573514342f8511b173eb96b3e65",
"size": "19521",
"binary": false,
"copies": "1",
"ref": "refs/heads/placeholder",
"path": "mne/gui/_file_traits.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "Makefile",
"bytes": "4450"
},
{
"name": "Python",
"bytes": "8190297"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import io
from math import fsum
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Shows the length (in hours) of a Kaldi folder. Either the segment file or the utt2dur file needs to be present.')
parser.add_argument('-f', '--folder', dest='folder', help='The kaldi folder for which the length (in hours) should be calculated', type=str, default='')
args = parser.parse_args()
if args.folder == '':
print('You have to specify a Kaldi data folder with the -f flag.')
folder = args.folder
if folder[-1] != '/':
folder += '/'
try:
with io.open(folder + 'utt2dur') as utt2dur:
sum_list = []
for line in utt2dur:
if line[-1]=='\n':
line = line[:-1]
sum_list += [float(line.split()[1])]
hours = fsum(sum_list) / 60.0 / 60.0
print('Utt2dur file: ', args.folder, 'is', hours, ' hours!')
except:
print('Could not open/process utt2dur file in:', args.folder)
try:
with io.open(folder + 'segments') as segments:
sum_list = []
for line in segments:
diff = float(line.split()[3]) - float(line.split()[2])
sum_list += [diff]
if diff > 2000:
print('Warning over-long segment:', diff)
print(line)
if float(line.split()[2]) > float(line.split()[3]):
print('Warning, end marker before start:', line)
elif float(line.split()[2]) == float(line.split()[3]):
print('Warning, end marker == start marker:', line)
hours = fsum(sum_list) / 60.0 / 60.0
print('Segment file: ', args.folder, 'is', hours, ' hours!')
except:
print('Could not open/process segments file in:', args.folder)
|
{
"content_hash": "d334f4bd98251f98948355300d62d060",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 163,
"avg_line_length": 35.627450980392155,
"alnum_prop": 0.57347275729224,
"repo_name": "tudarmstadt-lt/kaldi-tuda-de",
"id": "9749c3c1268e62df252687b9b19df63fac8c12d2",
"size": "2482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "s5_r2/local/view_data_length.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "113434"
},
{
"name": "Shell",
"bytes": "172875"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import itsdangerous
import mock
import pytest
import pytz
from django.utils import timezone
from addons.base.utils import get_mfr_url
from addons.github.models import GithubFileNode
from addons.osfstorage import settings as osfstorage_settings
from addons.osfstorage.listeners import checkin_files_task
from api.base.settings.defaults import API_BASE
from api_tests import utils as api_utils
from framework.auth.core import Auth
from osf.models import NodeLog, Session, QuickFilesNode
from osf.utils.permissions import WRITE, READ
from osf.utils.workflows import DefaultStates
from osf_tests.factories import (
AuthUserFactory,
CommentFactory,
ProjectFactory,
UserFactory,
PreprintFactory,
)
from website import settings as website_settings
# stolen from^W^Winspired by DRF
# rest_framework.fields.DateTimeField.to_representation
def _dt_to_iso8601(value):
iso8601 = value.isoformat()
if iso8601.endswith('+00:00'):
iso8601 = iso8601[:-6] + 'Z' # microsecond precision
return iso8601
@pytest.fixture()
def user():
    # A user with auth credentials, shared by all test classes in this module.
    return AuthUserFactory()
@pytest.mark.django_db
@pytest.mark.enable_quickfiles_creation
class TestFileView:
    """Tests for the file detail endpoint (``/v2/files/<id>/``)."""

    @pytest.fixture()
    def node(self, user):
        return ProjectFactory(creator=user, comment_level='public')

    @pytest.fixture()
    def quickfiles_node(self, user):
        return QuickFilesNode.objects.get(creator=user)

    @pytest.fixture()
    def file(self, user, node):
        return api_utils.create_test_file(node, user, create_guid=False)

    @pytest.fixture()
    def file_url(self, file):
        return '/{}files/{}/'.format(API_BASE, file._id)

    def test_must_have_auth_and_be_contributor(self, app, file_url):
        # test_must_have_auth(self, app, file_url):
        res = app.get(file_url, expect_errors=True)
        assert res.status_code == 401
        # test_must_be_contributor(self, app, file_url):
        non_contributor = AuthUserFactory()
        res = app.get(file_url, auth=non_contributor.auth, expect_errors=True)
        assert res.status_code == 403

    def test_deleted_file_return_410(self, app, node, user):
        deleted_file = api_utils.create_test_file(node, user, create_guid=True)
        url_with_guid = '/{}files/{}/'.format(
            API_BASE, deleted_file.get_guid()._id
        )
        url_with_id = '/{}files/{}/'.format(API_BASE, deleted_file._id)
        res = app.get(url_with_guid, auth=user.auth)
        assert res.status_code == 200
        res = app.get(url_with_id, auth=user.auth)
        assert res.status_code == 200
        deleted_file.delete(user=user, save=True)
        res = app.get(url_with_guid, auth=user.auth, expect_errors=True)
        assert res.status_code == 410
        res = app.get(url_with_id, auth=user.auth, expect_errors=True)
        assert res.status_code == 410

    def test_disabled_users_quickfiles_file_detail_gets_410(self, app, quickfiles_node, user):
        file_node = api_utils.create_test_file(quickfiles_node, user, create_guid=True)
        url_with_guid = '/{}files/{}/'.format(
            API_BASE, file_node.get_guid()._id
        )
        url_with_id = '/{}files/{}/'.format(API_BASE, file_node._id)
        res = app.get(url_with_id)
        assert res.status_code == 200
        res = app.get(url_with_guid, auth=user.auth)
        assert res.status_code == 200
        user.is_disabled = True
        user.save()
        res = app.get(url_with_id, expect_errors=True)
        assert res.json['errors'][0]['detail'] == 'This user has been deactivated and their' \
                                                  ' quickfiles are no longer available.'
        assert res.status_code == 410
        res = app.get(url_with_guid, expect_errors=True)
        assert res.json['errors'][0]['detail'] == 'This user has been deactivated and their' \
                                                  ' quickfiles are no longer available.'
        assert res.status_code == 410

    def test_file_guid_guid_status(self, app, user, file, file_url):
        # test_unvisited_file_has_no_guid
        res = app.get(file_url, auth=user.auth)
        assert res.status_code == 200
        assert res.json['data']['attributes']['guid'] is None
        # test_visited_file_has_guid
        guid = file.get_guid(create=True)
        res = app.get(file_url, auth=user.auth)
        assert res.status_code == 200
        assert guid is not None
        assert res.json['data']['attributes']['guid'] == guid._id

    def test_file_with_wrong_guid(self, app, user):
        url = '/{}files/{}/'.format(API_BASE, user._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 404

    @mock.patch('api.base.throttling.CreateGuidThrottle.allow_request')
    def test_file_guid_not_created_with_basic_auth(
            self, mock_allow, app, user, file_url):
        res = app.get('{}?create_guid=1'.format(file_url), auth=user.auth)
        guid = res.json['data']['attributes'].get('guid', None)
        assert res.status_code == 200
        assert mock_allow.call_count == 1
        assert guid is None

    @mock.patch('api.base.throttling.CreateGuidThrottle.allow_request')
    def test_file_guid_created_with_cookie(
            self, mock_allow, app, user, file_url, file):
        session = Session(data={'auth_user_id': user._id})
        session.save()
        cookie = itsdangerous.Signer(
            website_settings.SECRET_KEY
        ).sign(session._id)
        app.set_cookie(website_settings.COOKIE_NAME, cookie.decode())
        res = app.get('{}?create_guid=1'.format(file_url), auth=user.auth)
        app.reset()  # clear cookie
        assert res.status_code == 200
        guid = res.json['data']['attributes'].get('guid', None)
        assert guid is not None
        assert guid == file.get_guid()._id
        assert mock_allow.call_count == 1

    def test_get_file(self, app, user, file_url, file):
        res = app.get(file_url, auth=user.auth)
        file.versions.first().reload()
        assert res.status_code == 200
        assert set(res.json.keys()) == {'meta', 'data'}
        attributes = res.json['data']['attributes']
        assert attributes['path'] == file.path
        assert attributes['kind'] == file.kind
        assert attributes['name'] == file.name
        assert attributes['materialized_path'] == file.materialized_path
        assert attributes['last_touched'] is None
        assert attributes['provider'] == file.provider
        assert attributes['size'] == file.versions.first().size
        assert attributes['current_version'] == len(file.history)
        assert attributes['date_modified'] == _dt_to_iso8601(
            file.versions.first().created.replace(tzinfo=pytz.utc)
        )
        assert attributes['date_created'] == _dt_to_iso8601(
            file.versions.last().created.replace(tzinfo=pytz.utc)
        )
        assert attributes['extra']['hashes']['md5'] is None
        assert attributes['extra']['hashes']['sha256'] is None
        assert attributes['tags'] == []
        # make sure download link has a trailing slash
        # so that downloads don't 301
        assert res.json['data']['links']['download'].endswith('/')

    def test_file_has_rel_link_to_owning_project(
            self, app, user, file_url, node):
        res = app.get(file_url, auth=user.auth)
        assert res.status_code == 200
        assert 'target' in res.json['data']['relationships'].keys()
        expected_url = node.api_v2_url
        actual_url = res.json['data']['relationships']['target']['links']['related']['href']
        assert expected_url in actual_url

    def test_file_has_comments_link(self, app, user, file, file_url):
        file.get_guid(create=True)
        res = app.get(file_url, auth=user.auth)
        assert res.status_code == 200
        assert 'comments' in res.json['data']['relationships'].keys()
        url = res.json['data']['relationships']['comments']['links']['related']['href']
        assert app.get(url, auth=user.auth).status_code == 200
        assert res.json['data']['type'] == 'files'

    def test_file_has_correct_unread_comments_count(
            self, app, user, file, node):
        contributor = AuthUserFactory()
        node.add_contributor(contributor, auth=Auth(user), save=True)
        CommentFactory(
            node=node,
            target=file.get_guid(create=True),
            user=contributor, page='files'
        )
        res = app.get(
            '/{}files/{}/?related_counts=True'.format(API_BASE, file._id),
            auth=user.auth
        )
        assert res.status_code == 200
        unread_comments = res.json['data']['relationships']['comments']['links']['related']['meta']['unread']
        assert unread_comments == 1

    def test_only_project_contrib_can_comment_on_closed_project(
            self, app, user, node, file_url):
        node.comment_level = 'private'
        node.is_public = True
        node.save()
        res = app.get(file_url, auth=user.auth)
        can_comment = res.json['data']['attributes']['current_user_can_comment']
        assert res.status_code == 200
        assert can_comment is True
        non_contributor = AuthUserFactory()
        res = app.get(file_url, auth=non_contributor.auth)
        can_comment = res.json['data']['attributes']['current_user_can_comment']
        assert res.status_code == 200
        assert can_comment is False

    def test_logged_or_not_user_comment_status_on_open_project(
            self, app, node, file_url):
        node.is_public = True
        node.save()
        # test_any_loggedin_user_can_comment_on_open_project(self, app, node,
        # file_url):
        non_contributor = AuthUserFactory()
        res = app.get(file_url, auth=non_contributor.auth)
        can_comment = res.json['data']['attributes']['current_user_can_comment']
        assert res.status_code == 200
        assert can_comment is True
        # test_non_logged_in_user_cant_comment(self, app, file_url, node):
        res = app.get(file_url)
        can_comment = res.json['data']['attributes']['current_user_can_comment']
        assert res.status_code == 200
        assert can_comment is False

    def test_checkout(self, app, user, file, file_url, node):
        assert file.checkout is None
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': user._id
                    }
                }
            }, auth=user.auth)
        file.reload()
        file.save()
        node.reload()
        assert res.status_code == 200
        assert file.checkout == user
        res = app.get(file_url, auth=user.auth)
        assert node.logs.count() == 2
        assert node.logs.latest().action == NodeLog.CHECKED_OUT
        assert node.logs.latest().user == user
        assert user._id == res.json['data']['relationships']['checkout']['links']['related']['meta']['id']
        assert '/{}users/{}/'.format(
            API_BASE, user._id
        ) in res.json['data']['relationships']['checkout']['links']['related']['href']
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': None
                    }
                }
            }, auth=user.auth)
        file.reload()
        assert file.checkout is None
        assert res.status_code == 200

    def test_checkout_file_error(self, app, user, file_url, file):
        # test_checkout_file_no_type
        res = app.put_json_api(
            file_url,
            {'data': {'id': file._id, 'attributes': {'checkout': user._id}}},
            auth=user.auth, expect_errors=True
        )
        assert res.status_code == 400
        # test_checkout_file_no_id
        res = app.put_json_api(
            file_url,
            {'data': {'type': 'files', 'attributes': {'checkout': user._id}}},
            auth=user.auth, expect_errors=True
        )
        assert res.status_code == 400
        # test_checkout_file_incorrect_type
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'Wrong type.',
                    'attributes': {
                        'checkout': user._id
                    }
                }
            }, auth=user.auth, expect_errors=True)
        assert res.status_code == 409
        # test_checkout_file_incorrect_id
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': '12345',
                    'type': 'files',
                    'attributes': {
                        'checkout': user._id
                    }
                }
            }, auth=user.auth, expect_errors=True)
        assert res.status_code == 409
        # test_checkout_file_no_attributes
        res = app.put_json_api(
            file_url,
            {'data': {'id': file._id, 'type': 'files'}},
            auth=user.auth, expect_errors=True
        )
        assert res.status_code == 400

    def test_must_set_self(self, app, user, file, file_url):
        user_unauthorized = UserFactory()
        assert file.checkout is None
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': user_unauthorized._id
                    }
                }
            }, auth=user.auth, expect_errors=True, )
        file.reload()
        assert res.status_code == 400
        assert file.checkout is None

    def test_must_be_self(self, app, file, file_url):
        user = AuthUserFactory()
        file.checkout = user
        file.save()
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': user._id
                    }
                }
            }, auth=user.auth, expect_errors=True, )
        file.reload()
        assert res.status_code == 403
        assert file.checkout == user

    def test_admin_can_checkin(self, app, user, node, file, file_url):
        user_unauthorized = UserFactory()
        node.add_contributor(user_unauthorized)
        file.checkout = user_unauthorized
        file.save()
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': None
                    }
                }
            }, auth=user.auth, expect_errors=True, )
        file.reload()
        node.reload()
        assert res.status_code == 200
        assert file.checkout is None
        assert node.logs.latest().action == NodeLog.CHECKED_IN
        assert node.logs.latest().user == user

    def test_admin_can_checkout(self, app, user, file_url, file, node):
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': user._id
                    }
                }
            }, auth=user.auth, expect_errors=True, )
        file.reload()
        node.reload()
        assert res.status_code == 200
        assert file.checkout == user
        assert node.logs.latest().action == NodeLog.CHECKED_OUT
        assert node.logs.latest().user == user

    def test_cannot_checkin_when_already_checked_in(
            self, app, user, node, file, file_url):
        count = node.logs.count()
        assert not file.is_checked_out
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': None
                    }
                }
            }, auth=user.auth, expect_errors=True, )
        file.reload()
        node.reload()
        assert res.status_code == 200
        assert node.logs.count() == count
        assert file.checkout is None

    def test_cannot_checkout_when_checked_out(
            self, app, user, node, file, file_url):
        user_unauthorized = UserFactory()
        node.add_contributor(user_unauthorized)
        file.checkout = user_unauthorized
        file.save()
        count = node.logs.count()
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': user._id
                    }
                }
            }, auth=user.auth, expect_errors=True, )
        file.reload()
        node.reload()
        assert res.status_code == 200
        assert file.checkout == user_unauthorized
        assert node.logs.count() == count

    def test_noncontrib_and_read_contrib_cannot_checkout(
            self, app, file, node, file_url):
        # test_noncontrib_cannot_checkout
        non_contrib = AuthUserFactory()
        assert file.checkout is None
        assert not node.has_permission(non_contrib, READ)
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': non_contrib._id
                    }
                }
            }, auth=non_contrib.auth, expect_errors=True, )
        file.reload()
        node.reload()
        assert res.status_code == 403
        assert file.checkout is None
        assert node.logs.latest().action != NodeLog.CHECKED_OUT
        # test_read_contrib_cannot_checkout
        read_contrib = AuthUserFactory()
        node.add_contributor(read_contrib, permissions=READ)
        node.save()
        assert not node.can_edit(user=read_contrib)
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': None
                    }
                }
            }, auth=read_contrib.auth, expect_errors=True)
        file.reload()
        assert res.status_code == 403
        assert file.checkout is None
        assert node.logs.latest().action != NodeLog.CHECKED_OUT

    def test_write_contrib_can_checkin(self, app, node, file, file_url):
        write_contrib = AuthUserFactory()
        node.add_contributor(write_contrib, permissions=WRITE)
        node.save()
        assert node.can_edit(user=write_contrib)
        file.checkout = write_contrib
        file.save()
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': None
                    }
                }
            }, auth=write_contrib.auth, )
        file.reload()
        assert res.status_code == 200
        assert file.checkout is None

    @mock.patch('addons.osfstorage.listeners.enqueue_postcommit_task')
    def test_removed_contrib_files_checked_in(self, mock_enqueue, app, node, file):
        write_contrib = AuthUserFactory()
        node.add_contributor(write_contrib, permissions=WRITE)
        node.save()
        assert node.can_edit(user=write_contrib)
        file.checkout = write_contrib
        file.save()
        assert file.is_checked_out
        node.remove_contributor(write_contrib, auth=Auth(write_contrib))
        mock_enqueue.assert_called_with(checkin_files_task, (node._id, write_contrib._id,), {}, celery=True)

    def test_must_be_osfstorage(self, app, user, file, file_url):
        file.recast(GithubFileNode._typedmodels_type)
        file.save()
        res = app.put_json_api(
            file_url, {
                'data': {
                    'id': file._id,
                    'type': 'files',
                    'attributes': {
                        'checkout': user._id
                    }
                }
            }, auth=user.auth, expect_errors=True, )
        assert res.status_code == 403

    def test_get_file_guids_misc(self, app, user, file, node):
        # test_get_file_resolves_guids
        guid = file.get_guid(create=True)
        url = '/{}files/{}/'.format(API_BASE, guid._id)
        res = app.get(url, auth=user.auth)
        assert res.status_code == 200
        assert set(res.json.keys()) == {'meta', 'data'}
        assert res.json['data']['attributes']['path'] == file.path
        # test_get_file_invalid_guid_gives_404
        url = '/{}files/{}/'.format(API_BASE, 'asdasasd')
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 404
        # test_get_file_non_file_guid_gives_404
        url = '/{}files/{}/'.format(API_BASE, node._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 404

    def test_current_version_is_equal_to_length_of_history(
            self, app, user, file_url, file):
        res = app.get(file_url, auth=user.auth)
        assert res.json['data']['attributes']['current_version'] == 1
        for version in range(2, 4):
            file.create_version(user, {
                'object': '06d80e' + str(version),
                'service': 'cloud',
                osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
            }, {'size': 1337,
                'contentType': 'img/png'}).save()
            res = app.get(file_url, auth=user.auth)
            assert res.json['data']['attributes']['current_version'] == version

    # Regression test for OSF-7758
    def test_folder_files_relationships_contains_guid_not_id(
            self, app, user, node):
        folder = node.get_addon('osfstorage').get_root(
        ).append_folder('I\'d be a teacher!!')
        folder.save()
        folder_url = '/{}files/{}/'.format(API_BASE, folder._id)
        res = app.get(folder_url, auth=user.auth)
        split_href = res.json['data']['relationships']['files']['links']['related']['href'].split(
            '/')
        assert node._id in split_href
        # BUG FIX: ``node.id`` is an int, so ``node.id not in split_href``
        # (a list of strings) was vacuously true; compare the string form.
        assert str(node.id) not in split_href

    def test_embed_user_on_quickfiles_detail(self, app, user):
        quickfiles = QuickFilesNode.objects.get(creator=user)
        osfstorage = quickfiles.get_addon('osfstorage')
        root = osfstorage.get_root()
        test_file = root.append_file('speedyfile.txt')
        url = '/{}files/{}/?embed=user'.format(API_BASE, test_file._id)
        res = app.get(url, auth=user.auth)
        assert res.json['data'].get('embeds', None)
        assert res.json['data']['embeds'].get('user')
        assert res.json['data']['embeds']['user']['data']['id'] == user._id
@pytest.mark.django_db
class TestFileVersionView:
    """Tests for the file-versions endpoints (``/v2/files/<id>/versions/``)."""

    @pytest.fixture()
    def node(self, user):
        return ProjectFactory(creator=user)

    @pytest.fixture()
    def osfstorage(self, node):
        return node.get_addon('osfstorage')

    @pytest.fixture()
    def root_node(self, osfstorage):
        return osfstorage.get_root()

    @pytest.fixture()
    def file(self, root_node, user):
        # A file with one stored version
        file = root_node.append_file('test_file')
        file.create_version(user, {
            'object': '06d80e',
            'service': 'cloud',
            osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
        }, {
            'size': 1337,
            'contentType': 'img/png'
        }).save()
        return file

    def test_listing(self, app, user, file):
        # Versions are listed newest-first ('2' before '1')
        file.create_version(user, {
            'object': '0683m38e',
            'service': 'cloud',
            osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
        }, {
            'size': 1347,
            'contentType': 'img/png'
        }).save()
        res = app.get(
            '/{}files/{}/versions/'.format(API_BASE, file._id),
            auth=user.auth,
        )
        assert res.status_code == 200
        assert len(res.json['data']) == 2
        assert res.json['data'][0]['id'] == '2'
        assert res.json['data'][0]['attributes']['name'] == file.name
        assert res.json['data'][1]['id'] == '1'
        assert res.json['data'][1]['attributes']['name'] == file.name

    def test_load_and_property(self, app, user, file):
        # test_by_id
        res = app.get(
            '/{}files/{}/versions/1/'.format(API_BASE, file._id),
            auth=user.auth,
        )
        assert res.status_code == 200
        assert res.json['data']['id'] == '1'
        mfr_url = get_mfr_url(file, 'osfstorage')
        render_link = res.json['data']['links']['render']
        download_link = res.json['data']['links']['download']
        assert mfr_url in render_link
        assert download_link in render_link
        assert 'revision=1' in render_link
        # once a guid exists, the render link should reference it
        guid = file.get_guid(create=True)._id
        res = app.get(
            '/{}files/{}/versions/1/'.format(API_BASE, file._id),
            auth=user.auth,
        )
        render_link = res.json['data']['links']['render']
        download_link = res.json['data']['links']['download']
        assert mfr_url in render_link
        assert download_link in render_link
        assert guid in render_link
        assert 'revision=1' in render_link
        # test_read_only: version detail rejects PUT/POST/DELETE
        assert app.put(
            '/{}files/{}/versions/1/'.format(API_BASE, file._id),
            expect_errors=True, auth=user.auth,
        ).status_code == 405
        assert app.post(
            '/{}files/{}/versions/1/'.format(API_BASE, file._id),
            expect_errors=True, auth=user.auth,
        ).status_code == 405
        assert app.delete(
            '/{}files/{}/versions/1/'.format(API_BASE, file._id),
            expect_errors=True, auth=user.auth,
        ).status_code == 405
@pytest.mark.django_db
class TestFileTagging:
    @pytest.fixture()
    def node(self, user):
        # A fresh project owned by ``user`` for each tagging test
        return ProjectFactory(creator=user)
    @pytest.fixture()
    def file_one(self, user, node):
        # A test file attached to ``node``, target of the tag updates below
        return api_utils.create_test_file(
            node, user, filename='file_one')
@pytest.fixture()
def payload(self, file_one):
payload = {
'data': {
'type': 'files',
'id': file_one._id,
'attributes': {
'checkout': None,
'tags': ['goofy']
}
}
}
return payload
@pytest.fixture()
def url(self, file_one):
return '/{}files/{}/'.format(API_BASE, file_one._id)
def test_tags_add_and_update_properly(self, app, user, url, payload):
# test_tags_add_properly
res = app.put_json_api(url, payload, auth=user.auth)
assert res.status_code == 200
# Ensure adding tag data is correct from the PUT response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'goofy'
# test_tags_update_properly
# Ensure removing and adding tag data is correct from the PUT response
payload['data']['attributes']['tags'] = ['goofier']
res = app.put_json_api(url, payload, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'goofier'
def test_tags_add_and_remove_properly(self, app, user, url, payload):
app.put_json_api(url, payload, auth=user.auth)
payload['data']['attributes']['tags'] = []
res = app.put_json_api(url, payload, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_put_wo_tags_doesnt_remove_tags(self, app, user, url, payload):
app.put_json_api(url, payload, auth=user.auth)
payload['data']['attributes'] = {'checkout': None}
res = app.put_json_api(url, payload, auth=user.auth)
assert res.status_code == 200
# Ensure adding tag data is correct from the PUT response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'goofy'
def test_add_and_remove_tag_adds_log(self, app, user, url, payload, node):
# test_add_tag_adds_log
count = node.logs.count()
app.put_json_api(url, payload, auth=user.auth)
assert node.logs.count() == count + 1
assert NodeLog.FILE_TAG_ADDED == node.logs.latest().action
# test_remove_tag_adds_log
payload['data']['attributes']['tags'] = []
count = node.logs.count()
app.put_json_api(url, payload, auth=user.auth)
assert node.logs.count() == count + 1
assert NodeLog.FILE_TAG_REMOVED == node.logs.latest().action
@pytest.mark.django_db
class TestPreprintFileView:
    """Access control for a preprint's primary file across preprint states.

    Every test exercises the same four personas in the same order
    (unauthenticated, non-contributor, write contributor, admin), so the
    repeated request/assert ladder is factored into ``_assert_access``.
    """

    @pytest.fixture()
    def preprint(self, user):
        return PreprintFactory(creator=user)

    @pytest.fixture()
    def primary_file(self, preprint):
        return preprint.primary_file

    @pytest.fixture()
    def file_url(self, primary_file):
        return '/{}files/{}/'.format(API_BASE, primary_file._id)

    @pytest.fixture()
    def other_user(self):
        return AuthUserFactory()

    def _assert_access(self, app, file_url, preprint, user, other_user, expected_codes):
        """GET ``file_url`` as each persona and assert the status codes.

        :param expected_codes: 4-tuple of expected HTTP statuses for
            (unauthenticated, non-contributor, write contributor, admin).
        Side effect: adds ``other_user`` as a write contributor, matching
        the original inline tests.
        """
        unauth, noncontrib, write_contrib, admin = expected_codes
        # Unauthenticated
        res = app.get(file_url, expect_errors=True)
        assert res.status_code == unauth
        # Non contrib
        res = app.get(file_url, auth=other_user.auth, expect_errors=True)
        assert res.status_code == noncontrib
        # Write contrib
        preprint.add_contributor(other_user, WRITE, save=True)
        res = app.get(file_url, auth=other_user.auth, expect_errors=True)
        assert res.status_code == write_contrib
        # Admin contrib
        res = app.get(file_url, auth=user.auth, expect_errors=True)
        assert res.status_code == admin

    def test_published_preprint_file(self, app, file_url, preprint, user, other_user):
        # Published + public: everyone may read.
        self._assert_access(app, file_url, preprint, user, other_user,
                            (200, 200, 200, 200))

    def test_unpublished_preprint_file(self, app, file_url, preprint, user, other_user):
        preprint.is_published = False
        preprint.save()
        # Unpublished: contributors only.
        self._assert_access(app, file_url, preprint, user, other_user,
                            (401, 403, 200, 200))

    def test_private_preprint_file(self, app, file_url, preprint, user, other_user):
        preprint.is_public = False
        preprint.save()
        # Private: contributors only.
        self._assert_access(app, file_url, preprint, user, other_user,
                            (401, 403, 200, 200))

    def test_deleted_preprint_file(self, app, file_url, preprint, user, other_user):
        preprint.deleted = timezone.now()
        preprint.save()
        # Deleted: gone (410) for everyone, including admins.
        self._assert_access(app, file_url, preprint, user, other_user,
                            (410, 410, 410, 410))

    def test_abandoned_preprint_file(self, app, file_url, preprint, user, other_user):
        preprint.machine_state = DefaultStates.INITIAL.value
        preprint.save()
        # Abandoned (never submitted): only the admin creator may read.
        self._assert_access(app, file_url, preprint, user, other_user,
                            (401, 403, 403, 200))

    def test_withdrawn_preprint_files(self, app, file_url, preprint, user, other_user):
        preprint.date_withdrawn = timezone.now()
        preprint.save()
        # Withdrawn: no one may read the file, not even admins.
        self._assert_access(app, file_url, preprint, user, other_user,
                            (401, 403, 403, 403))
|
{
"content_hash": "01a293192ab02b9d0815f9c708262332",
"timestamp": "",
"source": "github",
"line_count": 944,
"max_line_length": 109,
"avg_line_length": 36.24576271186441,
"alnum_prop": 0.5580430208089783,
"repo_name": "adlius/osf.io",
"id": "bca35a6c5b57633f63e6d361758edc50b2657caa",
"size": "34216",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "api_tests/files/views/test_file_detail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92846"
},
{
"name": "Dockerfile",
"bytes": "5868"
},
{
"name": "HTML",
"bytes": "341209"
},
{
"name": "JavaScript",
"bytes": "1787097"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "682360"
},
{
"name": "Python",
"bytes": "11862763"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
"""Contains miscellaneous helpers"""
from __future__ import unicode_literals, division, absolute_import, print_function
import copy
import urllib2
import httplib
import os
import socket
import time
import re
import sys
import locale
import Queue
import ast
import operator
from collections import MutableMapping
from urlparse import urlparse
from htmlentitydefs import name2codepoint
from datetime import timedelta, datetime
def str_to_boolean(string):
    """Return True when *string* is a truthy word, else False.

    Matching is case-insensitive; accepted truthy values are
    'true', '1', 't', 'y' and 'yes'.
    """
    return string.lower() in ('true', '1', 't', 'y', 'yes')
def str_to_int(string):
    """Parse *string* as an int, ignoring thousands separators.

    :returns: the integer value, or None when the string is not numeric.
    """
    digits = string.replace(',', '')
    try:
        return int(digits)
    except ValueError:
        return None
def convert_bytes(bytes):
    """Returns given bytes as prettified string (e.g. 1536 -> '1.50K')."""
    bytes = float(bytes)
    # Check thresholds largest-first, mirroring the original if/elif ladder.
    units = (
        (1099511627776, 'T'),
        (1073741824, 'G'),
        (1048576, 'M'),
        (1024, 'K'),
    )
    for threshold, suffix in units:
        if bytes >= threshold:
            return '%.2f%s' % (bytes / threshold, suffix)
    # Below one kilobyte: plain bytes.
    return '%.2fb' % bytes
class MergeException(Exception):
    """Raised by merge_dict_from_to when two values cannot be reconciled."""

    def __init__(self, value):
        # ``value`` is a human-readable description of the merge conflict.
        self.value = value

    def __str__(self):
        return repr(self.value)
def strip_html(text):
    """Tries to strip all HTML tags from *text*. If unsuccessful returns original text."""
    from bs4 import BeautifulSoup
    try:
        text = ' '.join(BeautifulSoup(text).find_all(text=True))
        return ' '.join(text.split())
    except Exception:
        # bs4 may raise on malformed markup; fall back to the original text.
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt — narrowed to Exception.
        return text
# This pattern matches a character entity reference (a decimal numeric
# references, a hexadecimal numeric reference, or a named reference).
# The trailing ';' is optional so sloppy HTML (e.g. '&amp' without the
# semicolon) is still matched.
charrefpat = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?')
def _htmldecode(text):
    """Decode HTML entities in the given text.

    Handles decimal (&#65;), hexadecimal (&#x41;) and named (&amp;)
    references; unknown entities are left untouched.  Python 2 only
    (relies on ``unicode``/``unichr``).
    """
    # From screpe.py - licensed under apache 2.0 .. should not be a problem for a MIT afaik
    if type(text) is unicode:
        uchr = unichr
    else:
        # Byte-string input: stay a byte string for ASCII code points, use
        # unichr only above 127 (Python 2 and/or-ternary idiom).
        uchr = lambda value: value > 127 and unichr(value) or chr(value)

    def entitydecode(match, uchr=uchr):
        entity = match.group(1)
        if entity.startswith('#x'):
            return uchr(int(entity[2:], 16))
        elif entity.startswith('#'):
            return uchr(int(entity[1:]))
        elif entity in name2codepoint:
            return uchr(name2codepoint[entity])
        else:
            # Unknown entity: leave the original text unchanged.
            return match.group(0)
    return charrefpat.sub(entitydecode, text)
def decode_html(value):
    """Public wrapper around :func:`_htmldecode`.

    :param string value: String to be html-decoded
    :returns: Html decoded string
    """
    return _htmldecode(value)
def encode_html(unicode_data, encoding='ascii'):
    """
    Encode unicode_data for use as XML or HTML, with characters outside
    of the encoding converted to XML numeric character references.

    :param unicode_data: text to encode
    :param encoding: target codec name (default 'ascii')
    :returns: encoded byte string
    """
    try:
        return unicode_data.encode(encoding, 'xmlcharrefreplace')
    except ValueError:
        # ValueError is raised if there are unencodable chars in the
        # data and the 'xmlcharrefreplace' error handler is not found.
        # Pre-2.3 Python doesn't support the 'xmlcharrefreplace' error
        # handler, so we'll emulate it.
        return _xmlcharref_encode(unicode_data, encoding)
def _xmlcharref_encode(unicode_data, encoding):
    """Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler."""
    def _encode_char(char):
        # Encode strictly; any character the codec rejects becomes a
        # numeric character reference like '&#8364;'.
        try:
            return char.encode(encoding, 'strict')
        except UnicodeError:
            return '&#%i;' % ord(char)

    return ''.join(_encode_char(char) for char in unicode_data)
def merge_dict_from_to(d1, d2):
    """Merges dictionary d1 into dictionary d2. d1 will remain in original form.

    Nested dicts are merged recursively; lists in d2 are extended with deep
    copies of d1's items; scalars already present in d2 are kept (d2 wins).

    :raises MergeException: when a key holds incompatible container types
        on the two sides.
    :raises Exception: when a value is of an unsupported type.
    """
    for k, v in d1.items():
        if k in d2:
            if type(v) == type(d2[k]):
                if isinstance(v, dict):
                    merge_dict_from_to(d1[k], d2[k])
                elif isinstance(v, list):
                    d2[k].extend(copy.deepcopy(v))
                elif isinstance(v, (basestring, bool, int, float, type(None))):
                    # Same scalar type on both sides: keep d2's value.
                    pass
                else:
                    raise Exception('Unknown type: %s value: %s in dictionary' % (type(v), repr(v)))
            elif (isinstance(v, (basestring, bool, int, float, type(None))) and
                  isinstance(d2[k], (basestring, bool, int, float, type(None)))):
                # Allow overriding of non-container types with other non-container types
                pass
            else:
                raise MergeException('Merging key %s failed, conflicting datatypes %r vs. %r.' % (
                    k, type(v).__name__, type(d2[k]).__name__))
        else:
            # Key only in d1: copy it over wholesale.
            d2[k] = copy.deepcopy(v)
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that records the triggering 3xx code on the response.

    After a redirect, ``response.status`` holds 301 or 302 so callers can
    tell which kind of redirect occurred.
    """

    def _follow(self, base_method, req, fp, code, msg, headers):
        # Delegate to the stock handler, then annotate the result.
        result = base_method(self, req, fp, code, msg, headers)
        result.status = code
        return result

    def http_error_301(self, req, fp, code, msg, headers):
        return self._follow(urllib2.HTTPRedirectHandler.http_error_301,
                            req, fp, code, msg, headers)

    def http_error_302(self, req, fp, code, msg, headers):
        return self._follow(urllib2.HTTPRedirectHandler.http_error_302,
                            req, fp, code, msg, headers)
def urlopener(url_or_request, log, **kwargs):
    """
    Utility function for pulling back a url, with a retry of 3 times, increasing the timeout, etc.
    Re-raises any errors as URLError.

    .. warning:: This is being replaced by requests library.
                 flexget.utils.requests should be used going forward.

    :param str url_or_request: URL or Request object to get.
    :param log: Logger to log debug info and errors to
    :param kwargs: Keyword arguments to be passed to urlopen
    :return: The file-like object returned by urlopen
    :raises urllib2.URLError: when the host is known-unresponsive or all
        retries are exhausted
    """
    from flexget.utils.requests import is_unresponsive, set_unresponsive

    if isinstance(url_or_request, urllib2.Request):
        url = url_or_request.get_host()
    else:
        url = url_or_request
    # Skip hosts that recently timed out instead of stalling again.
    if is_unresponsive(url):
        msg = '%s is known to be unresponsive, not trying again.' % urlparse(url).hostname
        log.warning(msg)
        raise urllib2.URLError(msg)

    retries = kwargs.get('retries', 3)
    timeout = kwargs.get('timeout', 15.0)

    # get the old timeout for sockets, so we can set it back to that when done. This is NOT threadsafe by the way.
    # In order to avoid requiring python 2.6, we're not using the urlopen timeout parameter. That really should be used
    # after checking for python 2.6.
    oldtimeout = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(timeout)

        # Combine our redirect handler with any globally-installed and
        # caller-supplied handlers.
        handlers = [SmartRedirectHandler()]
        if urllib2._opener:
            handlers.extend(urllib2._opener.handlers)
        if kwargs.get('handlers'):
            handlers.extend(kwargs['handlers'])
        if len(handlers) > 1:
            handler_names = [h.__class__.__name__ for h in handlers]
            log.debug('Additional handlers have been specified for this urlopen: %s' % ', '.join(handler_names))
        opener = urllib2.build_opener(*handlers).open
        for i in range(retries):  # retry getting the url up to 3 times.
            if i > 0:
                # Brief pause between attempts.
                time.sleep(3)
            try:
                retrieved = opener(url_or_request, kwargs.get('data'))
            except urllib2.HTTPError as e:
                if e.code < 500:
                    # If it was not a server error, don't keep retrying.
                    log.warning('Could not retrieve url (HTTP %s error): %s' % (e.code, e.url))
                    raise
                log.debug('HTTP error (try %i/%i): %s' % (i + 1, retries, e.code))
            except (urllib2.URLError, socket.timeout) as e:
                if hasattr(e, 'reason'):
                    reason = str(e.reason)
                else:
                    reason = 'N/A'
                if reason == 'timed out':
                    # Remember the host so later calls can bail out early.
                    set_unresponsive(url)
                log.debug('Failed to retrieve url (try %i/%i): %s' % (i + 1, retries, reason))
            except httplib.IncompleteRead as e:
                log.critical('Incomplete read - see python bug 6312')
                break
            else:
                # make the returned instance usable in a with statement by adding __enter__ and __exit__ methods
                # NOTE(review): this patches the response *class*, so it
                # affects every instance of that type process-wide.
                def enter(self):
                    return self

                def exit(self, exc_type, exc_val, exc_tb):
                    self.close()

                retrieved.__class__.__enter__ = enter
                retrieved.__class__.__exit__ = exit
                return retrieved

        log.warning('Could not retrieve url: %s' % url_or_request)
        raise urllib2.URLError('Could not retrieve url after %s tries.' % retries)
    finally:
        # Restore the process-wide socket timeout whatever happened above.
        socket.setdefaulttimeout(oldtimeout)
class ReList(list):
    """
    A list that stores regexps.

    You can add compiled or uncompiled regexps to the list.
    It will always return the compiled version.
    It will compile the text regexps on demand when first accessed.
    """

    # Set the default flags
    flags = re.IGNORECASE | re.UNICODE

    def __init__(self, *args, **kwargs):
        """Optional :flags: keyword argument with regexp flags to compile with"""
        if 'flags' in kwargs:
            self.flags = kwargs['flags']
            del kwargs['flags']
        list.__init__(self, *args, **kwargs)

    def __getitem__(self, k):
        # Compile string patterns lazily on first access and cache the
        # compiled object back into the list.
        item = list.__getitem__(self, k)
        if isinstance(item, basestring):
            # BUG FIX: previously compiled with hard-coded
            # re.IGNORECASE | re.UNICODE, silently ignoring any ``flags``
            # passed to __init__; now honors self.flags.
            item = re.compile(item, self.flags)
            self[k] = item
        return item

    def __iter__(self):
        # Iterate via __getitem__ so every yielded item is compiled.
        for i in range(len(self)):
            yield self[i]
# Determine the encoding for io (console printing).  Preference order:
# stdout's own encoding, then the locale's preferred encoding, then utf8.
io_encoding = None
if hasattr(sys.stdout, 'encoding'):
    io_encoding = sys.stdout.encoding
if not io_encoding:
    try:
        io_encoding = locale.getpreferredencoding()
    except Exception:
        pass
if not io_encoding:
    # Default to utf8 if nothing can be determined
    io_encoding = 'utf8'
else:
    # Normalize the encoding
    io_encoding = io_encoding.lower()
    if io_encoding == 'cp65001':
        # Windows code page 65001 is UTF-8.
        io_encoding = 'utf8'
    elif io_encoding in ['us-ascii', '646', 'ansi_x3.4-1968']:
        # Common aliases for plain ASCII.
        io_encoding = 'ascii'
def console(text):
    """Print to console safely.

    Plain ``str`` is printed directly; anything else is converted to
    unicode and encoded with the detected ``io_encoding``, replacing
    unencodable characters (Python 2 semantics).
    """
    if isinstance(text, str):
        print(text)
        return
    print(unicode(text).encode(io_encoding, 'replace'))
def parse_timedelta(value):
    """Parse a string like '5 days' into a timedelta object. Also allows timedeltas to pass through.

    :param value: a timedelta (returned as-is), a falsy value (returns a
        zero timedelta), or a string of the form '<amount> <unit>' where
        unit is any keyword accepted by :class:`datetime.timedelta`
        ('day', 'hours', 'minutes', ...; a trailing 's' is added if missing).
    :raises ValueError: when the string cannot be parsed.
    """
    if isinstance(value, timedelta):
        # Allow timedelta objects to pass through
        return value
    if not value:
        # If no time is given, default to 0
        return timedelta()
    try:
        amount, unit = value.lower().split(' ')
    except ValueError:
        # BUG FIX: inputs like '5days' or '1 odd unit' used to escape as a
        # raw unpacking error; report them with the intended message.
        raise ValueError('Invalid time format \'%s\'' % value)
    # Make sure unit name is plural.
    if not unit.endswith('s'):
        unit += 's'
    params = {unit: float(amount)}
    try:
        return timedelta(**params)
    except TypeError:
        raise ValueError('Invalid time format \'%s\'' % value)
def multiply_timedelta(interval, number):
    """timedeltas can not normally be multiplied by floating points. This does that.

    NOTE: microseconds of *interval* are ignored, matching the original
    (pre-Python-2.7, no ``total_seconds``) implementation.
    """
    seconds_in_interval = interval.days * 86400 + interval.seconds
    return timedelta(seconds=seconds_in_interval * number)
if os.name == 'posix':
    def pid_exists(pid):
        """Check whether pid exists in the current process table."""
        import errno
        if pid < 0:
            return False
        try:
            # Signal 0 performs permission/existence checking only; it
            # does not actually deliver a signal.
            os.kill(pid, 0)
        except OSError as e:
            # EPERM means the process exists but we lack permission to
            # signal it; any other errno means it does not exist.
            return e.errno == errno.EPERM
        else:
            return True
else:
    def pid_exists(pid):
        """Check whether pid exists (Windows implementation via Win32 API)."""
        import ctypes
        import ctypes.wintypes
        kernel32 = ctypes.windll.kernel32
        PROCESS_QUERY_INFORMATION = 0x0400
        STILL_ACTIVE = 259

        handle = kernel32.OpenProcess(PROCESS_QUERY_INFORMATION, 0, pid)
        if handle == 0:
            return False

        # If the process exited recently, a pid may still exist for the handle.
        # So, check if we can get the exit code.
        exit_code = ctypes.wintypes.DWORD()
        is_running = kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0
        kernel32.CloseHandle(handle)

        # See if we couldn't get the exit code or the exit code indicates that the
        # process is still running.
        return is_running or exit_code.value == STILL_ACTIVE
# Whitelist of binary operators permitted by arithmeticEval.
# NOTE: ``operator.div`` is Python 2 (classic division); this module is
# Python 2 code throughout.
_binOps = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.div,
    ast.Mod: operator.mod
}
def arithmeticEval(s):
    """
    A safe eval supporting basic arithmetic operations.

    Only literals (numbers, strings) and the binary operators whitelisted
    in ``_binOps`` are allowed; anything else raises.

    :param s: expression to evaluate
    :return: value
    """
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Str):
            return node.s
        if isinstance(node, ast.Num):
            return node.n
        if isinstance(node, ast.BinOp):
            left = _eval(node.left)
            right = _eval(node.right)
            return _binOps[type(node.op)](left, right)
        # Names, calls, attributes etc. are deliberately rejected.
        raise Exception('Unsupported type {}'.format(node))

    tree = ast.parse(s, mode='eval')
    return _eval(tree.body)
class TimedDict(MutableMapping):
    """Acts like a normal dict, but keys will only remain in the dictionary for a specified time span."""

    def __init__(self, cache_time='5 minutes'):
        # ``cache_time`` may be a timedelta or a string like '5 minutes'
        # (anything parse_timedelta accepts).
        self.cache_time = parse_timedelta(cache_time)
        # Maps key -> (insertion_datetime, value).
        self._store = dict()

    def __getitem__(self, key):
        add_time, value = self._store[key]
        # Prune data and raise KeyError when expired
        if add_time < datetime.now() - self.cache_time:
            del self._store[key]
            raise KeyError(key, 'cache time expired')
        return value

    def __setitem__(self, key, value):
        # Record the insertion time alongside the value for expiry checks.
        self._store[key] = (datetime.now(), value)

    def __delitem__(self, key):
        del self._store[key]

    def __iter__(self):
        # Uses our getitem to skip expired items
        # NOTE(review): ``key in self`` deletes expired entries while
        # iterating _store.keys(); safe on Python 2 where keys() is a list.
        return (key for key in self._store.keys() if key in self)

    def __len__(self):
        # Count only non-expired entries.
        return len(list(self.__iter__()))

    def __repr__(self):
        # Show plain values, hiding the internal (timestamp, value) tuples.
        return '%s(%r)' % (self.__class__.__name__, dict(zip(self._store, (v[1] for v in self._store.values()))))
class BufferQueue(Queue.Queue):
    """Used in place of a file-like object to capture text and access it safely from another thread."""
    # Allow access to the Empty error from here
    Empty = Queue.Empty

    def write(self, line):
        # File-object interface: each written chunk becomes one queue item.
        self.put(line)
def singleton(cls):
    """Class decorator: all calls after the first return the same instance.

    Constructor arguments are honored only on the first call; later calls
    ignore their arguments and return the cached instance.
    """
    _cache = {}

    def _get_instance(*args, **kwargs):
        try:
            return _cache[cls]
        except KeyError:
            _cache[cls] = cls(*args, **kwargs)
            return _cache[cls]

    return _get_instance
|
{
"content_hash": "949a3c43b3f01ad57eda450fb45a238e",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 119,
"avg_line_length": 32.854030501089326,
"alnum_prop": 0.6031167108753316,
"repo_name": "ZefQ/Flexget",
"id": "4f86f2242f42c53c0b35dec36a051cb4f897af07",
"size": "15080",
"binary": false,
"copies": "8",
"ref": "refs/heads/develop",
"path": "flexget/utils/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4371"
},
{
"name": "HTML",
"bytes": "16623"
},
{
"name": "JavaScript",
"bytes": "106719"
},
{
"name": "Python",
"bytes": "2190193"
}
],
"symlink_target": ""
}
|
import cv2, os
import numpy as np
from PIL import Image

# For face detection we will use the Haar Cascade provided by OpenCV.
# The XML cascade file is expected to be in the current working directory.
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
def get_images_and_labels(path):
    """Collect detected face crops and their subject labels from *path*.

    Every file in the directory is read as an image; the numeric label is
    parsed from a file name of the form ``subjectNN.<ext>``.

    :returns: (images, labels) — parallel lists of grayscale face arrays
        and their integer subject numbers.
    """
    images = []  # grayscale face regions
    labels = []  # subject number corresponding to each face region
    for entry in os.listdir(path):
        image_path = os.path.join(path, entry)
        # Load as 8-bit grayscale, which is what the cascade expects.
        grayscale = np.array(Image.open(image_path).convert('L'), 'uint8')
        # "subjectNN.ext" -> NN
        label = int(os.path.split(image_path)[1].split(".")[0].replace("subject", ""))
        # One image may contribute several detected faces.
        for (x, y, w, h) in faceCascade.detectMultiScale(grayscale):
            images.append(grayscale[y: y + h, x: x + w])
            labels.append(label)
    return images, labels
def train_recognizer(path):
    """Trains a face recognizer on a dataset based on a path to a folder containing images"""
    # LBPH (Local Binary Patterns Histograms) recognizer from opencv-contrib.
    recognizer = cv2.face.createLBPHFaceRecognizer()
    faces, subject_labels = get_images_and_labels(path)
    cv2.destroyAllWindows()
    # Fit the recognizer to the collected face crops and labels.
    recognizer.train(faces, np.array(subject_labels))
    return recognizer
def recognize_face(recognizer, img):
    """Predict the subject label of the face in *img*.

    :param recognizer: a trained cv2 face recognizer (see train_recognizer)
    :param img: image data convertible via ``np.array`` — presumably already
        grayscale like the training data; TODO confirm at the call site.
    :returns: (label, confidence); (-1, -1) when no face is detected.
        If several faces are detected, the prediction for the last one wins.
    """
    predict_image = np.array(img)
    faces = faceCascade.detectMultiScale(predict_image)
    nbr_predicted = -1
    conf = -1
    for (x, y, w, h) in faces:
        nbr_predicted, conf = recognizer.predict(predict_image[y: y + h, x: x + w])
    return nbr_predicted, conf
#print(recognize_face(train_recognizer("./yalefaces"), "./yalefaces/subject01.sad"))
|
{
"content_hash": "ad694707f17f5132f976a7c6d5f3a863",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 97,
"avg_line_length": 43.52459016393443,
"alnum_prop": 0.6655367231638418,
"repo_name": "The-J-Person/Barfacecor",
"id": "bea21b64bc8bc344724375498e6b9ccacb3576e1",
"size": "2704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "face_recognizer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13006"
}
],
"symlink_target": ""
}
|
"""
Master configuration file for Evennia.
NOTE: NO MODIFICATIONS SHOULD BE MADE TO THIS FILE!
All settings changes should be done by copy-pasting the variable and
its value to <gamedir>/conf/settings.py.
Hint: Don't copy&paste over more from this file than you actually want
to change. Anything you don't copy&paste will thus retain its default
value - which may change as Evennia is developed. This way you can
always be sure of what you have changed and what is default behaviour.
"""
from builtins import range
import os
import sys
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = "Evennia"
# Lockdown mode will cut off the game from any external connections
# and only allow connections from localhost. Requires a cold reboot.
LOCKDOWN_MODE = False
# Activate telnet service
TELNET_ENABLED = True
# A list of ports the Evennia telnet server listens on Can be one or many.
TELNET_PORTS = [4000]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
TELNET_INTERFACES = ['0.0.0.0']
# OOB (out-of-band) telnet communication allows Evennia to communicate
# special commands and data with enabled Telnet clients. This is used
# to create custom client interfaces over a telnet connection. To make
# full use of OOB, you need to prepare functions to handle the data
# server-side (see INPUT_FUNC_MODULES). TELNET_ENABLED is required for this
# to work.
TELNET_OOB_ENABLED = False
# Start the evennia django+twisted webserver so you can
# browse the evennia website and the admin interface
# (Obs - further web configuration can be found below
# in the section 'Config for Django web features')
WEBSERVER_ENABLED = True
# This is a security setting protecting against host poisoning
# attacks. It defaults to allowing all. In production, make
# sure to change this to your actual host addresses/IPs.
ALLOWED_HOSTS = ["*"]
# The webserver sits behind a Portal proxy. This is a list
# of tuples (proxyport,serverport) used. The proxyports are what
# the Portal proxy presents to the world. The serverports are
# the internal ports the proxy uses to forward data to the Server-side
# webserver (these should not be publicly open)
WEBSERVER_PORTS = [(8000, 5001)]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSERVER_INTERFACES = ['0.0.0.0']
# IP addresses that may talk to the server in a reverse proxy configuration,
# like NginX.
UPSTREAM_IPS = ['127.0.0.1']
# The webserver uses threadpool for handling requests. This will scale
# with server load. Set the minimum and maximum number of threads it
# may use as (min, max) (must be > 0)
WEBSERVER_THREADPOOL_LIMITS = (1, 20)
# Start the evennia webclient. This requires the webserver to be running and
# offers the fallback ajax-based webclient backbone for browsers not supporting
# the websocket one.
WEBCLIENT_ENABLED = True
# Activate Websocket support for modern browsers. If this is on, the
# default webclient will use this and only use the ajax version of the browser
# is too old to support websockets. Requires WEBCLIENT_ENABLED.
WEBSOCKET_CLIENT_ENABLED = True
# Server-side websocket port to open for the webclient.
WEBSOCKET_CLIENT_PORT = 8001
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSOCKET_CLIENT_INTERFACE = '0.0.0.0'
# Actual URL for webclient component to reach the websocket. You only need
# to set this if you know you need it, like using some sort of proxy setup.
# If given it must be on the form "ws://hostname" (WEBSOCKET_CLIENT_PORT will
# be automatically appended). If left at None, the client will itself
# figure out this url based on the server's hostname.
WEBSOCKET_CLIENT_URL = None
# Activate SSH protocol communication (SecureShell)
SSH_ENABLED = False
# Ports to use for SSH
SSH_PORTS = [8022]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSH_INTERFACES = ['0.0.0.0']
# Activate SSL protocol (SecureSocketLibrary)
SSL_ENABLED = False
# Ports to use for SSL
SSL_PORTS = [4001]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSL_INTERFACES = ['0.0.0.0']
# This determine's whether Evennia's custom admin page is used, or if the
# standard Django admin is used.
EVENNIA_ADMIN = True
# Path to the lib directory containing the bulk of the codebase's code.
EVENNIA_DIR = os.path.dirname(os.path.abspath(__file__))
# Path to the game directory (containing the server/conf/settings.py file)
# This is dynamically created- there is generally no need to change this!
if len(sys.argv) > 1 and sys.argv[1] == 'test':
    # unittesting mode
    GAME_DIR = os.getcwd()
else:
    # Fallback location (will be replaced by the actual game dir at runtime)
    GAME_DIR = os.path.join(EVENNIA_DIR, 'game_template')
    # Walk up at most ten directory levels looking for the real game dir,
    # identified by containing server/conf/settings.py.
    for _ in range(10):
        gpath = os.getcwd()
        if "server" in os.listdir(gpath):
            if os.path.isfile(os.path.join("server", "conf", "settings.py")):
                GAME_DIR = gpath
                break
        os.chdir(os.pardir)
# Place to put log files
LOG_DIR = os.path.join(GAME_DIR, 'server', 'logs')
SERVER_LOG_FILE = os.path.join(LOG_DIR, 'server.log')
PORTAL_LOG_FILE = os.path.join(LOG_DIR, 'portal.log')
HTTP_LOG_FILE = os.path.join(LOG_DIR, 'http_requests.log')
# if this is set to the empty string, lockwarnings will be turned off.
LOCKWARNING_LOG_FILE = os.path.join(LOG_DIR, 'lockwarnings.log')
# Rotate log files when server and/or portal stops. This will keep log
# file sizes down. Turn off to get ever growing log files and never
# lose log info.
CYCLE_LOGFILES = True
# Number of lines to append to rotating channel logs when they rotate
CHANNEL_LOG_NUM_TAIL_LINES = 20
# Max size of channel log files before they rotate
CHANNEL_LOG_ROTATE_SIZE = 1000000
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/8.0/interactive/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'UTC'
# Activate time zone in datetimes
USE_TZ = True
# Authentication backends. This is the code used to authenticate a user.
AUTHENTICATION_BACKENDS = (
'evennia.web.utils.backends.CaseInsensitiveModelBackend',)
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
LANGUAGE_CODE = 'en-us'
# How long time (in seconds) a user may idle before being logged
# out. This can be set as big as desired. A user may avoid being
# thrown off by sending the empty system command 'idle' to the server
# at regular intervals. Set <=0 to deactivate idle timeout completely.
IDLE_TIMEOUT = -1
# The idle command can be sent to keep your session active without actually
# having to spam normal commands regularly. It gives no feedback, only updates
# the idle timer. Note that "idle" will *always* work, even if a different
# command-name is given here; this is because the webclient needs a default
# to send to avoid proxy timeouts.
IDLE_COMMAND = "idle"
# The set of encodings tried. A Player object may set an attribute "encoding" on
# itself to match the client used. If not set, or wrong encoding is
# given, this list is tried, in order, aborting on the first match.
# Add sets for languages/regions your players are likely to use.
# (see http://en.wikipedia.org/wiki/Character_encoding)
ENCODINGS = ["utf-8", "latin-1", "ISO-8859-1"]
# Regular expression applied to all output to a given session in order
# to strip away characters (usually various forms of decorations) for the benefit
# of users with screen readers. Note that ANSI/MXP doesn't need to
# be stripped this way, that is handled automatically.
SCREENREADER_REGEX_STRIP = r"\+-+|\+$|\+~|--+|~~+|==+"
# The game server opens an AMP port so that the portal can
# communicate with it. This is an internal functionality of Evennia, usually
# operating between two processes on the same machine. You usually don't need to
# change this unless you cannot use the default AMP port/host for
# whatever reason.
AMP_HOST = 'localhost'
AMP_PORT = 5000
AMP_INTERFACE = '127.0.0.1'
# Database objects are cached in what is known as the idmapper. The idmapper
# caching results in a massive speedup of the server (since it dramatically
# limits the number of database accesses needed) and also allows for
# storing temporary data on objects. It is however also the main memory
# consumer of Evennia. With this setting the cache can be capped and
# flushed when it reaches a certain size. Minimum is 50 MB but it is
# not recommended to set this to less than 100 MB for a distribution
# system.
# Empirically, N_objects_in_cache ~ ((RMEM - 35) / 0.0157):
# mem(MB) | objs in cache || mem(MB) | objs in cache
# 50 | ~1000 || 800 | ~49 000
# 100 | ~4000 || 1200 | ~75 000
# 200 | ~10 000 || 1600 | ~100 000
# 500 | ~30 000 || 2000 | ~125 000
# Note that the estimated memory usage is not exact (and the cap is only
# checked every 5 minutes), so err on the side of caution if
# running on a server with limited memory. Also note that Python
# will not necessarily return the memory to the OS when the idmapper
# flashes (the memory will be freed and made available to the Python
# process only). How many objects need to be in memory at any given
# time depends very much on your game so some experimentation may
# be necessary (use @server to see how many objects are in the idmapper
# cache at any time). Setting this to None disables the cache cap.
IDMAPPER_CACHE_MAXSIZE = 200  # (MB)
# This determines how many connections per second the Portal should
# accept, as a DoS countermeasure. If the rate exceeds this number, incoming
# connections will be queued to this rate, so none will be lost.
# Must be set to a value > 0.
MAX_CONNECTION_RATE = 2
# Determine how many commands per second a given Session is allowed
# to send to the Portal via a connected protocol. Too high rate will
# drop the command and echo a warning. Note that this will also cap
# OOB messages so don't set it too low if you expect a lot of events
# from the client! To turn the limiter off, set to <= 0.
MAX_COMMAND_RATE = 80
# The warning to echo back to users if they send commands too fast
COMMAND_RATE_WARNING = "You entered commands too fast. Wait a moment and try again."
# Determine how large of a string can be sent to the server in number
# of characters. If they attempt to enter a string over this character
# limit, we stop them and send a message. To make unlimited, set to
# 0 or less.
MAX_CHAR_LIMIT = 6000
# The warning to echo back to users if they enter a very large string
MAX_CHAR_LIMIT_WARNING = "You entered a string that was too long. Please break it up into multiple parts."
# If this is true, errors and tracebacks from the engine will be
# echoed as text in-game as well as to the log. This can speed up
# debugging. OBS: Showing full tracebacks to regular users could be a
# security problem - turn this off in a production game!
IN_GAME_ERRORS = True
######################################################################
# Evennia Database config
######################################################################
# Database config syntax:
# ENGINE - path to the database backend. Possible choices are:
# 'django.db.backends.sqlite3', (default)
# 'django.db.backends.mysql',
# 'django.db.backends.postgresql_psycopg2',
# 'django.db.backends.oracle' (untested).
# NAME - database name, or path to the db file for sqlite3
# USER - db admin (unused in sqlite3)
# PASSWORD - db admin password (unused in sqlite3)
# HOST - empty string is localhost (unused in sqlite3)
# PORT - empty string defaults to localhost (unused in sqlite3)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(GAME_DIR, 'server', 'evennia.db3'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}}
# How long the django-database connection should be kept open, in seconds.
# If you get errors about the database having gone away after long idle
# periods, shorten this value (e.g. MySQL defaults to a timeout of 8 hrs)
CONN_MAX_AGE = 3600 * 7
######################################################################
# Evennia pluggable modules
######################################################################
# Plugin modules extend Evennia in various ways. In the cases with no
# existing default, there are examples of many of these modules
# in contrib/examples.
# The command parser module to use. See the default module for which
# functions it must implement
COMMAND_PARSER = "evennia.commands.cmdparser.cmdparser"
# On a multi-match when search objects or commands, the user has the
# ability to search again with an index marker that differentiates
# the results. If multiple "box" objects
# are found, they can by default be separated as 1-box, 2-box. Below you
# can change the regular expression used. The regex must have one
# have two capturing groups (?P<number>...) and (?P<name>...) - the default
# parser expects this. It should also involve a number starting from 1.
# When changing this you must also update SEARCH_MULTIMATCH_TEMPLATE
# to properly describe the syntax.
SEARCH_MULTIMATCH_REGEX = r"(?P<number>[0-9]+)-(?P<name>.*)"
# To display multimatch errors in various listings we must display
# the syntax in a way that matches what SEARCH_MULTIMATCH_REGEX understand.
# The template will be populated with data and expects the following markup:
# {number} - the order of the multimatch, starting from 1; {name} - the
# name (key) of the multimatched entity; {aliases} - eventual
# aliases for the entity; {info} - extra info like #dbrefs for staff. Don't
# forget a line break if you want one match per line.
SEARCH_MULTIMATCH_TEMPLATE = " {number}-{name}{aliases}{info}\n"
# The handler that outputs errors when using any API-level search
# (not manager methods). This function should correctly report errors
# both for command- and object-searches. This allows full control
# over the error output (it uses SEARCH_MULTIMATCH_TEMPLATE by default).
SEARCH_AT_RESULT = "evennia.utils.utils.at_search_result"
# The module holding text strings for the connection screen.
# This module should contain one or more variables
# with strings defining the look of the screen.
CONNECTION_SCREEN_MODULE = "server.conf.connection_screens"
# An optional module that, if existing, must hold a function
# named at_initial_setup(). This hook method can be used to customize
# the server's initial setup sequence (the very first startup of the system).
# The check will fail quietly if module doesn't exist or fails to load.
AT_INITIAL_SETUP_HOOK_MODULE = "server.conf.at_initial_setup"
# Module containing your custom at_server_start(), at_server_reload() and
# at_server_stop() methods. These methods will be called every time
# the server starts, reloads and resets/stops respectively.
AT_SERVER_STARTSTOP_MODULE = "server.conf.at_server_startstop"
# List of one or more module paths to modules containing a function start_
# plugin_services(application). This module will be called with the main
# Evennia Server application when the Server is initiated.
# It will be called last in the startup sequence.
SERVER_SERVICES_PLUGIN_MODULES = ["server.conf.server_services_plugins"]
# List of one or more module paths to modules containing a function
# start_plugin_services(application). This module will be called with the
# main Evennia Portal application when the Portal is initiated.
# It will be called last in the startup sequence.
PORTAL_SERVICES_PLUGIN_MODULES = ["server.conf.portal_services_plugins"]
# Module holding MSSP meta data. This is used by MUD-crawlers to determine
# what type of game you are running, how many players you have etc.
MSSP_META_MODULE = "server.conf.mssp"
# Module for web plugins.
WEB_PLUGINS_MODULE = "server.conf.web_plugins"
# Tuple of modules implementing lock functions. All callable functions
# inside these modules will be available as lock functions.
LOCK_FUNC_MODULES = ("evennia.locks.lockfuncs", "server.conf.lockfuncs",)
# Module holding handlers for managing incoming data from the client. These
# will be loaded in order, meaning functions in later modules may overload
# previous ones if having the same name.
INPUT_FUNC_MODULES = ["evennia.server.inputfuncs", "server.conf.inputfuncs"]
# Modules that contain prototypes for use with the spawner mechanism.
PROTOTYPE_MODULES = ["world.prototypes"]
# Module holding settings/actions for the dummyrunner program (see the
# dummyrunner for more information)
DUMMYRUNNER_SETTINGS_MODULE = "evennia.server.profiling.dummyrunner_settings"
# Mapping to extend Evennia's normal ANSI color tags. The mapping is a list of
# tuples mapping the tag to the ANSI conversion, like `("%c%r", ansi.ANSI_RED)`
# (the evennia.utils.ansi module contains all ANSI escape sequences). This is
# mainly supplied for support of legacy codebase tag formats.
COLOR_ANSI_EXTRA_MAP = []
######################################################################
# Default command sets
######################################################################
# Note that with the exception of the unloggedin set (which is not
# stored anywhere in the database), changing these paths will only affect
# NEW created characters/objects, not those already in play. So if you plan to
# change this, it's recommended you do it before having created a lot of objects
# (or simply reset the database after the change for simplicity).
# Command set used on session before player has logged in
CMDSET_UNLOGGEDIN = "commands.default_cmdsets.UnloggedinCmdSet"
# Command set used on the logged-in session
CMDSET_SESSION = "commands.default_cmdsets.SessionCmdSet"
# Default set for logged in player with characters (fallback)
CMDSET_CHARACTER = "commands.default_cmdsets.CharacterCmdSet"
# Command set for players without a character (ooc)
CMDSET_PLAYER = "commands.default_cmdsets.PlayerCmdSet"
# Location to search for cmdsets if full path not given
CMDSET_PATHS = ["commands", "evennia", "contribs"]
# Parent class for all default commands. Changing this class will
# modify all default commands, so do so carefully.
COMMAND_DEFAULT_CLASS = "evennia.commands.default.muxcommand.MuxCommand"
# Command.arg_regex is a regular expression describing how the arguments
# to the command must be structured for the command to match a given user
# input. By default there is no restriction as long as the input string
# starts with the command name.
COMMAND_DEFAULT_ARG_REGEX = None
# By default, Command.msg will only send data to the Session calling
# the Command in the first place. If set, Command.msg will instead return
# data to all Sessions connected to the Player/Character associated with
# calling the Command. This may be more intuitive for users in certain
# multisession modes.
COMMAND_DEFAULT_MSG_ALL_SESSIONS = False
# The help category of a command if not otherwise specified.
COMMAND_DEFAULT_HELP_CATEGORY = "general"
# The default lockstring of a command.
COMMAND_DEFAULT_LOCKS = ""
# The Channel Handler will create a command to represent each channel,
# creating it with the key of the channel, its aliases, locks etc. The
# default class logs channel messages to a file and allows for /history.
# This setting allows to override the command class used with your own.
CHANNEL_COMMAND_CLASS = "evennia.comms.channelhandler.ChannelCommand"
######################################################################
# Typeclasses and other paths
######################################################################
# Server-side session class used.
SERVER_SESSION_CLASS = "evennia.server.serversession.ServerSession"
# These are paths that will be prefixed to the paths given if the
# immediately entered path fail to find a typeclass. It allows for
# shorter input strings. They must either base off the game directory
# or start from the evennia library.
TYPECLASS_PATHS = ["typeclasses", "evennia", "evennia.contrib", "evennia.contrib.tutorial_examples"]
# Typeclass for player objects (linked to a character) (fallback)
BASE_PLAYER_TYPECLASS = "typeclasses.players.Player"
# Typeclass and base for all objects (fallback)
BASE_OBJECT_TYPECLASS = "typeclasses.objects.Object"
# Typeclass for character objects linked to a player (fallback)
BASE_CHARACTER_TYPECLASS = "typeclasses.characters.Character"
# Typeclass for rooms (fallback)
BASE_ROOM_TYPECLASS = "typeclasses.rooms.Room"
# Typeclass for Exit objects (fallback).
BASE_EXIT_TYPECLASS = "typeclasses.exits.Exit"
# Typeclass for Channel (fallback).
BASE_CHANNEL_TYPECLASS = "typeclasses.channels.Channel"
# Typeclass for Scripts (fallback). You usually don't need to change this
# but create custom variations of scripts on a per-case basis instead.
BASE_SCRIPT_TYPECLASS = "typeclasses.scripts.Script"
# The default home location used for all objects. This is used as a
# fallback if an object's normal home location is deleted. Default
# is Limbo (#2).
DEFAULT_HOME = "#2"
# The start position for new characters. Default is Limbo (#2).
# MULTISESSION_MODE = 0, 1 - used by default unloggedin create command
# MULTISESSION_MODE = 2,3 - used by default character_create command
START_LOCATION = "#2"
# Lookups of Attributes, Tags, Nicks, Aliases can be aggressively
# cached to avoid repeated database hits. This often gives noticeable
# performance gains since they are called so often. Drawback is that
# if you are accessing the database from multiple processes (such as
# from a website -not- running Evennia's own webserver) data may go
# out of sync between the processes. Keep on unless you face such
# issues.
TYPECLASS_AGGRESSIVE_CACHE = True
######################################################################
# Batch processors
######################################################################
# Python path to a directory to be searched for batch scripts
# for the batch processors (.ev and/or .py files).
BASE_BATCHPROCESS_PATHS = ['world', 'evennia.contrib', 'evennia.contrib.tutorial_examples']
######################################################################
# Game Time setup
######################################################################
# You don't actually have to use this, but it affects the routines in
# evennia.utils.gametime.py and allows for a convenient measure to
# determine the current in-game time. You can of course interpret
# "week", "month" etc as your own in-game time units as desired.
# The time factor dictates if the game world runs faster (timefactor>1)
# or slower (timefactor<1) than the real world.
TIME_FACTOR = 2.0
# The starting point of your game time (the epoch), in seconds.
# In Python a value of 0 means Jan 1 1970 (use negatives for earlier
# start date). This will affect the returns from the utils.gametime
# module.
TIME_GAME_EPOCH = None
######################################################################
# Inlinefunc
######################################################################
# Evennia supports inline function preprocessing. This allows users
# to supply inline calls on the form $func(arg, arg, ...) to do
# session-aware text formatting and manipulation on the fly. If
# disabled, such inline functions will not be parsed.
INLINEFUNC_ENABLED = False
# Only functions defined globally (and not starting with '_') in
# these modules will be considered valid inlinefuncs. The list
# is loaded from left-to-right, same-named functions will overload
INLINEFUNC_MODULES = ["evennia.utils.inlinefuncs",
"server.conf.inlinefuncs"]
######################################################################
# Default Player setup and access
######################################################################
# Different Multisession modes allow a player (=account) to connect to the
# game simultaneously with multiple clients (=sessions). In modes 0,1 there is
# only one character created to the same name as the account at first login.
# In modes 2,3 no default character will be created and the MAX_NR_CHARACTERS
# value (below) defines how many characters the default char_create command
# allow per player.
# 0 - single session, one player, one character, when a new session is
# connected, the old one is disconnected
# 1 - multiple sessions, one player, one character, each session getting
# the same data
# 2 - multiple sessions, one player, many characters, one session per
# character (disconnects multiplets)
# 3 - like mode 2, except multiple sessions can puppet one character, each
# session getting the same data.
MULTISESSION_MODE = 0
# The maximum number of characters allowed for MULTISESSION_MODE 2,3. This is
# checked by the default ooc char-creation command. Forced to 1 for
# MULTISESSION_MODE 0 and 1.
MAX_NR_CHARACTERS = 1
# The access hierarchy, in climbing order. A higher permission in the
# hierarchy includes access of all levels below it. Used by the perm()/pperm()
# lock functions.
PERMISSION_HIERARCHY = ["Guests", # note-only used if GUEST_ENABLED=True
"Players",
"PlayerHelpers",
"Builders",
"Wizards",
"Immortals"]
# The default permission given to all new players
PERMISSION_PLAYER_DEFAULT = "Players"
# Default sizes for client window (in number of characters), if client
# is not supplying this on its own
CLIENT_DEFAULT_WIDTH = 78
CLIENT_DEFAULT_HEIGHT = 45 # telnet standard is 24 but does anyone use such
# low-res displays anymore?
######################################################################
# Guest accounts
######################################################################
# This enables guest logins, by default via "connect guest". Note that
# you need to edit your login screen to inform about this possibility.
GUEST_ENABLED = False
# Typeclass for guest player objects (linked to a character)
BASE_GUEST_TYPECLASS = "typeclasses.players.Guest"
# The permission given to guests
PERMISSION_GUEST_DEFAULT = "Guests"
# The default home location used for guests.
GUEST_HOME = DEFAULT_HOME
# The start position used for guest characters.
GUEST_START_LOCATION = START_LOCATION
# Naming convention used when creating new guest players/characters.
# The length of this list also caps how many guests may be connected
# at once; the default allows a maximum of nine guests, named Guest1
# through Guest9.
GUEST_LIST = ["Guest%i" % num for num in range(1, 10)]
######################################################################
# In-game Channels created from server start
######################################################################
# This is a list of global channels created by the
# initialization script the first time Evennia starts.
# The superuser (user #1) will be automatically subscribed
# to all channels in this list. Each channel is described by
# a dictionary keyed with the same keys valid as arguments
# to the evennia.create.create_channel() function.
# Note: Evennia will treat the first channel in this list as
# the general "public" channel and the second as the
# general "mud info" channel. Other channels beyond that
# are up to the admin to design and call appropriately.
DEFAULT_CHANNELS = [
# public channel
{"key": "Public",
"aliases": ('ooc', 'pub'),
"desc": "Public discussion",
"locks": "control:perm(Wizards);listen:all();send:all()"},
# connection/mud info
{"key": "MudInfo",
"aliases": "",
"desc": "Connection log",
"locks": "control:perm(Immortals);listen:perm(Wizards);send:false()"}
]
# Extra optional channel for receiving connection messages ("<player> has (dis)connected").
# While the MudInfo channel will also receive this, this channel is meant for non-staffers.
CHANNEL_CONNECTINFO = None
######################################################################
# External Channel connections
######################################################################
# Note: You do *not* have to make your MUD open to
# the public to use the external connections, they
# operate as long as you have an internet connection,
# just like stand-alone chat clients. IRC requires
# that you have twisted.words installed.
# Evennia can connect to external IRC channels and
# echo what is said on the channel to IRC and vice
# versa. Obs - make sure the IRC network allows bots.
# When enabled, command @irc2chan will be available in-game
IRC_ENABLED = False
# RSS allows connecting RSS feeds (from forum updates, blogs etc) to
# an in-game channel. The channel will be updated when the rss feed
# updates. Use @rss2chan in game to connect if this setting is
# active. OBS: RSS support requires the python-feedparser package to
# be installed (through package manager or from the website
# http://code.google.com/p/feedparser/)
RSS_ENABLED = False
RSS_UPDATE_INTERVAL = 60 * 10  # 10 minutes
######################################################################
# Django web features
######################################################################
# While DEBUG is False, show a regular server error page on the web
# stuff, email the traceback to the people in the ADMINS tuple
# below. If True, show a detailed traceback for the web
# browser to display. Note however that this will leak memory when
# active, so make sure to turn it off for a production server!
DEBUG = False
# While true, show "pretty" error messages for template syntax errors.
TEMPLATE_DEBUG = DEBUG
# Emails are sent to these people if the above DEBUG value is False. If you'd
# rather prefer nobody receives emails, leave this commented out or empty.
ADMINS = ()  # e.g. (('Your Name', 'your_email@domain.com'),)
# These guys get broken link notifications when SEND_BROKEN_LINK_EMAILS is True.
MANAGERS = ADMINS
# Absolute path to the directory that holds file uploads from web apps.
# Example: "/home/media/media.lawrence.com"
MEDIA_ROOT = os.path.join(GAME_DIR, "web", "media")
# It's safe to dis-regard this, as it's a Django feature we only half use as a
# dependency, not actually what it's primarily meant for.
SITE_ID = 1
# The age for sessions.
# Default: 1209600 (2 weeks, in seconds)
SESSION_COOKIE_AGE = 1209600
# Session cookie domain
# Default: None
SESSION_COOKIE_DOMAIN = None
# The name of the cookie to use for sessions.
# Default: 'sessionid'
SESSION_COOKIE_NAME = 'sessionid'
# Should the session expire when the browser closes?
# Default: False
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Where to find locales (no need to change this, most likely)
LOCALE_PATHS = [os.path.join(EVENNIA_DIR, "locale/")]
# This should be turned off unless you want to do tests with Django's
# development webserver (normally Evennia runs its own server)
SERVE_MEDIA = False
# The master urlconf file that contains all of the sub-branches to the
# applications. Change this to add your own URLs to the website.
ROOT_URLCONF = 'web.urls'
# Where users are redirected after logging in via contrib.auth.login.
LOGIN_REDIRECT_URL = '/'
# Where to redirect users when using the @login_required decorator.
LOGIN_URL = '/accounts/login'
# Where to redirect users who wish to logout.
LOGOUT_URL = '/accounts/login'
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure
# to use a trailing slash. Django1.4+ will look for admin files under
# STATIC_URL/admin.
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(GAME_DIR, "web", "static")
# Location of static data to overload the defaults from
# evennia/web/webclient and evennia/web/website's static/ dirs.
STATICFILES_DIRS = (
os.path.join(GAME_DIR, "web", "static_overrides"),)
# Patterns of files in the static directories. Used here to make sure that
# its readme file is preserved but unused.
STATICFILES_IGNORE_PATTERNS = ('README.md',)
# The name of the currently selected web template. This corresponds to the
# directory names shown in the templates directory.
WEBSITE_TEMPLATE = 'website'
WEBCLIENT_TEMPLATE = 'webclient'
# The default options used by the webclient
WEBCLIENT_OPTIONS = {
"gagprompt": True, # Gags prompt from the output window and keep them
# together with the input bar
"helppopup": True, # Shows help files in a new popup window
"notification_popup": False, # Shows notifications of new messages as
# popup windows
"notification_sound": False # Plays a sound for notifications of new
# messages
}
# We setup the location of the website template as well as the admin site.
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(GAME_DIR, "web", "template_overrides", WEBSITE_TEMPLATE),
os.path.join(GAME_DIR, "web", "template_overrides", WEBCLIENT_TEMPLATE),
os.path.join(GAME_DIR, "web", "template_overrides"),
os.path.join(EVENNIA_DIR, "web", "website", "templates", WEBSITE_TEMPLATE),
os.path.join(EVENNIA_DIR, "web", "website", "templates"),
os.path.join(EVENNIA_DIR, "web", "webclient", "templates", WEBCLIENT_TEMPLATE),
os.path.join(EVENNIA_DIR, "web", "webclient", "templates")],
'APP_DIRS': True,
'OPTIONS': {
"context_processors": [
'django.template.context_processors.i18n',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.media',
'django.template.context_processors.debug',
'evennia.web.utils.general_context.general_context']
}
}]
# MiddleWare are semi-transparent extensions to Django's functionality.
# see http://www.djangoproject.com/documentation/middleware/ for a more detailed
# explanation.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware', # 1.4?
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.admindocs.middleware.XViewMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',)
######################################################################
# Evennia components
######################################################################
# Global and Evennia-specific apps. This ties everything together so we can
# refer to app models and perform DB syncs.
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.flatpages',
'django.contrib.sites',
'django.contrib.staticfiles',
'evennia.utils.idmapper',
'evennia.server',
'evennia.typeclasses',
'evennia.players',
'evennia.objects',
'evennia.comms',
'evennia.help',
'evennia.scripts',
'evennia.web.website',
'evennia.web.webclient')
# The user profile extends the User object with more functionality;
# This should usually not be changed.
AUTH_USER_MODEL = "players.PlayerDB"
# Use a custom test runner that just tests Evennia-specific apps.
TEST_RUNNER = 'evennia.server.tests.EvenniaTestSuiteRunner'
######################################################################
# Django extensions
######################################################################
# Django extensions are useful third-party tools that are not
# always included in the default django distro.
try:
import django_extensions
INSTALLED_APPS = INSTALLED_APPS + ('django_extensions',)
except ImportError:
# Django extensions are not installed in all distros.
pass
#######################################################################
# SECRET_KEY
#######################################################################
# This is the signing key for the cookies generated by Evennia's
# web interface.
#
# It is a fallback for the SECRET_KEY setting in settings.py, which
# is randomly seeded when settings.py is first created. If copying
# from here, make sure to change it!
SECRET_KEY = 'changeme!(*#&*($&*(#*(&SDFKJJKLS*(@#KJAS'
|
{
"content_hash": "8f2173b415f540e31c7c756e65720b53",
"timestamp": "",
"source": "github",
"line_count": 748,
"max_line_length": 104,
"avg_line_length": 49.643048128342244,
"alnum_prop": 0.6894945196994587,
"repo_name": "whitehorse-io/encarnia",
"id": "f94bf5d3ef35ed106ca919bb0ba33e0bc14338f6",
"size": "37133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evennia/evennia/settings_default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "63966"
},
{
"name": "CSS",
"bytes": "87525"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "GAP",
"bytes": "18122"
},
{
"name": "HTML",
"bytes": "91741"
},
{
"name": "JavaScript",
"bytes": "151335"
},
{
"name": "Objective-C",
"bytes": "1292"
},
{
"name": "Python",
"bytes": "24616242"
},
{
"name": "Shell",
"bytes": "8808"
}
],
"symlink_target": ""
}
|
import sys
import time
import random
import rsa
from botocore.utils import parse_to_aware_datetime
from botocore.signers import CloudFrontSigner
from awscli.arguments import CustomArgument
from awscli.customizations.utils import validate_mutually_exclusive_handler
from awscli.customizations.commands import BasicCommand
def register(event_handler):
    """Attach all CloudFront CLI customizations to the awscli event system.

    Wires in the custom ``sign`` subcommand, the simplified ``--paths``
    option for ``create-invalidation``, and the ``--origin-domain-name`` /
    ``--default-root-object`` shortcuts for ``create-distribution`` and
    ``update-distribution``.
    """
    event_handler.register('building-command-table.cloudfront', _add_sign)
    # Provides a simpler --paths for ``aws cloudfront create-invalidation``
    event_handler.register(
        'building-argument-table.cloudfront.create-invalidation', _add_paths)
    # --paths synthesizes InvalidationBatch itself, so the two are exclusive.
    event_handler.register(
        'operation-args-parsed.cloudfront.create-invalidation',
        validate_mutually_exclusive_handler(['invalidation_batch'], ['paths']))
    event_handler.register(
        'operation-args-parsed.cloudfront.create-distribution',
        validate_mutually_exclusive_handler(
            ['default_root_object', 'origin_domain_name'],
            ['distribution_config']))
    event_handler.register(
        'building-argument-table.cloudfront.create-distribution',
        lambda argument_table, **kwargs: argument_table.__setitem__(
            'origin-domain-name', OriginDomainName(argument_table)))
    event_handler.register(
        'building-argument-table.cloudfront.create-distribution',
        lambda argument_table, **kwargs: argument_table.__setitem__(
            'default-root-object', CreateDefaultRootObject(argument_table)))
    # ``context`` is filled in by the 'top-level-args-parsed' event (via
    # context.update) and later read by UpdateDefaultRootObject to build a
    # client with the same region/endpoint/SSL settings the user passed.
    context = {}
    event_handler.register(
        'top-level-args-parsed', context.update, unique_id='cloudfront')
    event_handler.register(
        'operation-args-parsed.cloudfront.update-distribution',
        validate_mutually_exclusive_handler(
            ['default_root_object'], ['distribution_config']))
    event_handler.register(
        'building-argument-table.cloudfront.update-distribution',
        lambda argument_table, **kwargs: argument_table.__setitem__(
            'default-root-object', UpdateDefaultRootObject(
                context=context, argument_table=argument_table)))
def unique_string(prefix='cli'):
    """Return a reasonably unique identifier string.

    The result has the form ``<prefix>-<epoch seconds>-<random int>``.
    Uniqueness is probabilistic (time + random nonce), not guaranteed.
    """
    timestamp = int(time.time())
    nonce = random.randint(1, 1000000)
    return '{0}-{1}-{2}'.format(prefix, timestamp, nonce)
def _add_paths(argument_table, **kwargs):
    """Relax ``--invalidation-batch`` and add the ``--paths`` shortcut.

    ``--invalidation-batch`` stays available but is no longer required,
    because ``--paths`` can synthesize the same payload.
    """
    argument_table['invalidation-batch'].required = False
    argument_table['paths'] = PathsArgument()
class PathsArgument(CustomArgument):
    """Custom ``--paths`` argument for ``create-invalidation``.

    Accepts one or more space-separated paths and expands them into the
    ``InvalidationBatch`` structure the CloudFront API expects, generating
    a unique caller reference automatically.
    """
    def __init__(self):
        # Typo fix in user-facing help text: "mututally" -> "mutually".
        doc = (
            'The space-separated paths to be invalidated.'
            ' Note: --invalidation-batch and --paths are mutually exclusive.'
        )
        super(PathsArgument, self).__init__('paths', nargs='+', help_text=doc)
    def add_to_params(self, parameters, value):
        """Convert the parsed path list into an InvalidationBatch payload."""
        if value is not None:
            parameters['InvalidationBatch'] = {
                "CallerReference": unique_string(),
                "Paths": {"Quantity": len(value), "Items": value},
            }
class ExclusiveArgument(CustomArgument):
    """Base class for arguments that are mutually exclusive with another.

    Marks ``exclusive_to`` (``--distribution-config`` by default) as no
    longer required and appends the exclusivity note to the help text.
    """
    # Typo fix in user-facing help text: "mututally" -> "mutually".
    DOC = '%s This argument and --%s are mutually exclusive.'
    def __init__(self, name, argument_table,
                 exclusive_to='distribution-config', help_text=''):
        argument_table[exclusive_to].required = False
        super(ExclusiveArgument, self).__init__(
            name, help_text=self.DOC % (help_text, exclusive_to))
    def distribution_config_template(self):
        """Return a minimal, valid DistributionConfig skeleton.

        Subclasses fill in origins and the default root object; the
        placeholder TargetOriginId is overwritten by OriginDomainName.
        """
        return {
            "CallerReference": unique_string(),
            "Origins": {"Quantity": 0, "Items": []},
            "DefaultCacheBehavior": {
                "TargetOriginId": "placeholder",
                "ForwardedValues": {
                    "QueryString": False,
                    "Cookies": {"Forward": "none"},
                },
                "TrustedSigners": {
                    "Enabled": False,
                    "Quantity": 0
                },
                "ViewerProtocolPolicy": "allow-all",
                "MinTTL": 0
            },
            "Enabled": True,
            "Comment": "",
        }
class OriginDomainName(ExclusiveArgument):
    """``--origin-domain-name`` shortcut for ``create-distribution``.

    Builds a minimal DistributionConfig containing a single origin derived
    from the given domain name (mutually exclusive with
    ``--distribution-config``, via the ExclusiveArgument base).
    """
    def __init__(self, argument_table):
        super(OriginDomainName, self).__init__(
            'origin-domain-name', argument_table,
            help_text='The domain name for your origin.')
    def add_to_params(self, parameters, value):
        # No-op when the user did not supply --origin-domain-name.
        if value is None:
            return
        # Only create the template if --distribution-config (or another
        # shortcut) has not already populated DistributionConfig.
        parameters.setdefault(
            'DistributionConfig', self.distribution_config_template())
        origin_id = unique_string(prefix=value)
        item = {"Id": origin_id, "DomainName": value, "OriginPath": ''}
        if item['DomainName'].endswith('.s3.amazonaws.com'):
            # We do not need to detect '.s3[\w-].amazonaws.com' as S3 buckets,
            # because CloudFront treats GovCloud S3 buckets as custom domain.
            # http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/setting-up-cloudfront.html
            item["S3OriginConfig"] = {"OriginAccessIdentity": ""}
        else:
            # Anything else is treated as a custom (non-S3) origin.
            item["CustomOriginConfig"] = {
                'HTTPPort': 80, 'HTTPSPort': 443,
                'OriginProtocolPolicy': 'http-only'}
        parameters['DistributionConfig']['Origins'] = {
            "Quantity": 1, "Items": [item]}
        # Point the default cache behavior at the origin we just created.
        parameters['DistributionConfig']['DefaultCacheBehavior'][
            'TargetOriginId'] = origin_id
class CreateDefaultRootObject(ExclusiveArgument):
    """``--default-root-object`` shortcut for ``create-distribution``.

    Builds a minimal DistributionConfig template when one has not already
    been provided, then sets its DefaultRootObject.
    """
    def __init__(self, argument_table, help_text=''):
        super(CreateDefaultRootObject, self).__init__(
            'default-root-object', argument_table, help_text=help_text or (
                'The object that you want CloudFront to return (for example, '
                'index.html) when a viewer request points to your root URL.'))
    def add_to_params(self, parameters, value):
        if value is not None:
            parameters.setdefault(
                'DistributionConfig', self.distribution_config_template())
            parameters['DistributionConfig']['DefaultRootObject'] = value
class UpdateDefaultRootObject(CreateDefaultRootObject):
    """``--default-root-object`` shortcut for ``update-distribution``.

    Unlike the create variant, an update must send the *complete*
    DistributionConfig, so this argument first fetches the current config
    via get-distribution-config and only replaces DefaultRootObject.
    """
    def __init__(self, context, argument_table):
        super(UpdateDefaultRootObject, self).__init__(
            argument_table, help_text=(
                'The object that you want CloudFront to return (for example, '
                'index.html) when a viewer request points to your root URL. '
                'CLI will automatically make a get-distribution-config call '
                'to load and preserve your other settings.'))
        # ``context`` is populated elsewhere (see register()) with the
        # session and the parsed top-level args read below.
        self.context = context
    def add_to_params(self, parameters, value):
        if value is not None:
            # Build a client honoring the user's top-level CLI options.
            client = self.context['session'].create_client(
                'cloudfront',
                region_name=self.context['parsed_args'].region,
                endpoint_url=self.context['parsed_args'].endpoint_url,
                verify=self.context['parsed_args'].verify_ssl)
            response = client.get_distribution_config(Id=parameters['Id'])
            # The current ETag must be echoed back as IfMatch for the
            # update call to be accepted.
            parameters['IfMatch'] = response['ETag']
            parameters['DistributionConfig'] = response['DistributionConfig']
            parameters['DistributionConfig']['DefaultRootObject'] = value
def _add_sign(command_table, session, **kwargs):
    """Add the custom ``sign`` subcommand to the cloudfront command table."""
    command_table['sign'] = SignCommand(session)
class SignCommand(BasicCommand):
    """``aws cloudfront sign``: generate a CloudFront signed URL."""
    NAME = 'sign'
    DESCRIPTION = 'Sign a given url.'
    DATE_FORMAT = """Supported formats include:
        YYYY-MM-DD (which means 0AM UTC of that day),
        YYYY-MM-DDThh:mm:ss (with default timezone as UTC),
        YYYY-MM-DDThh:mm:ss+hh:mm or YYYY-MM-DDThh:mm:ss-hh:mm (with offset),
        or EpochTime (which always means UTC).
        Do NOT use YYYYMMDD, because it will be treated as EpochTime."""
    ARG_TABLE = [
        {
            'name': 'url',
            'no_paramfile': True,  # To disable the default paramfile behavior
            'required': True,
            'help_text': 'The URL to be signed',
        },
        {
            'name': 'key-pair-id',
            'required': True,
            'help_text': (
                "The active CloudFront key pair Id for the key pair "
                "that you're using to generate the signature."),
        },
        {
            'name': 'private-key',
            'required': True,
            'help_text': 'file://path/to/your/private-key.pem',
        },
        {
            'name': 'date-less-than', 'required': True,
            'help_text':
                'The expiration date and time for the URL. ' + DATE_FORMAT,
        },
        {
            'name': 'date-greater-than',
            'help_text':
                'An optional start date and time for the URL. ' + DATE_FORMAT,
        },
        {
            'name': 'ip-address',
            'help_text': (
                'An optional IP address or IP address range to allow client '
                'making the GET request from. Format: x.x.x.x/x or x.x.x.x'),
        },
    ]
    def _run_main(self, args, parsed_globals):
        """Sign ``args.url`` and write the presigned URL to stdout."""
        signer = CloudFrontSigner(
            args.key_pair_id, RSASigner(args.private_key).sign)
        date_less_than = parse_to_aware_datetime(args.date_less_than)
        date_greater_than = args.date_greater_than
        if date_greater_than is not None:
            date_greater_than = parse_to_aware_datetime(date_greater_than)
        if date_greater_than is not None or args.ip_address is not None:
            # Start-date or IP restrictions need a custom policy.
            policy = signer.build_policy(
                args.url, date_less_than, date_greater_than=date_greater_than,
                ip_address=args.ip_address)
            sys.stdout.write(signer.generate_presigned_url(
                args.url, policy=policy))
        else:
            # Expiration-only signing can use the simpler canned policy.
            sys.stdout.write(signer.generate_presigned_url(
                args.url, date_less_than=date_less_than))
        return 0
class RSASigner(object):
    """Sign messages with an RSA private key for CloudFront URL signing.

    The 'SHA-1' digest is fixed below because that is what this signer
    uses for CloudFront signatures.
    """
    def __init__(self, private_key):
        # ``private_key`` is the PEM (PKCS#1) text of the key itself,
        # not a file path.
        self.priv_key = rsa.PrivateKey.load_pkcs1(private_key.encode('utf8'))
    def sign(self, message):
        """Return the RSA-SHA1 signature bytes for ``message``."""
        return rsa.sign(message, self.priv_key, 'SHA-1')
|
{
"content_hash": "e0391659232b4779f76beab9ac6eca04",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 96,
"avg_line_length": 40.38306451612903,
"alnum_prop": 0.6003994008986521,
"repo_name": "mnahm5/django-estore",
"id": "968a1c65a3b8a915db81cf0090878954993f49a6",
"size": "10580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/site-packages/awscli/customizations/cloudfront.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1351"
},
{
"name": "Batchfile",
"bytes": "2695"
},
{
"name": "C",
"bytes": "460931"
},
{
"name": "C++",
"bytes": "26115"
},
{
"name": "CSS",
"bytes": "144496"
},
{
"name": "HTML",
"bytes": "155544"
},
{
"name": "JavaScript",
"bytes": "206799"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "24837167"
},
{
"name": "Shell",
"bytes": "4408"
},
{
"name": "Tcl",
"bytes": "1237789"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
}
|
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# NOTE(review): ``testinfo`` scripts the cocos interactive autotest driver
# and ``tags`` labels what this test exercises — presumably 's' = snapshot,
# 't <secs>' = wait, 'q' = quit; confirm against the test harness docs.
testinfo = "s, t 0.5, s, t 1, s, q"
tags = "SkewVertical"
import pyglet
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
class BackgroundLayer(cocos.layer.Layer):
    """Layer that draws a static background image at the scene origin.

    The GL calls (glColor4ub, glPushMatrix, ...) are presumably brought in
    by the ``from cocos.layer import *`` star import above — TODO confirm.
    """
    def __init__(self):
        super(BackgroundLayer, self).__init__()
        self.img = pyglet.resource.image('background_image.png')
    def draw( self ):
        # Opaque white so the image is drawn untinted.
        glColor4ub(255, 255, 255, 255)
        glPushMatrix()
        # Apply this layer's position/scale/rotation before blitting.
        self.transform()
        self.img.blit(0,0)
        glPopMatrix()
def main():
    """Run the SkewVertical action demo: one background layer, one action."""
    director.init(resizable=True)
    scene = cocos.scene.Scene()
    scene.add(BackgroundLayer(), z=0)
    action = SkewVertical(duration=1)
    scene.do(action)
    director.run(scene)
if __name__ == '__main__':
main()
|
{
"content_hash": "f4515ef11183a7b36b6c963e0f5ec7c6",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 65,
"avg_line_length": 23.23076923076923,
"alnum_prop": 0.5960264900662252,
"repo_name": "shadowmint/nwidget",
"id": "8489af344303c869da9faf57df24f8321667a738",
"size": "980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/cocos2d-0.5.5/test/test_skew_vertical.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11298"
},
{
"name": "JavaScript",
"bytes": "17394"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "9815941"
},
{
"name": "Shell",
"bytes": "10521"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions used to build these docs (autodoc pulls docstrings
# straight from the source).
extensions = [
    'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'restless'
copyright = u'2014, Daniel Lindsley'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.2'
# The full version, including alpha/beta/rc tags.
release = '2.0.2-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# NOTE(review): 'default' is the classic built-in theme in this Sphinx era;
# newer Sphinx versions rename/alias it -- confirm when upgrading Sphinx.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'restlessdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'restless.tex', u'restless Documentation',
   u'Daniel Lindsley', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'restless', u'restless Documentation',
     [u'Daniel Lindsley'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'restless', u'restless Documentation',
   u'Daniel Lindsley', 'restless', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Blerg. The autodocs for the Django module freak out if this isn't done:
# Django-dependent modules cannot even be imported by autodoc unless the
# settings machinery has been initialized; an empty configure() suffices.
from django.conf import settings
settings.configure()
|
{
"content_hash": "e02f57ad6848922b3fae89097f281538",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 79,
"avg_line_length": 31.49402390438247,
"alnum_prop": 0.7065148640101202,
"repo_name": "pobear/restless",
"id": "a511f70fdf66483d907774655bb49e346409b6c1",
"size": "8326",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "99768"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build and return the shared Naboo city-hall Building template."""
    building = Building()
    building.template = "object/building/player/city/shared_cityhall_naboo.iff"
    building.attribute_template_id = -1
    building.stfName("building_name","cityhall")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return building
|
{
"content_hash": "bb22cfbdeef16c59e7eb19f92101a1d1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 23.615384615384617,
"alnum_prop": 0.7003257328990228,
"repo_name": "obi-two/Rebelion",
"id": "92c720b375e8a7d542a6c098d34273b2e126c2f3",
"size": "452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/building/player/city/shared_cityhall_naboo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.created_date_v20 import CreatedDateV20 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v20 import LastModifiedDateV20 # noqa: F401,E501
from orcid_api_v3.models.source_v20 import SourceV20 # noqa: F401,E501
from orcid_api_v3.models.url_v20 import UrlV20 # noqa: F401,E501
class PersonExternalIdentifierV20(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared Swagger type (as a string).
    swagger_types = {
        'created_date': 'CreatedDateV20',
        'last_modified_date': 'LastModifiedDateV20',
        'source': 'SourceV20',
        'external_id_type': 'str',
        'external_id_value': 'str',
        'external_id_url': 'UrlV20',
        'external_id_relationship': 'str',
        'visibility': 'str',
        'path': 'str',
        'put_code': 'int',
        'display_index': 'int'
    }
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'created_date': 'created-date',
        'last_modified_date': 'last-modified-date',
        'source': 'source',
        'external_id_type': 'external-id-type',
        'external_id_value': 'external-id-value',
        'external_id_url': 'external-id-url',
        'external_id_relationship': 'external-id-relationship',
        'visibility': 'visibility',
        'path': 'path',
        'put_code': 'put-code',
        'display_index': 'display-index'
    }
    def __init__(self, created_date=None, last_modified_date=None, source=None, external_id_type=None, external_id_value=None, external_id_url=None, external_id_relationship=None, visibility=None, path=None, put_code=None, display_index=None):  # noqa: E501
        """PersonExternalIdentifierV20 - a model defined in Swagger"""  # noqa: E501
        self._created_date = None
        self._last_modified_date = None
        self._source = None
        self._external_id_type = None
        self._external_id_value = None
        self._external_id_url = None
        self._external_id_relationship = None
        self._visibility = None
        self._path = None
        self._put_code = None
        self._display_index = None
        self.discriminator = None
        if created_date is not None:
            self.created_date = created_date
        if last_modified_date is not None:
            self.last_modified_date = last_modified_date
        if source is not None:
            self.source = source
        # external_id_type and external_id_value are assigned unconditionally:
        # their setters reject None, so both are effectively required.
        self.external_id_type = external_id_type
        self.external_id_value = external_id_value
        if external_id_url is not None:
            self.external_id_url = external_id_url
        if external_id_relationship is not None:
            self.external_id_relationship = external_id_relationship
        if visibility is not None:
            self.visibility = visibility
        if path is not None:
            self.path = path
        if put_code is not None:
            self.put_code = put_code
        if display_index is not None:
            self.display_index = display_index
    @property
    def created_date(self):
        """Gets the created_date of this PersonExternalIdentifierV20.  # noqa: E501
        :return: The created_date of this PersonExternalIdentifierV20.  # noqa: E501
        :rtype: CreatedDateV20
        """
        return self._created_date
    @created_date.setter
    def created_date(self, created_date):
        """Sets the created_date of this PersonExternalIdentifierV20.
        :param created_date: The created_date of this PersonExternalIdentifierV20.  # noqa: E501
        :type: CreatedDateV20
        """
        self._created_date = created_date
    @property
    def last_modified_date(self):
        """Gets the last_modified_date of this PersonExternalIdentifierV20.  # noqa: E501
        :return: The last_modified_date of this PersonExternalIdentifierV20.  # noqa: E501
        :rtype: LastModifiedDateV20
        """
        return self._last_modified_date
    @last_modified_date.setter
    def last_modified_date(self, last_modified_date):
        """Sets the last_modified_date of this PersonExternalIdentifierV20.
        :param last_modified_date: The last_modified_date of this PersonExternalIdentifierV20.  # noqa: E501
        :type: LastModifiedDateV20
        """
        self._last_modified_date = last_modified_date
    @property
    def source(self):
        """Gets the source of this PersonExternalIdentifierV20.  # noqa: E501
        :return: The source of this PersonExternalIdentifierV20.  # noqa: E501
        :rtype: SourceV20
        """
        return self._source
    @source.setter
    def source(self, source):
        """Sets the source of this PersonExternalIdentifierV20.
        :param source: The source of this PersonExternalIdentifierV20.  # noqa: E501
        :type: SourceV20
        """
        self._source = source
    @property
    def external_id_type(self):
        """Gets the external_id_type of this PersonExternalIdentifierV20.  # noqa: E501
        :return: The external_id_type of this PersonExternalIdentifierV20.  # noqa: E501
        :rtype: str
        """
        return self._external_id_type
    @external_id_type.setter
    def external_id_type(self, external_id_type):
        """Sets the external_id_type of this PersonExternalIdentifierV20.
        :param external_id_type: The external_id_type of this PersonExternalIdentifierV20.  # noqa: E501
        :type: str
        """
        # Required field: None is rejected here rather than in __init__.
        if external_id_type is None:
            raise ValueError("Invalid value for `external_id_type`, must not be `None`")  # noqa: E501
        self._external_id_type = external_id_type
    @property
    def external_id_value(self):
        """Gets the external_id_value of this PersonExternalIdentifierV20.  # noqa: E501
        :return: The external_id_value of this PersonExternalIdentifierV20.  # noqa: E501
        :rtype: str
        """
        return self._external_id_value
    @external_id_value.setter
    def external_id_value(self, external_id_value):
        """Sets the external_id_value of this PersonExternalIdentifierV20.
        :param external_id_value: The external_id_value of this PersonExternalIdentifierV20.  # noqa: E501
        :type: str
        """
        # Required field: None is rejected here rather than in __init__.
        if external_id_value is None:
            raise ValueError("Invalid value for `external_id_value`, must not be `None`")  # noqa: E501
        self._external_id_value = external_id_value
    @property
    def external_id_url(self):
        """Gets the external_id_url of this PersonExternalIdentifierV20.  # noqa: E501
        :return: The external_id_url of this PersonExternalIdentifierV20.  # noqa: E501
        :rtype: UrlV20
        """
        return self._external_id_url
    @external_id_url.setter
    def external_id_url(self, external_id_url):
        """Sets the external_id_url of this PersonExternalIdentifierV20.
        :param external_id_url: The external_id_url of this PersonExternalIdentifierV20.  # noqa: E501
        :type: UrlV20
        """
        self._external_id_url = external_id_url
    @property
    def external_id_relationship(self):
        """Gets the external_id_relationship of this PersonExternalIdentifierV20.  # noqa: E501
        :return: The external_id_relationship of this PersonExternalIdentifierV20.  # noqa: E501
        :rtype: str
        """
        return self._external_id_relationship
    @external_id_relationship.setter
    def external_id_relationship(self, external_id_relationship):
        """Sets the external_id_relationship of this PersonExternalIdentifierV20.
        :param external_id_relationship: The external_id_relationship of this PersonExternalIdentifierV20.  # noqa: E501
        :type: str
        """
        # Closed enum from the API schema; anything else is rejected.
        allowed_values = ["PART_OF", "SELF"]  # noqa: E501
        if external_id_relationship not in allowed_values:
            raise ValueError(
                "Invalid value for `external_id_relationship` ({0}), must be one of {1}"  # noqa: E501
                .format(external_id_relationship, allowed_values)
            )
        self._external_id_relationship = external_id_relationship
    @property
    def visibility(self):
        """Gets the visibility of this PersonExternalIdentifierV20.  # noqa: E501
        :return: The visibility of this PersonExternalIdentifierV20.  # noqa: E501
        :rtype: str
        """
        return self._visibility
    @visibility.setter
    def visibility(self, visibility):
        """Sets the visibility of this PersonExternalIdentifierV20.
        :param visibility: The visibility of this PersonExternalIdentifierV20.  # noqa: E501
        :type: str
        """
        # Closed enum from the API schema; anything else is rejected.
        allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"]  # noqa: E501
        if visibility not in allowed_values:
            raise ValueError(
                "Invalid value for `visibility` ({0}), must be one of {1}"  # noqa: E501
                .format(visibility, allowed_values)
            )
        self._visibility = visibility
    @property
    def path(self):
        """Gets the path of this PersonExternalIdentifierV20.  # noqa: E501
        :return: The path of this PersonExternalIdentifierV20.  # noqa: E501
        :rtype: str
        """
        return self._path
    @path.setter
    def path(self, path):
        """Sets the path of this PersonExternalIdentifierV20.
        :param path: The path of this PersonExternalIdentifierV20.  # noqa: E501
        :type: str
        """
        self._path = path
    @property
    def put_code(self):
        """Gets the put_code of this PersonExternalIdentifierV20.  # noqa: E501
        :return: The put_code of this PersonExternalIdentifierV20.  # noqa: E501
        :rtype: int
        """
        return self._put_code
    @put_code.setter
    def put_code(self, put_code):
        """Sets the put_code of this PersonExternalIdentifierV20.
        :param put_code: The put_code of this PersonExternalIdentifierV20.  # noqa: E501
        :type: int
        """
        self._put_code = put_code
    @property
    def display_index(self):
        """Gets the display_index of this PersonExternalIdentifierV20.  # noqa: E501
        :return: The display_index of this PersonExternalIdentifierV20.  # noqa: E501
        :rtype: int
        """
        return self._display_index
    @display_index.setter
    def display_index(self, display_index):
        """Sets the display_index of this PersonExternalIdentifierV20.
        :param display_index: The display_index of this PersonExternalIdentifierV20.  # noqa: E501
        :type: int
        """
        self._display_index = display_index
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists, and dicts; anything
        # with a to_dict() of its own is delegated to.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(PersonExternalIdentifierV20, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Value equality: same class and identical attribute dicts.
        if not isinstance(other, PersonExternalIdentifierV20):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
{
"content_hash": "91709735775d220eb33a2afaff6385fd",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 257,
"avg_line_length": 32.91472868217054,
"alnum_prop": 0.613911132045847,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "2dc452c9aca9814b06395f9b36da5e4cc21b8fd2",
"size": "12755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api_v3/models/person_external_identifier_v20.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
}
|
# Cache timeout constants.
NEVER_TIMEOUT = None  # for django caches setting None as a timeout value means the cache never times out.
FIVE_MIN_TIMEOUT = 60 * 5  # five minutes, expressed in seconds
STORAGE_USAGE_KEY = 'storage_usage:{target_id}'  # cache-key template; format() with the target's id
|
{
"content_hash": "fdf68050bc708bf042fe71d2378341d0",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 106,
"avg_line_length": 45.5,
"alnum_prop": 0.7417582417582418,
"repo_name": "mattclark/osf.io",
"id": "489c232d96e806c514c8c03f68fef73c15515699",
"size": "182",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "api/caching/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "8456"
},
{
"name": "HTML",
"bytes": "317371"
},
{
"name": "JavaScript",
"bytes": "1792241"
},
{
"name": "Mako",
"bytes": "654772"
},
{
"name": "Python",
"bytes": "10166997"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
from numpy.compat.py3k import basestring
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
    # On Python 2 the builtin range() materialises a full list; rebinding the
    # module-level name to xrange makes the loops below iterate lazily.
    range = xrange
# Public names re-exported from this module (used by `from ... import *`).
__all__ = [
    'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
    'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
    'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
    'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
    'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
    'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
    'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
    ]
def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
      Input object.

    Returns
    -------
    b : {0, 1}
      Return 1 if the object has an iterator method or is a sequence,
      and 0 otherwise.


    Examples
    --------
    >>> np.iterable([1, 2, 3])
    1
    >>> np.iterable(2)
    0

    """
    try:
        iter(y)
    except TypeError:
        # iter() raises TypeError for non-iterables; a bare `except:` here
        # would also swallow KeyboardInterrupt/SystemExit and genuine bugs.
        return 0
    return 1
def _hist_optim_numbins_estimator(a, estimator):
    """
    Resolve an automatic bin-count estimator by name and apply it to `a`.

    estimator: str
        One of ['auto', 'fd', 'scott', 'rice', 'sturges'].  The matching
        rule is evaluated on `a` and its estimate for the optimal number
        of bins is returned as an int.
    """
    assert isinstance(estimator, basestring)
    # private function should not be called otherwise

    if a.size == 0:
        return 1

    def sturges(x):
        """Sturges rule: log2-based, assumes normality, depends only on size.

        Performs poorly for non-normal data, especially for large x.
        """
        return np.ceil(np.log2(x.size)) + 1

    def rice(x):
        """Rice rule: ~2 * n**(1/3) bins (asymptotically optimal growth).

        No normality assumption; better for large data but tends to
        overestimate the number of bins.  Depends only on size.
        """
        return np.ceil(2 * x.size ** (1.0 / 3))

    def scott(x):
        """Scott rule: binwidth from the standard deviation.

        Binwidth is proportional to the std dev and inversely proportional
        to the cube root of the size.  Falls back to 1 bin for zero width.
        """
        width = 3.5 * x.std() * x.size ** (-1.0 / 3)
        if width > 0:
            return np.ceil(x.ptp() / width)
        return 1

    def fd(x):
        """Freedman-Diaconis rule: binwidth from the interquartile range.

        More outlier-robust than Scott, but the IQR uses fewer points than
        the std dev, hurting accuracy for long-tailed distributions.
        Returns 1 bin when the IQR is 0.
        """
        iqr = np.subtract(*np.percentile(x, [75, 25]))
        if iqr > 0:
            width = 2 * iqr * x.size ** (-1.0 / 3)
            return np.ceil(x.ptp() / width)
        # If iqr is 0, default number of bins is 1
        return 1

    def auto(x):
        """Maximum of the FD and Sturges estimates.

        FD is usually the most robust but too small for small x; Sturges
        is good for small (<1000) datasets.  Taking the max gives good
        off-the-shelf behaviour.
        """
        return max(fd(x), sturges(x))

    rules = {'sturges': sturges, 'rice': rice, 'scott': scott,
             'fd': fd, 'auto': auto}
    key = estimator.lower()
    if key not in rules:
        raise ValueError("{0} not a valid method for `bins`".format(estimator))
    # the rules return floats, but np.histogram requires an int
    return int(rules[key](a))
def histogram(a, bins=10, range=None, normed=False, weights=None,
              density=None):
    """
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a sequence,
        it defines the bin edges, including the rightmost edge, allowing
        for non-uniform bin widths.

        .. versionadded:: 1.11.0

        If `bins` is a string from the list below, `histogram` will use the method
        chosen to calculate the optimal number of bins (see Notes for more detail
        on the estimators). For visualisation, we suggest using the 'auto' option.

        'auto'
            Maximum of the 'sturges' and 'fd' estimators. Provides good all round performance
        'fd' (Freedman Diaconis Estimator)
            Robust (resilient to outliers) estimator that takes into account data
            variability and data size .
        'scott'
            Less robust estimator that that takes into account data
            variability and data size.
        'rice'
            Estimator does not take variability into account, only data size.
            Commonly overestimates number of bins required.
        'sturges'
            R's default method, only accounts for data size. Only optimal for
            gaussian data and underestimates number of bins for large non-gaussian datasets.
    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored.
    normed : bool, optional
        This keyword is deprecated in Numpy 1.6 due to confusing/buggy
        behavior. It will be removed in Numpy 2.0. Use the density keyword
        instead.
        If False, the result will contain the number of samples
        in each bin. If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that this latter behavior is
        known to be buggy with unequal bin widths; use `density` instead.
    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in `a`
        only contributes its associated weight towards the bin count
        (instead of 1). If `normed` is True, the weights are normalized,
        so that the integral of the density over the range remains 1
    density : bool, optional
        If False, the result will contain the number of samples
        in each bin. If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.
        Overrides the `normed` keyword if given.

    Returns
    -------
    hist : array
        The values of the histogram. See `normed` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    See Also
    --------
    histogramdd, bincount, searchsorted, digitize

    Notes
    -----
    All but the last (righthand-most) bin is half-open. In other words, if
    `bins` is::

      [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
    second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
    4.

    .. versionadded:: 1.11.0

    The methods to estimate the optimal number of bins are well found in literature,
    and are inspired by the choices R provides for histogram visualisation.
    Note that having the number of bins proportional to :math:`n^{1/3}` is asymptotically optimal,
    which is why it appears in most estimators.
    These are simply plug-in methods that give good starting points for number of bins.
    In the equations below, :math:`h` is the binwidth and :math:`n_h` is the number of bins

    'Auto' (maximum of the 'Sturges' and 'FD' estimators)
        A compromise to get a good value. For small datasets the sturges
        value will usually be chosen, while larger datasets will usually default to FD.
        Avoids the overly conservative behaviour of FD and Sturges for small and
        large datasets respectively. Switchover point is usually x.size~1000.

    'FD' (Freedman Diaconis Estimator)
        .. math:: h = 2 \\frac{IQR}{n^{1/3}}
        The binwidth is proportional to the interquartile range (IQR)
        and inversely proportional to cube root of a.size. Can be too
        conservative for small datasets, but is quite good
        for large datasets. The IQR is very robust to outliers.

    'Scott'
        .. math:: h = \\frac{3.5\\sigma}{n^{1/3}}
        The binwidth is proportional to the standard deviation (sd) of the data
        and inversely proportional to cube root of a.size. Can be too
        conservative for small datasets, but is quite good
        for large datasets. The sd is not very robust to outliers. Values
        are very similar to the Freedman Diaconis Estimator in the absence of outliers.

    'Rice'
        .. math:: n_h = \\left\\lceil 2n^{1/3} \\right\\rceil
        The number of bins is only proportional to cube root of a.size.
        It tends to overestimate the number of bins
        and it does not take into account data variability.

    'Sturges'
        .. math:: n_h = \\left\\lceil \\log _{2}n+1 \\right\\rceil
        The number of bins is the base2 log of a.size.
        This estimator assumes normality of data and is too conservative for larger,
        non-normal datasets. This is the default method in R's `hist` method.

    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([ 0.25,  0.25,  0.25,  0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([ 0.5,  0. ,  0.5,  0. ,  0. ,  0.5,  0. ,  0.5,  0. ,  0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist*np.diff(bin_edges))
    1.0

    .. versionadded:: 1.11.0

    Automated Bin Selection Methods example, using 2 peak random data with 2000 points

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.RandomState(10)  # deterministic random data
    >>> a = np.hstack((rng.normal(size = 1000), rng.normal(loc = 5, scale = 2, size = 1000)))
    >>> plt.hist(a, bins = 'auto')  # plt.hist passes it's arguments to np.histogram
    >>> plt.title("Histogram with 'auto' bins")
    >>> plt.show()
    """
    # NOTE: the `range` parameter shadows the builtin `range` inside this
    # function body.
    a = asarray(a)
    if weights is not None:
        weights = asarray(weights)
        if np.any(weights.shape != a.shape):
            raise ValueError(
                'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()
    if (range is not None):
        mn, mx = range
        if (mn > mx):
            raise ValueError(
                'max must be larger than min in range parameter.')
        if not np.all(np.isfinite([mn, mx])):
            raise ValueError(
                'range parameter must be finite.')
    if isinstance(bins, basestring):
        bins = _hist_optim_numbins_estimator(a, bins)
        # if `bins` is a string for an automatic method,
        # this will replace it with the number of bins calculated
    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = np.dtype(np.intp)
    else:
        ntype = weights.dtype
    # We set a block size, as this allows us to iterate over chunks when
    # computing histograms, to minimize memory usage.
    BLOCK = 65536
    if not iterable(bins):
        if np.isscalar(bins) and bins < 1:
            raise ValueError(
                '`bins` should be a positive integer.')
        if range is None:
            if a.size == 0:
                # handle empty arrays. Can't determine range, so use 0-1.
                range = (0, 1)
            else:
                range = (a.min(), a.max())
        mn, mx = [mi + 0.0 for mi in range]
        if mn == mx:
            # Degenerate range (all values equal): widen it so at least one
            # bin has non-zero width.
            mn -= 0.5
            mx += 0.5
        # At this point, if the weights are not integer, floating point, or
        # complex, we have to use the slow algorithm.
        if weights is not None and not (np.can_cast(weights.dtype, np.double) or
                                        np.can_cast(weights.dtype, np.complex)):
            # Converting the scalar bin count to explicit edges routes the
            # computation through the slow searchsorted branch below.
            bins = linspace(mn, mx, bins + 1, endpoint=True)

    if not iterable(bins):
        # Fast path: `bins` is a scalar count, so bin widths are equal and
        # values can be mapped to bin indices with one affine transform.
        # We now convert values of a to bin indices, under the assumption of
        # equal bin widths (which is valid here).

        # Initialize empty histogram
        n = np.zeros(bins, ntype)
        # Pre-compute histogram scaling factor
        norm = bins / (mx - mn)

        # We iterate over blocks here for two reasons: the first is that for
        # large arrays, it is actually faster (for example for a 10^8 array it
        # is 2x as fast) and it results in a memory footprint 3x lower in the
        # limit of large arrays.
        for i in arange(0, len(a), BLOCK):
            tmp_a = a[i:i+BLOCK]
            if weights is None:
                tmp_w = None
            else:
                tmp_w = weights[i:i + BLOCK]

            # Only include values in the right range
            keep = (tmp_a >= mn)
            keep &= (tmp_a <= mx)
            if not np.logical_and.reduce(keep):
                # Only pay for the masking copy when something is out of range.
                tmp_a = tmp_a[keep]
                if tmp_w is not None:
                    tmp_w = tmp_w[keep]
            tmp_a = tmp_a.astype(float)
            tmp_a -= mn
            tmp_a *= norm

            # Compute the bin indices, and for values that lie exactly on mx we
            # need to subtract one
            indices = tmp_a.astype(np.intp)
            indices[indices == bins] -= 1

            # We now compute the histogram using bincount
            if ntype.kind == 'c':
                # Complex weights: accumulate real and imaginary parts
                # separately, since bincount only handles real weights.
                n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
                n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
            else:
                n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)

        # We now compute the bin edges since these are returned
        bins = linspace(mn, mx, bins + 1, endpoint=True)
    else:
        # Slow path: explicit (possibly unequal) bin edges; counts are taken
        # as differences of cumulative counts at the edges.
        bins = asarray(bins)
        if (np.diff(bins) < 0).any():
            raise ValueError(
                'bins must increase monotonically.')

        # Initialize empty histogram
        n = np.zeros(bins.shape, ntype)

        if weights is None:
            for i in arange(0, len(a), BLOCK):
                sa = sort(a[i:i+BLOCK])
                # Cumulative count of values below each edge ('left' for the
                # inner edges, 'right' for the last so it is inclusive).
                n += np.r_[sa.searchsorted(bins[:-1], 'left'),
                           sa.searchsorted(bins[-1], 'right')]
        else:
            zero = array(0, dtype=ntype)
            for i in arange(0, len(a), BLOCK):
                tmp_a = a[i:i+BLOCK]
                tmp_w = weights[i:i+BLOCK]
                sorting_index = np.argsort(tmp_a)
                sa = tmp_a[sorting_index]
                sw = tmp_w[sorting_index]
                # Prefix sums of the sorted weights let the weighted count up
                # to any edge be read off by index.
                cw = np.concatenate(([zero, ], sw.cumsum()))
                bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
                                  sa.searchsorted(bins[-1], 'right')]
                n += cw[bin_index]

        # Convert cumulative edge counts into per-bin counts.
        n = np.diff(n)

    if density is not None:
        if density:
            db = array(np.diff(bins), float)
            return n/db/n.sum(), bins
        else:
            return n, bins
    else:
        # deprecated, buggy behavior. Remove for Numpy 2.0
        if normed:
            db = array(np.diff(bins), float)
            return n/(n*db).sum(), bins
        else:
            return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : array_like
        The data to be histogrammed. It must be an (N,D) array or data
        that can be converted to such. The rows of the resulting array
        are the coordinates of points in a D dimensional polytope.
    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the bin edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).

    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    normed : bool, optional
        If False, returns the number of samples in each bin. If True,
        returns the bin density ``bin_count / sample_count / bin_volume``.
    weights : (N,) array_like, optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if normed is True. If normed is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See normed and weights
        for the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.

    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram

    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)

    """
    # NOTE: the `range` parameter shadows the builtin `range` inside this
    # function body.
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = atleast_2d(sample).T
        N, D = sample.shape

    nbin = empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = asarray(weights)

    try:
        M = len(bins)
        if M != D:
            raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                ' sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        # Handle empty input. Range can't be determined in that case, use 0-1.
        if N == 0:
            smin = zeros(D)
            smax = ones(D)
        else:
            smin = atleast_1d(array(sample.min(0), float))
            smax = atleast_1d(array(sample.max(0), float))
    else:
        if not np.all(np.isfinite(range)):
            raise ValueError(
                'range parameter must be finite.')
        smin = zeros(D)
        smax = zeros(D)
        for i in arange(D):
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in arange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # avoid rounding issues for comparisons when dealing with inexact types
    if np.issubdtype(sample.dtype, np.inexact):
        edge_dt = sample.dtype
    else:
        edge_dt = float
    # Create edge arrays
    for i in arange(D):
        if isscalar(bins[i]):
            if bins[i] < 1:
                raise ValueError(
                    "Element at index %s in `bins` should be a positive "
                    "integer." % i)
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
        else:
            edges[i] = asarray(bins[i], edge_dt)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = diff(edges[i])
        if np.any(np.asarray(dedges[i]) <= 0):
            raise ValueError(
                "Found bin edge of size <= 0. Did you specify `bins` with"
                "non-monotonic sequence?")

    nbin = asarray(nbin)

    # Handle empty input.
    if N == 0:
        return np.zeros(nbin-2), edges

    # Compute the bin number each sample falls into.
    Ncount = {}
    for i in arange(D):
        Ncount[i] = digitize(sample[:, i], edges[i])

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in arange(D):
        # Rounding precision
        mindiff = dedges[i].min()
        if not np.isinf(mindiff):
            decimal = int(-log10(mindiff)) + 6
            # Find which points are on the rightmost edge.
            not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
            on_edge = (around(sample[:, i], decimal) ==
                       around(edges[i][-1], decimal))
            # Shift these points one bin to the left.
            Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1

    # Flattened histogram matrix (1D)
    # Reshape is used so that overlarge arrays
    # will raise an error.
    hist = zeros(nbin, float).reshape(-1)

    # Compute the sample indices in the flattened histogram matrix.
    # This is a mixed-radix linearisation of the per-axis bin indices,
    # with axes processed in nbin-sorted order (`ni`).
    ni = nbin.argsort()
    xy = zeros(N, int)
    for i in arange(0, D-1):
        xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
    xy += Ncount[ni[-1]]

    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    if len(xy) == 0:
        return zeros(nbin-2, int), edges

    flatcount = bincount(xy, weights)
    a = arange(len(flatcount))
    hist[a] = flatcount

    # Shape into a proper matrix
    hist = hist.reshape(sort(nbin))
    # Undo the axis ordering introduced by sorting `ni` above, swapping
    # axes back one at a time until they match the input order.
    for i in arange(nbin.size):
        j = ni.argsort()[i]
        hist = hist.swapaxes(i, j)
        ni[i], ni[j] = ni[j], ni[i]

    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*[slice(1, -1)]
    hist = hist[core]

    # Normalize if normed is True
    if normed:
        s = hist.sum()
        for i in arange(D):
            shape = ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s

    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
def average(a, axis=None, weights=None, returned=False):
    """
    Compute the weighted average along the specified axis.

    Parameters
    ----------
    a : array_like
        Array containing data to be averaged. If `a` is not an array, a
        conversion is attempted.
    axis : int, optional
        Axis along which to average `a`. If `None`, averaging is done over
        the flattened array.
    weights : array_like, optional
        An array of weights associated with the values in `a`. Each value in
        `a` contributes to the average according to its associated weight.
        The weights array can either be 1-D (in which case its length must be
        the size of `a` along the given axis) or of the same shape as `a`.
        If `weights=None`, then all data in `a` are assumed to have a
        weight equal to one.
    returned : bool, optional
        Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
        is returned, otherwise only the average is returned.
        If `weights=None`, `sum_of_weights` is equivalent to the number of
        elements over which the average is taken.

    Returns
    -------
    average, [sum_of_weights] : array_type or double
        Return the average along the specified axis. When returned is `True`,
        return a tuple with the average as the first element and the sum
        of the weights as the second element. The return type is `Float`
        if `a` is of integer type, otherwise it is of the same type as `a`.
        `sum_of_weights` is of the same type as `average`.

    Raises
    ------
    ZeroDivisionError
        When all weights along axis are zero. See `numpy.ma.average` for a
        version robust to this type of error.
    TypeError
        When the length of 1D `weights` is not the same as the shape of `a`
        along axis.

    See Also
    --------
    mean

    ma.average : average for masked arrays -- useful if your data contains
                 "missing" values

    Examples
    --------
    >>> data = range(1,5)
    >>> data
    [1, 2, 3, 4]
    >>> np.average(data)
    2.5
    >>> np.average(range(1,11), weights=range(10,0,-1))
    4.0

    >>> data = np.arange(6).reshape((3,2))
    >>> data
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> np.average(data, axis=1, weights=[1./4, 3./4])
    array([ 0.75,  2.75,  4.75])
    >>> np.average(data, weights=[1./4, 3./4])
    Traceback (most recent call last):
    ...
    TypeError: Axis must be specified when shapes of a and weights differ.

    """
    # np.matrix is passed through unchanged so that its axis/shape semantics
    # are preserved; everything else is converted to ndarray.
    if not isinstance(a, np.matrix):
        a = np.asarray(a)

    if weights is None:
        avg = a.mean(axis)
        # With unit weights the weight sum is just the number of averaged
        # elements, cast to the average's dtype.
        scl = avg.dtype.type(a.size/avg.size)
    else:
        # Adding 0.0 promotes integer `a` to float without copying floats.
        a = a + 0.0
        wgt = np.asarray(weights)
        # Sanity checks
        if a.shape != wgt.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights "
                    "differ.")
            if wgt.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ.")
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis.")

            # setup wgt to broadcast along axis
            wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)

        scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
        if (scl == 0.0).any():
            raise ZeroDivisionError(
                "Weights sum to zero, can't be normalized")

        avg = np.multiply(a, wgt).sum(axis)/scl

    if returned:
        # Broadcast scl to avg's shape so the pair is consistently shaped.
        scl = np.multiply(avg, 0) + scl
        return avg, scl
    else:
        return avg
def asarray_chkfinite(a, dtype=None, order=None):
    """Convert the input to an array, rejecting NaNs and Infs.

    Parameters
    ----------
    a : array_like
        Input data in any form that can be converted to an array (lists,
        tuples, nested sequences, ndarrays).  Success requires no NaNs or
        Infs.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) memory layout
        for the result.  Defaults to 'C'.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`.  No copy is performed if the input is
        already an ndarray; subclasses are returned as base-class ndarrays.

    Raises
    ------
    ValueError
        If `a` contains NaN (Not a Number) or Inf (Infinity).

    See Also
    --------
    asarray : Create and array.
    asanyarray : Similar function which passes through subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfarray : Convert input to a floating point ndarray.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    fromiter : Create an array from an iterator.
    fromfunction : Construct an array by executing a function on grid
                   positions.

    Examples
    --------
    >>> np.asarray_chkfinite([1, 2], dtype=float)
    array([1., 2.])
    """
    out = np.asarray(a, dtype=dtype, order=order)
    # Only floating/complex kinds can hold NaN or Inf, so the finiteness
    # scan is skipped for integer, bool, and other dtypes.
    is_float_kind = out.dtype.char in np.typecodes['AllFloat']
    if is_float_kind and not np.isfinite(out).all():
        raise ValueError(
            "array must not contain infs or NaNs")
    return out
def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluate a piecewise-defined function.

    Given a set of conditions and corresponding functions, evaluate each
    function on the input data wherever its condition is true.

    Parameters
    ----------
    x : ndarray
        The input domain.
    condlist : list of bool arrays
        Each boolean array corresponds to a function in `funclist`. Wherever
        `condlist[i]` is True, `funclist[i](x)` is used as the output value.

        Each boolean array in `condlist` selects a piece of `x`,
        and should therefore be of the same shape as `x`.

        The length of `condlist` must correspond to that of `funclist`.
        If one extra function is given, i.e. if
        ``len(funclist) - len(condlist) == 1``, then that extra function
        is the default value, used wherever all conditions are false.
    funclist : list of callables, f(x,*args,**kw), or scalars
        Each function is evaluated over `x` wherever its corresponding
        condition is True. It should take an array as input and give an array
        or a scalar value as output. If, instead of a callable,
        a scalar is provided then a constant function (``lambda x: scalar``) is
        assumed.
    args : tuple, optional
        Any further arguments given to `piecewise` are passed to the functions
        upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
        each function is called as ``f(x, 1, 'a')``.
    kw : dict, optional
        Keyword arguments used in calling `piecewise` are passed to the
        functions upon execution, i.e., if called
        ``piecewise(..., ..., lambda=1)``, then each function is called as
        ``f(x, lambda=1)``.

    Returns
    -------
    out : ndarray
        The output is the same shape and type as x and is found by
        calling the functions in `funclist` on the appropriate portions of `x`,
        as defined by the boolean arrays in `condlist`. Portions not covered
        by any condition have a default value of 0.

    See Also
    --------
    choose, select, where

    Notes
    -----
    This is similar to choose or select, except that functions are
    evaluated on elements of `x` that satisfy the corresponding condition from
    `condlist`.

    The result is::

            |--
            |funclist[0](x[condlist[0]])
      out = |funclist[1](x[condlist[1]])
            |...
            |funclist[n2](x[condlist[n2]])
            |--

    Examples
    --------
    Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.

    >>> x = np.linspace(-2.5, 2.5, 6)
    >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
    array([-1., -1., -1.,  1.,  1.,  1.])

    Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
    ``x >= 0``.

    >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
    array([ 2.5,  1.5,  0.5,  0.5,  1.5,  2.5])

    """
    x = asanyarray(x)
    n2 = len(funclist)
    # A bare condition (scalar or a single boolean array) is wrapped so the
    # rest of the function can treat condlist uniformly as a list of arrays.
    if (isscalar(condlist) or not (isinstance(condlist[0], list) or
                                   isinstance(condlist[0], ndarray))):
        condlist = [condlist]
    condlist = array(condlist, dtype=bool)
    n = len(condlist)
    # This is a hack to work around problems with NumPy's
    #  handling of 0-d arrays and boolean indexing with
    #  numpy.bool_ scalars
    zerod = False
    if x.ndim == 0:
        x = x[None]
        zerod = True
        if condlist.shape[-1] != 1:
            condlist = condlist.T
    if n == n2 - 1:  # compute the "otherwise" condition.
        totlist = np.logical_or.reduce(condlist, axis=0)
        # Only able to stack vertically if the array is 1d or less
        if x.ndim <= 1:
            condlist = np.vstack([condlist, ~totlist])
        else:
            condlist = [asarray(c, dtype=bool) for c in condlist]
            totlist = condlist[0]
            for k in range(1, n):
                totlist |= condlist[k]
            condlist.append(~totlist)
        n += 1

    # Uncovered positions keep the zero from this initialisation, which is
    # the documented default value.
    y = zeros(x.shape, x.dtype)
    for k in range(n):
        item = funclist[k]
        if not isinstance(item, collections.Callable):
            # Scalar entry: behaves as a constant function.
            y[condlist[k]] = item
        else:
            vals = x[condlist[k]]
            if vals.size > 0:
                y[condlist[k]] = item(vals, *args, **kw)
    if zerod:
        y = y.squeeze()
    return y
def select(condlist, choicelist, default=0):
    """
    Return an array drawn from elements in choicelist, depending on conditions.

    Parameters
    ----------
    condlist : list of bool ndarrays
        The list of conditions which determine from which array in `choicelist`
        the output elements are taken. When multiple conditions are satisfied,
        the first one encountered in `condlist` is used.
    choicelist : list of ndarrays
        The list of arrays from which the output elements are taken. It has
        to be of the same length as `condlist`.
    default : scalar, optional
        The element inserted in `output` when all conditions evaluate to False.

    Returns
    -------
    output : ndarray
        The output at position m is the m-th element of the array in
        `choicelist` where the m-th element of the corresponding array in
        `condlist` is True.

    See Also
    --------
    where : Return elements from one of two arrays depending on condition.
    take, choose, compress, diag, diagonal

    Examples
    --------
    >>> x = np.arange(10)
    >>> condlist = [x<3, x>5]
    >>> choicelist = [x, x**2]
    >>> np.select(condlist, choicelist)
    array([ 0,  1,  2,  0,  0,  0, 36, 49, 64, 81])

    """
    # Check the size of condlist and choicelist are the same, or abort.
    if len(condlist) != len(choicelist):
        raise ValueError(
            'list of cases must be same length as list of conditions')

    # Now that the dtype is known, handle the deprecated select([], []) case
    if len(condlist) == 0:
        # 2014-02-24, 1.9
        warnings.warn("select with an empty condition list is not possible"
                      "and will be deprecated",
                      DeprecationWarning)
        return np.asarray(default)[()]

    choicelist = [np.asarray(choice) for choice in choicelist]
    choicelist.append(np.asarray(default))

    # need to get the result type before broadcasting for correct scalar
    # behaviour
    dtype = np.result_type(*choicelist)

    # Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it separately optimizes
    # for example when all choices are scalars.
    condlist = np.broadcast_arrays(*condlist)
    choicelist = np.broadcast_arrays(*choicelist)

    # If cond array is not an ndarray in boolean format or scalar bool, abort.
    deprecated_ints = False
    for i in range(len(condlist)):
        cond = condlist[i]
        if cond.dtype.type is not np.bool_:
            if np.issubdtype(cond.dtype, np.integer):
                # A previous implementation accepted int ndarrays accidentally.
                # Supported here deliberately, but deprecated.
                condlist[i] = condlist[i].astype(bool)
                deprecated_ints = True
            else:
                raise ValueError(
                    'invalid entry in choicelist: should be boolean ndarray')

    if deprecated_ints:
        # 2014-02-24, 1.9
        msg = "select condlists containing integer ndarrays is deprecated " \
            "and will be removed in the future. Use `.astype(bool)` to " \
            "convert to bools."
        warnings.warn(msg, DeprecationWarning)

    if choicelist[0].ndim == 0:
        # This may be common, so avoid the call.
        result_shape = condlist[0].shape
    else:
        result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape

    # Start from the default (appended last to choicelist above) and
    # overwrite positions where a condition holds.
    result = np.full(result_shape, choicelist[-1], dtype)

    # Use np.copyto to burn each choicelist array onto result, using the
    # corresponding condlist as a boolean mask. This is done in reverse
    # order since the first choice should take precedence.
    choicelist = choicelist[-2::-1]
    condlist = condlist[::-1]
    for choice, cond in zip(choicelist, condlist):
        np.copyto(result, choice, where=cond)

    return result
def copy(a, order='K'):
    """
    Return an array copy of the given object.
    Parameters
    ----------
    a : array_like
        Input data.
    order : {'C', 'F', 'A', 'K'}, optional
        Memory layout of the copy: 'C' means C order, 'F' Fortran order,
        'A' means 'F' if `a` is Fortran contiguous and 'C' otherwise,
        and 'K' matches the layout of `a` as closely as possible.
        (This function and :meth:`ndarray.copy` are very similar but
        have different defaults for `order`.)
    Returns
    -------
    arr : ndarray
        Array interpretation of `a`.
    Notes
    -----
    Equivalent to ``np.array(a, copy=True)``.
    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> z = np.copy(x)
    >>> x[0] = 10
    >>> x[0] == z[0]
    False
    """
    # Delegate to ``array`` with an explicit copy; only the default
    # memory-layout handling distinguishes this from ``ndarray.copy``.
    duplicate = array(a, order=order, copy=True)
    return duplicate
# Basic operations
def gradient(f, *varargs, **kwargs):
    """
    Return the gradient of an N-dimensional array.
    The gradient is computed using second order accurate central differences
    in the interior and either first differences or second order accurate
    one-sides (forward or backwards) differences at the boundaries. The
    returned gradient hence has the same shape as the input array.
    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.
    varargs : scalar or list of scalar, optional
        N scalars specifying the sample distances for each dimension,
        i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
        single scalar specifies sample distance for all dimensions.
        if `axis` is given, the number of varargs must equal the number of axes.
    edge_order : {1, 2}, optional
        Gradient is calculated using N\ :sup:`th` order accurate differences
        at the boundaries. Default: 1.
        .. versionadded:: 1.9.1
    axis : None or int or tuple of ints, optional
        Gradient is calculated only along the given axis or axes
        The default (axis = None) is to calculate the gradient for all the axes of the input array.
        axis may be negative, in which case it counts from the last to the first axis.
        .. versionadded:: 1.11.0
    Returns
    -------
    gradient : list of ndarray
        Each element of `list` has the same shape as `f` giving the derivative
        of `f` with respect to each dimension.
    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
    >>> np.gradient(x)
    array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
    >>> np.gradient(x, 2)
    array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
    For two dimensional arrays, the return will be two arrays ordered by
    axis. In this example the first array stands for the gradient in
    rows and the second one in columns direction:
    >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
    [array([[ 2., 2., -1.],
            [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
            [ 1. , 1. , 1. ]])]
    >>> x = np.array([0, 1, 2, 3, 4])
    >>> dx = np.gradient(x)
    >>> y = x**2
    >>> np.gradient(y, dx, edge_order=2)
    array([-0., 2., 4., 6., 8.])
    The axis keyword can be used to specify a subset of axes of which the gradient is calculated
    >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
    array([[ 2., 2., -1.],
           [ 2., 2., -1.]])
    """
    f = np.asanyarray(f)
    N = len(f.shape)  # number of dimensions
    # `axis` may be absent (all axes), a single int, or a tuple of ints.
    axes = kwargs.pop('axis', None)
    if axes is None:
        axes = tuple(range(N))
    # check axes to have correct type and no duplicate entries
    if isinstance(axes, int):
        axes = (axes,)
    if not isinstance(axes, tuple):
        raise TypeError("A tuple of integers or a single integer is required")
    # normalize axis values:
    axes = tuple(x + N if x < 0 else x for x in axes)
    if max(axes) >= N or min(axes) < 0:
        raise ValueError("'axis' entry is out of bounds")
    if len(set(axes)) != len(axes):
        raise ValueError("duplicate value in 'axis'")
    # Build one sample spacing per dimension of `f`: none given -> 1.0
    # everywhere, one scalar -> that spacing everywhere, otherwise one
    # spacing per requested axis.
    n = len(varargs)
    if n == 0:
        dx = [1.0]*N
    elif n == 1:
        dx = [varargs[0]]*N
    elif n == len(axes):
        dx = list(varargs)
    else:
        raise SyntaxError(
            "invalid number of arguments")
    edge_order = kwargs.pop('edge_order', 1)
    if kwargs:
        raise TypeError('"{}" are not valid keyword arguments.'.format(
            '", "'.join(kwargs.keys())))
    if edge_order > 2:
        raise ValueError("'edge_order' greater than 2 not supported")
    # use central differences on interior and one-sided differences on the
    # endpoints. This preserves second order-accuracy over the full domain.
    outvals = []
    # create slice objects --- initially all are [:, :, ..., :]
    # NOTE(review): these lists are later used directly as indices
    # (``out[slice1]``); newer numpy requires tuple indices -- confirm the
    # numpy version this file targets still accepts lists of slices.
    slice1 = [slice(None)]*N
    slice2 = [slice(None)]*N
    slice3 = [slice(None)]*N
    slice4 = [slice(None)]*N
    # Compute in float64 ('d') unless the input is already an inexact or
    # datetime/timedelta type.
    otype = f.dtype.char
    if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
        otype = 'd'
    # Difference of datetime64 elements results in timedelta64
    if otype == 'M':
        # Need to use the full dtype name because it contains unit information
        otype = f.dtype.name.replace('datetime', 'timedelta')
    elif otype == 'm':
        # Needs to keep the specific units, can't be a general unit
        otype = f.dtype
    # Convert datetime64 data into ints. Make dummy variable `y`
    # that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
    if f.dtype.char in ["M", "m"]:
        y = f.view('int64')
    else:
        y = f
    for i, axis in enumerate(axes):
        if y.shape[axis] < 2:
            raise ValueError(
                "Shape of array too small to calculate a numerical gradient, "
                "at least two elements are required.")
        # Numerical differentiation: 1st order edges, 2nd order interior
        if y.shape[axis] == 2 or edge_order == 1:
            # Use first order differences for time data
            out = np.empty_like(y, dtype=otype)
            slice1[axis] = slice(1, -1)
            slice2[axis] = slice(2, None)
            slice3[axis] = slice(None, -2)
            # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
            out[slice1] = (y[slice2] - y[slice3])/2.0
            slice1[axis] = 0
            slice2[axis] = 1
            slice3[axis] = 0
            # 1D equivalent -- out[0] = (y[1] - y[0])
            out[slice1] = (y[slice2] - y[slice3])
            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            # 1D equivalent -- out[-1] = (y[-1] - y[-2])
            out[slice1] = (y[slice2] - y[slice3])
        # Numerical differentiation: 2st order edges, 2nd order interior
        else:
            # Use second order differences where possible
            out = np.empty_like(y, dtype=otype)
            slice1[axis] = slice(1, -1)
            slice2[axis] = slice(2, None)
            slice3[axis] = slice(None, -2)
            # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
            out[slice1] = (y[slice2] - y[slice3])/2.0
            slice1[axis] = 0
            slice2[axis] = 0
            slice3[axis] = 1
            slice4[axis] = 2
            # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
            out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            slice4[axis] = -3
            # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
            out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
        # divide by step size (in place, so timedelta/float dtypes keep
        # their computed type)
        out /= dx[i]
        outvals.append(out)
        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)
        slice4[axis] = slice(None)
    # A single requested axis returns the bare array; several return a list.
    if len(axes) == 1:
        return outvals[0]
    else:
        return outvals
def diff(a, n=1, axis=-1):
    """
    Calculate the n-th discrete difference along given axis.
    The first difference is ``out[i] = a[i+1] - a[i]`` along the given
    axis; higher orders are obtained by differencing repeatedly.
    Parameters
    ----------
    a : array_like
        Input array
    n : int, optional
        The number of times values are differenced.
    axis : int, optional
        The axis along which the difference is taken, default is the last axis.
    Returns
    -------
    diff : ndarray
        The n-th differences. The shape of the output is the same as `a`
        except along `axis` where the dimension is smaller by `n`.
    See Also
    --------
    gradient, ediff1d, cumsum
    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])
    """
    if n == 0:
        return a
    if n < 0:
        raise ValueError(
            "order must be non-negative but got " + repr(n))
    a = asanyarray(a)
    ndim = len(a.shape)
    # Index tuples selecting everything but the first / last element
    # along `axis`; the result keeps the same rank, so the tuples can be
    # reused for every differencing pass.
    upper = [slice(None)] * ndim
    lower = [slice(None)] * ndim
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    upper = tuple(upper)
    lower = tuple(lower)
    # Iterative form of the n-fold first-order difference.
    result = a[upper] - a[lower]
    for _ in range(n - 1):
        result = result[upper] - result[lower]
    return result
def interp(x, xp, fp, left=None, right=None, period=None):
    """
    One-dimensional linear interpolation.
    Returns the one-dimensional piecewise linear interpolant to a function
    with given values at discrete data-points.
    Parameters
    ----------
    x : array_like
        The x-coordinates of the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points, must be increasing if argument
        `period` is not specified. Otherwise, `xp` is internally sorted after
        normalizing the periodic boundaries with ``xp = xp % period``.
    fp : 1-D sequence of floats
        The y-coordinates of the data points, same length as `xp`.
    left : float, optional
        Value to return for `x < xp[0]`, default is `fp[0]`.
    right : float, optional
        Value to return for `x > xp[-1]`, default is `fp[-1]`.
    period : None or float, optional
        A period for the x-coordinates, enabling proper interpolation of
        angular x-coordinates. `left` and `right` are ignored if `period`
        is specified.
    Returns
    -------
    y : float or ndarray
        The interpolated values, same shape as `x`.
    Raises
    ------
    ValueError
        If `xp` and `fp` have different length, if `xp` or `fp` are not
        1-D sequences, or if ``period == 0``.
    Notes
    -----
    Does not check that `xp` is increasing; if it is not, the results
    are nonsense. A simple check is ``np.all(np.diff(xp) > 0)``.
    """
    if period is None:
        # Scalar inputs (python numbers or 0-d arrays) are wrapped in a
        # list for the C routine and unwrapped afterwards.
        if isinstance(x, (float, int, number)):
            return compiled_interp([x], xp, fp, left, right).item()
        if isinstance(x, np.ndarray) and x.ndim == 0:
            return compiled_interp([x], xp, fp, left, right).item()
        return compiled_interp(x, xp, fp, left, right)
    if period == 0:
        raise ValueError("period must be a non-zero value")
    period = abs(period)
    # In periodic mode the fill values are meaningless.
    left = None
    right = None
    scalar_x = isinstance(x, (float, int, number))
    if scalar_x:
        x = [x]
    x = np.asarray(x, dtype=np.float64)
    xp = np.asarray(xp, dtype=np.float64)
    fp = np.asarray(fp, dtype=np.float64)
    if xp.ndim != 1 or fp.ndim != 1:
        raise ValueError("Data points must be 1-D sequences")
    if xp.shape[0] != fp.shape[0]:
        raise ValueError("fp and xp are not of the same length")
    # Fold everything into one period, then sort the data points.
    x = x % period
    xp = xp % period
    order = np.argsort(xp)
    xp = xp[order]
    fp = fp[order]
    # Wrap one data point around each end so interpolation works across
    # the periodic seam.
    xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
    fp = np.concatenate((fp[-1:], fp, fp[0:1]))
    result = compiled_interp(x, xp, fp, left, right)
    return result.item() if scalar_x else result
def angle(z, deg=0):
    """
    Return the angle of the complex argument.
    Parameters
    ----------
    z : array_like
        A complex number or sequence of complex numbers.
    deg : bool, optional
        Return angle in degrees if True, radians if False (default).
    Returns
    -------
    angle : ndarray or scalar
        The counterclockwise angle from the positive real axis on
        the complex plane.
    See Also
    --------
    arctan2
    absolute
    Examples
    --------
    >>> np.angle([1.0, 1.0j, 1+1j])               # in radians
    array([ 0.        ,  1.57079633,  0.78539816])
    >>> np.angle(1+1j, deg=True)                  # in degrees
    45.0
    """
    z = asarray(z)
    # Real inputs have a zero imaginary component by definition.
    if issubclass(z.dtype.type, _nx.complexfloating):
        real_part, imag_part = z.real, z.imag
    else:
        real_part, imag_part = z, 0
    scale = 180/pi if deg else 1.0
    return arctan2(imag_part, real_part) * scale
def unwrap(p, discont=pi, axis=-1):
    """
    Unwrap by changing deltas between values to 2*pi complement.
    Unwrap radian phase `p` by changing absolute jumps greater than
    `discont` to their 2*pi complement along the given axis.
    Parameters
    ----------
    p : array_like
        Input array.
    discont : float, optional
        Maximum discontinuity between values, default is ``pi``.
    axis : int, optional
        Axis along which unwrap will operate, default is the last axis.
    Returns
    -------
    out : ndarray
        Output array.
    See Also
    --------
    rad2deg, deg2rad
    Notes
    -----
    If the discontinuity in `p` is smaller than ``pi``, but larger than
    `discont`, no unwrapping is done because taking the 2*pi complement
    would only make the discontinuity larger.
    Examples
    --------
    >>> phase = np.linspace(0, np.pi, num=5)
    >>> phase[3:] += np.pi
    >>> np.unwrap(phase)
    array([ 0.        ,  0.78539816,  1.57079633, -0.78539816,  0.        ])
    """
    p = asarray(p)
    nd = len(p.shape)
    dd = diff(p, axis=axis)
    slice1 = [slice(None, None)]*nd     # full slices
    slice1[axis] = slice(1, None)
    # BUG FIX: indexing with a *list* of slices is invalid in modern numpy
    # (multidimensional indices must be tuples), so convert explicitly.
    slice1 = tuple(slice1)
    # Map each delta into (-pi, pi]; a delta of exactly +pi must stay +pi,
    # not wrap to -pi, hence the copyto correction.
    ddmod = mod(dd + pi, 2*pi) - pi
    _nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
    ph_correct = ddmod - dd
    # Jumps smaller than `discont` need no correction.
    _nx.copyto(ph_correct, 0, where=abs(dd) < discont)
    # Accumulate corrections from the left; element 0 is unchanged.
    up = array(p, copy=True, dtype='d')
    up[slice1] = p[slice1] + ph_correct.cumsum(axis)
    return up
def sort_complex(a):
    """
    Sort a complex array using the real part first, then the imaginary part.
    Parameters
    ----------
    a : array_like
        Input array
    Returns
    -------
    out : complex ndarray
        Always returns a sorted complex array.
    Examples
    --------
    >>> np.sort_complex([5, 3, 6, 2, 1])
    array([ 1.+0.j,  2.+0.j,  3.+0.j,  5.+0.j,  6.+0.j])
    """
    sorted_copy = array(a, copy=True)
    sorted_copy.sort()
    if issubclass(sorted_copy.dtype.type, _nx.complexfloating):
        return sorted_copy
    # Real input: promote to the complex type wide enough for the data
    # (small ints -> complex64, long double -> clongdouble, else complex128).
    code = sorted_copy.dtype.char
    if code in 'bhBH':
        return sorted_copy.astype('F')
    if code == 'g':
        return sorted_copy.astype('G')
    return sorted_copy.astype('D')
def trim_zeros(filt, trim='fb'):
    """
    Trim the leading and/or trailing zeros from a 1-D array or sequence.
    Parameters
    ----------
    filt : 1-D array or sequence
        Input array.
    trim : str, optional
        A string with 'f' representing trim from front and 'b' to trim from
        back. Default is 'fb', trim zeros from both front and back of the
        array.
    Returns
    -------
    trimmed : 1-D array or sequence
        The result of trimming the input. The input data type is preserved
        (list/tuple in means list/tuple out).
    Examples
    --------
    >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
    >>> np.trim_zeros(a)
    array([1, 2, 3, 0, 2, 1])
    """
    start = 0
    stop = len(filt)
    mode = trim.upper()
    # Advance `start` past leading zeros.
    if 'F' in mode:
        for value in filt:
            if value != 0.:
                break
            start += 1
    # Pull `stop` back past trailing zeros.
    if 'B' in mode:
        for value in filt[::-1]:
            if value != 0.:
                break
            stop -= 1
    # Slicing preserves the input container type.
    return filt[start:stop]
@deprecate
def unique(x):
    """
    This function is deprecated. Use numpy.lib.arraysetops.unique()
    instead.
    """
    # Array path: sort a flattened copy and keep each element that
    # differs from its predecessor. Anything without the ndarray API
    # falls through to the generic set-based path.
    try:
        flat = x.flatten()
        if flat.size == 0:
            return flat
        flat.sort()
        keep = concatenate(([True], flat[1:] != flat[:-1]))
        return flat[keep]
    except AttributeError:
        return asarray(sorted(set(x)))
def extract(condition, arr):
    """
    Return the elements of an array that satisfy some condition.
    This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
    `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
    Note that `place` does the exact opposite of `extract`.
    Parameters
    ----------
    condition : array_like
        An array whose nonzero or True entries indicate the elements of `arr`
        to extract.
    arr : array_like
        Input array of the same size as `condition`.
    Returns
    -------
    extract : ndarray
        Rank 1 array of values from `arr` where `condition` is True.
    See Also
    --------
    take, put, copyto, compress, place
    Examples
    --------
    >>> arr = np.arange(12).reshape((3, 4))
    >>> condition = np.mod(arr, 3)==0
    >>> np.extract(condition, arr)
    array([0, 3, 6, 9])
    """
    # Flatten both operands, then gather the positions where the
    # (nonzero) condition holds.
    flat_condition = ravel(condition)
    hit_indices = nonzero(flat_condition)[0]
    return _nx.take(ravel(arr), hit_indices)
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.
    Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
    `place` uses the first N elements of `vals`, where N is the number of
    True values in `mask`, while `copyto` uses the elements where `mask`
    is True.
    Note that `extract` does the exact opposite of `place`.
    Parameters
    ----------
    arr : ndarray
        Array to put data into.
    mask : array_like
        Boolean mask array. Must have the same size as `a`.
    vals : 1-D sequence
        Values to put into `a`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N it will be repeated.
    See Also
    --------
    copyto, put, take, extract
    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr>2, [44, 55])
    >>> arr
    array([[ 0,  1,  2],
           [44, 55, 44]])
    """
    # The C helper mutates in place, so reject anything but a real ndarray
    # up front with a descriptive error.
    if not isinstance(arr, np.ndarray):
        bad_type = type(arr).__name__
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=bad_type))
    return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
    """
    Display a message on a device.
    Parameters
    ----------
    mesg : str
        Message to display.
    device : object
        Device to write message. If None, defaults to ``sys.stdout`` which is
        very similar to ``print``. `device` needs to have ``write()`` and
        ``flush()`` methods.
    linefeed : bool, optional
        Option whether to print a line feed or not. Defaults to True.
    Raises
    ------
    AttributeError
        If `device` does not have a ``write()`` or ``flush()`` method.
    Examples
    --------
    Besides ``sys.stdout``, a file-like object can also be used as it has
    both required methods:
    >>> from StringIO import StringIO
    >>> buf = StringIO()
    >>> np.disp('"Display" in a file', device=buf)
    >>> buf.getvalue()
    '"Display" in a file\\n'
    """
    target = sys.stdout if device is None else device
    terminator = '\n' if linefeed else ''
    target.write('%s%s' % (mesg, terminator))
    # Flush so the message appears immediately even on buffered devices.
    target.flush()
class vectorize(object):
    """
    vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
    Generalized function class.
    Define a vectorized function which takes a nested sequence
    of objects or numpy arrays as inputs and returns a
    numpy array as output. The vectorized function evaluates `pyfunc` over
    successive tuples of the input arrays like the python map function,
    except it uses the broadcasting rules of numpy.
    The data type of the output of `vectorized` is determined by calling
    the function with the first element of the input. This can be avoided
    by specifying the `otypes` argument.
    Parameters
    ----------
    pyfunc : callable
        A python function or method.
    otypes : str or list of dtypes, optional
        The output data type. It must be specified as either a string of
        typecode characters or a list of data type specifiers. There should
        be one data type specifier for each output.
    doc : str, optional
        The docstring for the function. If `None`, the docstring will be the
        ``pyfunc.__doc__``.
    excluded : set, optional
        Set of strings or integers representing the positional or keyword
        arguments for which the function will not be vectorized. These will be
        passed directly to `pyfunc` unmodified.
        .. versionadded:: 1.7.0
    cache : bool, optional
        If `True`, then cache the first function call that determines the number
        of outputs if `otypes` is not provided.
        .. versionadded:: 1.7.0
    Returns
    -------
    vectorized : callable
        Vectorized function.
    Examples
    --------
    >>> def myfunc(a, b):
    ...     "Return a-b if a>b, otherwise return a+b"
    ...     if a > b:
    ...         return a - b
    ...     else:
    ...         return a + b
    >>> vfunc = np.vectorize(myfunc)
    >>> vfunc([1, 2, 3, 4], 2)
    array([3, 4, 1, 2])
    The docstring is taken from the input function to `vectorize` unless it
    is specified
    >>> vfunc.__doc__
    'Return a-b if a>b, otherwise return a+b'
    >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
    >>> vfunc.__doc__
    'Vectorized `myfunc`'
    The output type is determined by evaluating the first element of the input,
    unless it is specified
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.int32'>
    >>> vfunc = np.vectorize(myfunc, otypes=[np.float])
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.float64'>
    The `excluded` argument can be used to prevent vectorizing over certain
    arguments. This can be useful for array-like arguments of a fixed length
    such as the coefficients for a polynomial as in `polyval`:
    >>> def mypolyval(p, x):
    ...     _p = list(p)
    ...     res = _p.pop(0)
    ...     while _p:
    ...         res = res*x + _p.pop(0)
    ...     return res
    >>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
    >>> vpolyval(p=[1, 2, 3], x=[0, 1])
    array([3, 6])
    Positional arguments may also be excluded by specifying their position:
    >>> vpolyval.excluded.add(0)
    >>> vpolyval([1, 2, 3], x=[0, 1])
    array([3, 6])
    Notes
    -----
    The `vectorize` function is provided primarily for convenience, not for
    performance. The implementation is essentially a for loop.
    If `otypes` is not specified, then a call to the function with the
    first argument will be used to determine the number of outputs.  The
    results of this call will be cached if `cache` is `True` to prevent
    calling the function twice.  However, to implement the cache, the
    original function must be wrapped which will slow down subsequent
    calls, so only do this if your function is expensive.
    The new keyword argument interface and `excluded` argument support
    further degrades performance.
    """

    def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
                 cache=False):
        self.pyfunc = pyfunc
        self.cache = cache
        self._ufunc = None    # Caching to improve default performance
        # Take over pyfunc's docstring unless the caller supplied one.
        if doc is None:
            self.__doc__ = pyfunc.__doc__
        else:
            self.__doc__ = doc
        # `otypes` is normalized to a string of dtype typecode characters,
        # one per output; it may arrive as a typecode string or as an
        # iterable of dtype-like objects.
        if isinstance(otypes, str):
            self.otypes = otypes
            for char in self.otypes:
                if char not in typecodes['All']:
                    raise ValueError(
                        "Invalid otype specified: %s" % (char,))
        elif iterable(otypes):
            self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
        else:
            raise ValueError(
                "Invalid otype specification")
        # Excluded variable support
        if excluded is None:
            excluded = set()
        self.excluded = set(excluded)

    def __call__(self, *args, **kwargs):
        """
        Return arrays with the results of `pyfunc` broadcast (vectorized) over
        `args` and `kwargs` not in `excluded`.
        """
        excluded = self.excluded
        if not kwargs and not excluded:
            # Fast path: vectorize over every positional argument directly.
            func = self.pyfunc
            vargs = args
        else:
            # The wrapper accepts only positional arguments: we use `names` and
            # `inds` to mutate `the_args` and `kwargs` to pass to the original
            # function.
            nargs = len(args)
            names = [_n for _n in kwargs if _n not in excluded]
            inds = [_i for _i in range(nargs) if _i not in excluded]
            the_args = list(args)

            def func(*vargs):
                # Scatter the broadcast values back into their original
                # positional slots and keyword names before calling pyfunc.
                for _n, _i in enumerate(inds):
                    the_args[_i] = vargs[_n]
                kwargs.update(zip(names, vargs[len(inds):]))
                return self.pyfunc(*the_args, **kwargs)

            # Only the non-excluded arguments are handed to the ufunc:
            # positional ones first, then the keyword values.
            vargs = [args[_i] for _i in inds]
            vargs.extend([kwargs[_n] for _n in names])
        return self._vectorize_call(func=func, args=vargs)

    def _get_ufunc_and_otypes(self, func, args):
        """Return (ufunc, otypes)."""
        # frompyfunc will fail if args is empty
        if not args:
            raise ValueError('args can not be empty')
        if self.otypes:
            otypes = self.otypes
            nout = len(otypes)
            # Note logic here: We only *use* self._ufunc if func is self.pyfunc
            # even though we set self._ufunc regardless.
            if func is self.pyfunc and self._ufunc is not None:
                ufunc = self._ufunc
            else:
                ufunc = self._ufunc = frompyfunc(func, len(args), nout)
        else:
            # Get number of outputs and output types by calling the function on
            # the first entries of args.  We also cache the result to prevent
            # the subsequent call when the ufunc is evaluated.
            # Assumes that ufunc first evaluates the 0th elements in the input
            # arrays (the input values are not checked to ensure this)
            inputs = [asarray(_a).flat[0] for _a in args]
            outputs = func(*inputs)
            # Performance note: profiling indicates that -- for simple
            # functions at least -- this wrapping can almost double the
            # execution time.
            # Hence we make it optional.
            if self.cache:
                _cache = [outputs]

                def _func(*vargs):
                    # Serve the pre-computed first result once, then fall
                    # through to the real function for all later elements.
                    if _cache:
                        return _cache.pop()
                    else:
                        return func(*vargs)
            else:
                _func = func
            # A tuple return means multiple outputs; collect one typecode
            # per output from the probe call's results.
            if isinstance(outputs, tuple):
                nout = len(outputs)
            else:
                nout = 1
                outputs = (outputs,)
            otypes = ''.join([asarray(outputs[_k]).dtype.char
                              for _k in range(nout)])
            # Performance note: profiling indicates that creating the ufunc is
            # not a significant cost compared with wrapping so it seems not
            # worth trying to cache this.
            ufunc = frompyfunc(_func, len(args), nout)
        return ufunc, otypes

    def _vectorize_call(self, func, args):
        """Vectorized call to `func` over positional `args`."""
        if not args:
            # No inputs to broadcast over: a single plain call.
            _res = func()
        else:
            ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)

            # Convert args to object arrays first
            inputs = [array(_a, copy=False, subok=True, dtype=object)
                      for _a in args]

            outputs = ufunc(*inputs)

            # The object-dtype ufunc results are cast back to the
            # determined output dtype(s).
            if ufunc.nout == 1:
                _res = array(outputs,
                             copy=False, subok=True, dtype=otypes[0])
            else:
                _res = tuple([array(_x, copy=False, subok=True, dtype=_t)
                              for _x, _t in zip(outputs, otypes)])
        return _res
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
        aweights=None):
    """
    Estimate a covariance matrix, given data and weights.
    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element :math:`C_{ij}` is the covariance of
    :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
    of :math:`x_i`.
    See the notes for an outline of the algorithm.
    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `m` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same form
        as that of `m`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the relationship
        is transposed: each column represents a variable, while the rows
        contain observations.
    bias : bool, optional
        Default normalization (False) is by ``(N - 1)``, where ``N`` is the
        number of observations given (unbiased estimate). If `bias` is True, then
        normalization is by ``N``. These values can be overridden by using the
        keyword ``ddof`` in numpy versions >= 1.5.
    ddof : int, optional
        If not ``None`` the default value implied by `bias` is overridden.
        Note that ``ddof=1`` will return the unbiased estimate, even if both
        `fweights` and `aweights` are specified, and ``ddof=0`` will return
        the simple average. See the notes for the details. The default value
        is ``None``.
        .. versionadded:: 1.5
    fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
        observation vector should be repeated.
        .. versionadded:: 1.10
    aweights : array_like, optional
        1-D array of observation vector weights. These relative weights are
        typically large for observations considered "important" and smaller for
        observations considered less "important". If ``ddof=0`` the array of
        weights can be used to assign probabilities to observation vectors.
        .. versionadded:: 1.10
    Returns
    -------
    out : ndarray
        The covariance matrix of the variables.
    See Also
    --------
    corrcoef : Normalized covariance matrix
    Notes
    -----
    Assume that the observations are in the columns of the observation
    array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
    steps to compute the weighted covariance are as follows::
        >>> w = f * a
        >>> v1 = np.sum(w)
        >>> v2 = np.sum(w * a)
        >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
        >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
    Note that when ``a == 1``, the normalization factor
    ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
    as it should.
    Examples
    --------
    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> np.cov(x)
    array([[ 1., -1.],
           [-1.,  1.]])
    >>> x = [-2.1, -1,  4.3]
    >>> y = [3,  1.1,  0.12]
    >>> print(np.cov(x, y))
    [[ 11.71        -4.286     ]
     [ -4.286        2.14413333]]
    >>> print(np.cov(x))
    11.71
    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError(
            "ddof must be integer")

    # Handles complex arrays too
    m = np.asarray(m)
    if y is None:
        dtype = np.result_type(m, np.float64)
    else:
        y = np.asarray(y)
        dtype = np.result_type(m, y, np.float64)
    X = array(m, ndmin=2, dtype=dtype)
    # Transpose to variables-in-rows layout; a single row needs no flip.
    if rowvar == 0 and X.shape[0] != 1:
        X = X.T
    if X.shape[0] == 0:
        return np.array([]).reshape(0, 0)
    if y is not None:
        y = array(y, copy=False, ndmin=2, dtype=dtype)
        if rowvar == 0 and y.shape[0] != 1:
            y = y.T
        X = np.vstack((X, y))

    if ddof is None:
        if bias == 0:
            ddof = 1
        else:
            ddof = 0

    # Get the product of frequencies and weights
    w = None
    if fweights is not None:
        # BUG FIX: use np.float64 instead of the removed ``np.float`` alias
        # (it was simply builtin float, so the resulting dtype is identical).
        fweights = np.asarray(fweights, dtype=np.float64)
        if not np.all(fweights == np.around(fweights)):
            raise TypeError(
                "fweights must be integer")
        if fweights.ndim > 1:
            raise RuntimeError(
                "cannot handle multidimensional fweights")
        if fweights.shape[0] != X.shape[1]:
            raise RuntimeError(
                "incompatible numbers of samples and fweights")
        if any(fweights < 0):
            raise ValueError(
                "fweights cannot be negative")
        w = fweights
    if aweights is not None:
        # BUG FIX: np.float -> np.float64, as above.
        aweights = np.asarray(aweights, dtype=np.float64)
        if aweights.ndim > 1:
            raise RuntimeError(
                "cannot handle multidimensional aweights")
        if aweights.shape[0] != X.shape[1]:
            raise RuntimeError(
                "incompatible numbers of samples and aweights")
        if any(aweights < 0):
            raise ValueError(
                "aweights cannot be negative")
        if w is None:
            w = aweights
        else:
            w *= aweights

    avg, w_sum = average(X, axis=1, weights=w, returned=True)
    w_sum = w_sum[0]

    # Determine the normalization
    if w is None:
        fact = X.shape[1] - ddof
    elif ddof == 0:
        fact = w_sum
    elif aweights is None:
        fact = w_sum - ddof
    else:
        fact = w_sum - ddof*sum(w*aweights)/w_sum

    if fact <= 0:
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
        fact = 0.0

    # Center the variables, then form the (conjugated) weighted outer product.
    X -= avg[:, None]
    if w is None:
        X_T = X.T
    else:
        X_T = (X*w).T
    c = dot(X, X_T.conj())
    c *= 1. / np.float64(fact)
    # 1-D input produces a 0-d result, hence the squeeze.
    return c.squeeze()
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
    """
    Return Pearson product-moment correlation coefficients.

    The correlation matrix ``R`` is derived from the covariance matrix
    ``C`` via ``R[i, j] = C[i, j] / sqrt(C[i, i] * C[j, j])``, so every
    entry lies in [-1, 1].

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array of variables and observations.  Each row of
        `x` is one variable and each column one observation of all
        variables (see `rowvar`).
    y : array_like, optional
        An additional set of variables and observations, with the same
        shape as `x`.
    rowvar : int, optional
        Non-zero (default): each row is a variable, observations run
        along the columns.  Zero: the relationship is transposed.
    bias : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0
    ddof : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0

    Returns
    -------
    R : ndarray
        The correlation coefficient matrix of the variables.

    See Also
    --------
    cov : Covariance matrix

    Notes
    -----
    `bias` and `ddof` are accepted but discarded for backward
    compatibility; they never influenced the result.
    """
    if bias is not np._NoValue or ddof is not np._NoValue:
        # 2015-03-15, 1.10
        warnings.warn('bias and ddof have no effect and are deprecated',
                      DeprecationWarning)
    c = cov(x, y, rowvar)
    try:
        stddev = sqrt(diag(c))
    except ValueError:
        # 0-d covariance: nan for a degenerate value (nan, inf, 0),
        # 1 otherwise
        return c / c
    # Normalize one row at a time instead of forming the full outer
    # product of stddev with itself -- saves memory on large inputs.
    for row, s in enumerate(stddev):
        c[row, :] /= stddev * s
    return c
def blackman(M):
    """
    Return the Blackman window.

    A taper built from the first three terms of a summation of
    cosines, designed for close-to-minimal spectral leakage (nearly as
    good as a Kaiser window).

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the
        value one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, hamming, hanning, kaiser

    Notes
    -----
    The Blackman window is defined as

    .. math::  w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)

    It is also known as an apodization or tapering function, used to
    smooth discontinuities at the beginning and end of a sampled
    signal.

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
    spectra, Dover Publications, New York.

    Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal
    Processing.  Upper Saddle River, NJ: Prentice-Hall, 1999,
    pp. 468-471.
    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    # Phase doubling (2.0*phase) is an exact power-of-two scaling, so
    # the result matches the classic two-argument formula bit-for-bit.
    phase = 2.0*pi*samples/(M-1)
    return 0.42 - 0.5*cos(phase) + 0.08*cos(2.0*phase)
def bartlett(M):
    """
    Return the Bartlett window.

    A triangular taper whose end points are exactly zero, often used to
    smooth a signal without introducing much ripple in the frequency
    domain.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an
        empty array is returned.

    Returns
    -------
    out : array
        The triangular window, with the maximum value normalized to
        one (the value one appears only if the number of samples is
        odd), with the first and last samples equal to zero.

    See Also
    --------
    blackman, hamming, hanning, kaiser

    Notes
    -----
    The Bartlett window is defined as

    .. math:: w(n) = \\frac{2}{M-1} \\left(
              \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
              \\right)

    Convolution with this window produces linear interpolation; its
    Fourier transform is the product of two sinc functions.

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous
           Spectra", Biometrika 37, 1-16, 1950.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    # Rise linearly to the midpoint, then mirror back down to zero.
    ramp = 2.0*samples/(M-1)
    return where(less_equal(samples, (M-1)/2.0), ramp, 2.0 - ramp)
def hanning(M):
    """
    Return the Hanning window.

    A taper formed by a weighted cosine, named for Julius von Hann and
    also known as the Hann window or Cosine Bell.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the
        value one appears only if `M` is odd).

    See Also
    --------
    bartlett, blackman, hamming, kaiser

    Notes
    -----
    The Hanning window is defined as

    .. math::  w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of
           power spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 106-108.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    return 0.5 - 0.5*cos(2.0*pi*samples/(M-1))
def hamming(M):
    """
    Return the Hamming window.

    A taper formed by a weighted cosine, named for R. W. Hamming and
    originally recommended for smoothing the truncated autocovariance
    function in the time domain.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the
        value one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hanning, kaiser

    Notes
    -----
    The Hamming window is defined as

    .. math::  w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of
           power spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    return 0.54 - 0.46*cos(2.0*pi*samples/(M-1))
## Code from cephes for i0
# Chebyshev series coefficients for exp(-x)*I0(x) on the interval
# [0, 8], in the mapped variable x/2 - 2 (see _i0_1).  Listed from the
# highest-order term down, the layout _chbevl consumes.
_i0A = [
    -4.41534164647933937950E-18,
    3.33079451882223809783E-17,
    -2.43127984654795469359E-16,
    1.71539128555513303061E-15,
    -1.16853328779934516808E-14,
    7.67618549860493561688E-14,
    -4.85644678311192946090E-13,
    2.95505266312963983461E-12,
    -1.72682629144155570723E-11,
    9.67580903537323691224E-11,
    -5.18979560163526290666E-10,
    2.65982372468238665035E-9,
    -1.30002500998624804212E-8,
    6.04699502254191894932E-8,
    -2.67079385394061173391E-7,
    1.11738753912010371815E-6,
    -4.41673835845875056359E-6,
    1.64484480707288970893E-5,
    -5.75419501008210370398E-5,
    1.88502885095841655729E-4,
    -5.76375574538582365885E-4,
    1.63947561694133579842E-3,
    -4.32430999505057594430E-3,
    1.05464603945949983183E-2,
    -2.37374148058994688156E-2,
    4.93052842396707084878E-2,
    -9.49010970480476444210E-2,
    1.71620901522208775349E-1,
    -3.04682672343198398683E-1,
    6.76795274409476084995E-1
    ]
# Chebyshev series coefficients for exp(-x)*sqrt(x)*I0(x) on the tail
# interval (8, inf), in the mapped variable 32/x - 2 (see _i0_2).
# Highest-order term first, as _chbevl expects.
_i0B = [
    -7.23318048787475395456E-18,
    -4.83050448594418207126E-18,
    4.46562142029675999901E-17,
    3.46122286769746109310E-17,
    -2.82762398051658348494E-16,
    -3.42548561967721913462E-16,
    1.77256013305652638360E-15,
    3.81168066935262242075E-15,
    -9.55484669882830764870E-15,
    -4.15056934728722208663E-14,
    1.54008621752140982691E-14,
    3.85277838274214270114E-13,
    7.18012445138366623367E-13,
    -1.79417853150680611778E-12,
    -1.32158118404477131188E-11,
    -3.14991652796324136454E-11,
    1.18891471078464383424E-11,
    4.94060238822496958910E-10,
    3.39623202570838634515E-9,
    2.26666899049817806459E-8,
    2.04891858946906374183E-7,
    2.89137052083475648297E-6,
    6.88975834691682398426E-5,
    3.36911647825569408990E-3,
    8.04490411014108831608E-1
    ]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
    # I0 on [0, 8]: Chebyshev fit of exp(-x)*I0(x) in the variable
    # x/2 - 2, scaled back by exp(x).
    mapped = x/2.0 - 2
    return exp(x) * _chbevl(mapped, _i0A)
def _i0_2(x):
    # I0 on (8, inf): Chebyshev fit of exp(-x)*sqrt(x)*I0(x) in the
    # variable 32/x - 2, scaled back by exp(x)/sqrt(x).
    mapped = 32.0/x - 2.0
    return exp(x) * _chbevl(mapped, _i0B) / sqrt(x)
def i0(x):
    """
    Modified Bessel function of the first kind, order 0.

    Usually denoted :math:`I_0`.  Broadcasts over array input, but
    will *not* up-cast int dtype arguments unless accompanied by at
    least one float or complex dtype argument (see Raises below).

    Parameters
    ----------
    x : array_like, dtype float or complex
        Argument of the Bessel function.

    Returns
    -------
    out : ndarray, shape = x.shape, dtype = x.dtype
        The modified Bessel function evaluated at each element of `x`.

    Raises
    ------
    TypeError: array cannot be safely cast to required type
        If argument consists exclusively of int dtypes.

    See Also
    --------
    scipy.special.iv, scipy.special.ive

    Notes
    -----
    Uses Clenshaw's published Chebyshev expansions [1]_ (referenced by
    Abramowitz and Stegun [2]_) over the two intervals [0, 8] and
    (8, inf).

    References
    ----------
    .. [1] C. W. Clenshaw, "Chebyshev series for mathematical
           functions", in *National Physical Laboratory Mathematical
           Tables*, vol. 5, London: Her Majesty's Stationery Office,
           1962.
    .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
           Functions*, 10th printing, New York: Dover, 1964, pp. 379.

    Examples
    --------
    >>> np.i0([0.])
    array(1.0)
    """
    x = atleast_1d(x).copy()
    y = empty_like(x)
    # I0 is even, so evaluate on the magnitudes: flip negative entries
    # in place on the private copy.
    neg = (x < 0)
    x[neg] = -x[neg]
    # Dispatch each element to the fit for its interval.
    small = (x <= 8.0)
    y[small] = _i0_1(x[small])
    y[~small] = _i0_2(x[~small])
    return y.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
    """
    Return the Kaiser window.

    The Kaiser window is a taper formed by using a Bessel function.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an
        empty array is returned.
    beta : float
        Shape parameter for window.

    Returns
    -------
    out : array
        The window, with the maximum value normalized to one (the
        value one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hamming, hanning

    Notes
    -----
    The Kaiser window is defined as

    .. math::  w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
               \\right)/I_0(\\beta)

    with

    .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},

    where :math:`I_0` is the modified zeroth-order Bessel function.

    The Kaiser window approximates the Digital Prolate Spheroidal
    Sequence (Slepian) window and can mimic other windows by varying
    `beta`: 0 is rectangular, 5 is similar to a Hamming, 6 to a
    Hanning, and 8.6 to a Blackman.  As `beta` grows the window
    narrows, so `M` must be large enough to sample the spike or NaNs
    are returned.

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis
           by digital computer", Editors: F.F. Kuo and J.F. Kaiser,
           p 218-285.  John Wiley and Sons, New York, (1966).
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 177-178.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    # numpy.dual supplied a (possibly scipy-accelerated) i0; the
    # module was deprecated in NumPy 1.20 and later removed, so fall
    # back to numpy's own i0 when it is unavailable.
    try:
        from numpy.dual import i0
    except ImportError:
        from numpy import i0
    if M < 1:
        # Consistent with the other window functions (blackman,
        # bartlett, hanning, hamming); the formula below would also
        # yield an empty float array for M <= 0.
        return array([])
    if M == 1:
        return np.array([1.])
    n = arange(0, M)
    alpha = (M-1)/2.0
    return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
    """
    Return the sinc function.

    The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.

    Parameters
    ----------
    x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.

    Returns
    -------
    out : ndarray
        ``sinc(x)``, which has the same shape as the input.

    Notes
    -----
    ``sinc(0)`` is the limit value 1.

    The name sinc is short for "sine cardinal" or "sinus cardinalis".
    The sinc function is used in various signal processing
    applications, including anti-aliasing, Lanczos resampling, and
    bandlimited interpolation, where the ideal interpolation kernel is
    proportional to it.

    References
    ----------
    .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A
           Wolfram Web Resource.
           http://mathworld.wolfram.com/SincFunction.html
    .. [2] Wikipedia, "Sinc function",
           http://en.wikipedia.org/wiki/Sinc_function
    """
    x = np.asanyarray(x)
    # Substitute a tiny value for exact zeros so the division below
    # produces the limit value 1 instead of 0/0.
    arg = pi * where(x == 0, 1.0e-20, x)
    return sin(arg)/arg
def msort(a):
    """
    Return a copy of an array sorted along the first axis.

    Parameters
    ----------
    a : array_like
        Array to be sorted.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    sort

    Notes
    -----
    ``np.msort(a)`` is equivalent to  ``np.sort(a, axis=0)``.
    """
    # Copy first (keeping ndarray subclasses via subok), then sort the
    # copy in place along the first axis so the input is untouched.
    result = array(a, subok=True, copy=True)
    result.sort(axis=0)
    return result
def _ureduce(a, func, **kwargs):
    """
    Internal Function.
    Call `func` with `a` as first argument swapping the axes to use extended
    axis on functions that don't support it natively.
    Returns result and a.shape with axis dims set to 1.
    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
    kwargs : keyword arguments
        additional keyword arguments to pass to `func`.
    Returns
    -------
    result : tuple
        Result of func(a, **kwargs) and a.shape with axis dims set to 1
        which can be used to reshape the result to the same shape a ufunc with
        keepdims=True would produce.
    """
    a = np.asanyarray(a)
    axis = kwargs.get('axis', None)
    if axis is not None:
        # keepdim records the keepdims=True-compatible output shape.
        keepdim = list(a.shape)
        nd = a.ndim
        try:
            # Single integer axis: validate the range and mark the
            # reduced dimension as length 1.
            axis = operator.index(axis)
            if axis >= nd or axis < -nd:
                raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
            keepdim[axis] = 1
        except TypeError:
            # operator.index failed -> axis is a sequence of ints.
            # Validate each entry and reject duplicates (sax holds the
            # normalized, non-negative axis numbers).
            sax = set()
            for x in axis:
                if x >= nd or x < -nd:
                    raise IndexError("axis %d out of bounds (%d)" % (x, nd))
                if x in sax:
                    raise ValueError("duplicate value in axis")
                sax.add(x % nd)
                keepdim[x] = 1
            # keep = the axes NOT being reduced.
            keep = sax.symmetric_difference(frozenset(range(nd)))
            nkeep = len(keep)
            # swap axis that should not be reduced to front
            for i, s in enumerate(sorted(keep)):
                a = a.swapaxes(i, s)
            # merge reduced axis
            a = a.reshape(a.shape[:nkeep] + (-1,))
            # func now reduces over the single merged trailing axis.
            kwargs['axis'] = -1
    else:
        # Reduction over the flattened array: every dim collapses to 1.
        keepdim = [1] * a.ndim
    r = func(a, **kwargs)
    return r, keepdim
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axis.

    Returns the median of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the medians are computed.  By default
        the median of the flattened array is returned.  A sequence of
        axes is supported since version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result.  It
        must have the same shape and buffer length as the expected
        output, but the type (of the output) will be cast if
        necessary.
    overwrite_input : bool, optional
        If True, the memory of `a` may be used as scratch space; `a`
        will be modified (probably partially sorted) by the call.
        Saves memory when the original contents are not needed.
        Default is False.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        with size one, so the result broadcasts correctly against the
        original `arr`.

        .. versionadded:: 1.9.0

    Returns
    -------
    median : ndarray
        A new array holding the result (or `out` if it was given).
        Inputs of integers or floats smaller than ``float64`` produce
        ``np.float64`` output; otherwise the output dtype matches the
        input.

    See Also
    --------
    mean, percentile

    Notes
    -----
    Given a vector ``V`` of length ``N``, the median of ``V`` is the
    middle value of a sorted copy of ``V``, ``V_sorted`` - i
    e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of
    the two middle values of ``V_sorted`` when ``N`` is even.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.median(a)
    3.5
    >>> np.median(a, axis=0)
    array([ 6.5,  4.5,  2.5])
    """
    # _ureduce folds any multi-axis request into a single trailing
    # axis for _median and reports the keepdims-compatible shape.
    result, kept_shape = _ureduce(a, func=_median, axis=axis, out=out,
                                  overwrite_input=overwrite_input)
    if not keepdims:
        return result
    # Re-insert the reduced axes as length-1 dims, mirroring a ufunc
    # reduction with keepdims=True.
    return result.reshape(kept_shape)
def _median(a, axis=None, out=None, overwrite_input=False):
# can't be reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact) and sz > 0:
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
if n == True:
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None,
               overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the qth percentile of the data along the specified axis.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : float in range of [0,100] (or sequence of floats)
        Percentile to compute, which must be between 0 and 100 inclusive.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the percentiles are computed.  The
        default (None) computes over a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result.  It must
        have the same shape and buffer length as the expected output.
    overwrite_input : bool, optional
        If True, allow the memory of `a` to be reused (and its contents
        destroyed) by the computation.  Default is False.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method used when the desired quantile lies between
        two data points ``i < j``.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        with size one, so the result broadcasts against `a`.

    Returns
    -------
    percentile : scalar or ndarray
        A scalar for a single percentile with ``axis=None``; otherwise an
        array whose first axis corresponds to the percentiles when several
        are requested.  If `out` is specified, that array is returned.

    See Also
    --------
    mean, median, nanpercentile
    """
    # Normalise ``q`` to a float64 ndarray.  A *copy* is essential: the
    # _percentile worker rescales it in place from [0, 100] to [0, 1].
    q = array(q, dtype=np.float64, copy=True)
    result, keepdim_shape = _ureduce(a, func=_percentile, q=q, axis=axis,
                                     out=out, overwrite_input=overwrite_input,
                                     interpolation=interpolation)
    if not keepdims:
        return result
    # keepdims: restore the reduced axes as size-1 dimensions; when several
    # percentiles were requested they occupy a leading axis of length len(q).
    if q.ndim == 0:
        return result.reshape(keepdim_shape)
    return result.reshape([len(q)] + keepdim_shape)
def _percentile(a, q, axis=None, out=None,
                overwrite_input=False, interpolation='linear', keepdims=False):
    # Worker behind percentile().  ``q`` arrives as a float64 ndarray (the
    # public wrapper guarantees this) and is rescaled *in place* from
    # percentages to fractions, which is why the wrapper passes a copy.
    a = asarray(a)
    if q.ndim == 0:
        # Do not allow 0-d arrays because following code fails for scalar
        zerod = True
        q = q[None]
    else:
        zerod = False

    # Validate and rescale q.
    # avoid expensive reductions, relevant for arrays with < O(1000) elements
    if q.size < 10:
        for i in range(q.size):
            if q[i] < 0. or q[i] > 100.:
                raise ValueError("Percentiles must be in the range [0,100]")
            q[i] /= 100.
    else:
        # faster than any()
        if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
            raise ValueError("Percentiles must be in the range [0,100]")
        q /= 100.

    # prepare a for partioning: reuse the caller's memory only when allowed,
    # otherwise work on a flattened or full copy.
    if overwrite_input:
        if axis is None:
            ap = a.ravel()
        else:
            ap = a
    else:
        if axis is None:
            ap = a.flatten()
        else:
            ap = a.copy()

    if axis is None:
        axis = 0

    Nx = ap.shape[axis]
    # Fractional positions of each requested percentile along the sorted axis.
    indices = q * (Nx - 1)

    # round fractional indices according to interpolation method
    if interpolation == 'lower':
        indices = floor(indices).astype(intp)
    elif interpolation == 'higher':
        indices = ceil(indices).astype(intp)
    elif interpolation == 'midpoint':
        indices = 0.5 * (floor(indices) + ceil(indices))
    elif interpolation == 'nearest':
        indices = around(indices).astype(intp)
    elif interpolation == 'linear':
        pass  # keep index as fraction and interpolate
    else:
        raise ValueError(
            "interpolation can only be 'linear', 'lower' 'higher', "
            "'midpoint', or 'nearest'")

    n = np.array(False, dtype=bool)  # check for nan's flag
    if indices.dtype == intp:  # take the points along axis
        # Check if the array contains any nan's.  Partitioning index -1 as
        # well moves any NaN to the last position (NaN sorts last), so it can
        # be detected afterwards by inspecting ap[-1].
        if np.issubdtype(a.dtype, np.inexact):
            indices = concatenate((indices, [-1]))

        ap.partition(indices, axis=axis)
        # ensure axis with qth is first
        ap = np.rollaxis(ap, axis, 0)
        axis = 0

        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            indices = indices[:-1]
            n = np.isnan(ap[-1:, ...])

        if zerod:
            indices = indices[0]
        r = take(ap, indices, axis=axis, out=out)
    else:  # weight the points above and below the indices
        indices_below = floor(indices).astype(intp)
        indices_above = indices_below + 1
        # Clamp so the upper neighbour never walks off the end of the axis.
        indices_above[indices_above > Nx - 1] = Nx - 1

        # Check if the array contains any nan's (same -1 trick as above).
        if np.issubdtype(a.dtype, np.inexact):
            indices_above = concatenate((indices_above, [-1]))

        weights_above = indices - indices_below
        weights_below = 1.0 - weights_above
        # Shape the weights so they broadcast along the reduction axis only.
        weights_shape = [1, ] * ap.ndim
        weights_shape[axis] = len(indices)
        weights_below.shape = weights_shape
        weights_above.shape = weights_shape

        ap.partition(concatenate((indices_below, indices_above)), axis=axis)

        # ensure axis with qth is first
        ap = np.rollaxis(ap, axis, 0)
        weights_below = np.rollaxis(weights_below, axis, 0)
        weights_above = np.rollaxis(weights_above, axis, 0)
        axis = 0

        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            indices_above = indices_above[:-1]
            n = np.isnan(ap[-1:, ...])

        # Linear interpolation: weighted sum of the two bracketing points.
        x1 = take(ap, indices_below, axis=axis) * weights_below
        x2 = take(ap, indices_above, axis=axis) * weights_above

        # ensure axis with qth is first
        x1 = np.rollaxis(x1, axis, 0)
        x2 = np.rollaxis(x2, axis, 0)

        if zerod:
            x1 = x1.squeeze(0)
            x2 = x2.squeeze(0)

        if out is not None:
            r = add(x1, x2, out=out)
        else:
            r = add(x1, x2)

    if np.any(n):
        # NaNs were present: warn once and overwrite the affected results.
        warnings.warn("Invalid value encountered in percentile",
                      RuntimeWarning)
        if zerod:
            if ap.ndim == 1:
                if out is not None:
                    out[...] = a.dtype.type(np.nan)
                    r = out
                else:
                    r = a.dtype.type(np.nan)
            else:
                r[..., n.squeeze(0)] = a.dtype.type(np.nan)
        else:
            if r.ndim == 1:
                r[:] = a.dtype.type(np.nan)
            else:
                r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)

    return r
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrate along the given axis using the composite trapezoidal rule.

    Integrate `y` (`x`) along the given axis.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        The sample points corresponding to the `y` values. If `x` is None,
        the sample points are assumed to be evenly spaced `dx` apart. The
        default is None.
    dx : scalar, optional
        The spacing between sample points when `x` is None. The default is 1.
    axis : int, optional
        The axis along which to integrate.

    Returns
    -------
    trapz : float
        Definite integral as approximated by trapezoidal rule.

    See Also
    --------
    sum, cumsum

    Examples
    --------
    >>> np.trapz([1,2,3])
    4.0
    >>> np.trapz([1,2,3], x=[4,6,8])
    8.0
    >>> np.trapz([1,2,3], dx=2)
    8.0
    >>> a = np.arange(6).reshape(2, 3)
    >>> np.trapz(a, axis=0)
    array([ 1.5,  2.5,  3.5])
    >>> np.trapz(a, axis=1)
    array([ 2.,  8.])
    """
    y = asanyarray(y)
    if x is None:
        d = dx
    else:
        x = asanyarray(x)
        if x.ndim == 1:
            d = diff(x)
            # reshape so the 1-D spacings broadcast along `axis` of `y`
            shape = [1] * y.ndim
            shape[axis] = d.shape[0]
            d = d.reshape(shape)
        else:
            d = diff(x, axis=axis)
    nd = y.ndim
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    # Index with tuples: indexing an ndarray with a *list* of slices is
    # deprecated and raises an error in recent numpy; a tuple is the
    # equivalent, supported form.
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
    except ValueError:
        # Operations didn't work (e.g. object arrays); cast to ndarray
        d = np.asarray(d)
        y = np.asarray(y)
        ret = add.reduce(d * (y[slice1] + y[slice2]) / 2.0, axis)
    return ret
def add_newdoc(place, obj, doc):
    """
    Adds documentation to obj which is in module place.

    If doc is a string add it to obj as a docstring

    If doc is a tuple, then the first element is interpreted as
       an attribute of obj and the second as the docstring
          (method, docstring)

    If doc is a list, then each element of the list should be a
       sequence of length two --> [(method1, docstring1),
       (method2, docstring2), ...]

    This routine never raises an error (it swallows any exception and
    always succeeds).

    This routine cannot modify read-only docstrings, as appear
    in new-style classes or built-in functions. Because this
    routine never raises an error the caller must check manually
    that the docstrings were changed.
    """
    try:
        new = getattr(__import__(place, globals(), {}, [obj]), obj)
        if isinstance(doc, str):
            add_docstring(new, doc.strip())
        elif isinstance(doc, tuple):
            add_docstring(getattr(new, doc[0]), doc[1].strip())
        elif isinstance(doc, list):
            for val in doc:
                add_docstring(getattr(new, val[0]), val[1].strip())
    except Exception:
        # Best-effort by contract: any failure is silently ignored.  Using
        # ``except Exception`` rather than a bare ``except`` still lets
        # KeyboardInterrupt and SystemExit propagate, so Ctrl-C during
        # import is not swallowed.
        pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
    """
    Return coordinate matrices from coordinate vectors.

    Make N-D coordinate arrays for vectorized evaluations of N-D
    scalar/vector fields over N-D grids, given one-dimensional coordinate
    arrays x1, x2,..., xn.

    Parameters
    ----------
    x1, x2,..., xn : array_like
        1-D arrays representing the coordinates of a grid.
    indexing : {'xy', 'ij'}, optional
        Cartesian ('xy', default) or matrix ('ij') indexing of output.
        With 'xy' and inputs of length M and N the outputs have shape
        (N, M); with 'ij' they have shape (M, N).
    sparse : bool, optional
        If True a sparse grid is returned in order to conserve memory.
        Default is False.
    copy : bool, optional
        If False, a view into the original arrays is returned in order to
        conserve memory.  Default is True.  Note that ``sparse=False,
        copy=False`` will likely return non-contiguous arrays and that
        more than one element of a broadcast array may refer to a single
        memory location.

    Returns
    -------
    X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
        arrays shaped ``(N1, N2, ..., Nn)`` if indexing='ij', or
        ``(N2, N1, N3, ..., Nn)`` if indexing='xy', with the elements of
        each `xi` repeated to fill the matrix along its own dimension.

    See Also
    --------
    index_tricks.mgrid, index_tricks.ogrid
    """
    ndim = len(xi)

    copy_ = kwargs.pop('copy', True)
    sparse = kwargs.pop('sparse', False)
    indexing = kwargs.pop('indexing', 'xy')

    if kwargs:
        raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
                        % (list(kwargs)[0],))
    if indexing not in ['xy', 'ij']:
        raise ValueError(
            "Valid values for `indexing` are 'xy' and 'ij'.")

    # Reshape each input so it has size -1 along its own position and
    # size 1 everywhere else; the pieces then broadcast into the full grid.
    grids = []
    for pos, coords in enumerate(xi):
        target = [1] * ndim
        target[pos] = -1
        grids.append(np.asanyarray(coords).reshape(tuple(target)))

    shape = [g.size for g in grids]

    if indexing == 'xy' and ndim > 1:
        # Cartesian indexing: the first two dimensions trade places.
        grids[0].shape = (1, -1) + (1,) * (ndim - 2)
        grids[1].shape = (-1, 1) + (1,) * (ndim - 2)
        shape[0], shape[1] = shape[1], shape[0]

    if sparse:
        if not copy_:
            return grids
        return [g.copy() for g in grids]

    # Dense output: expand every piece to the full N-D shape.
    if not copy_:
        return np.broadcast_arrays(*grids)
    expand = np.ones(shape, dtype=int)
    return [g * expand for g in grids]
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a one
    dimensional array, this returns those entries not returned by
    `arr[obj]`.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int or array of ints
        Indicate which sub-arrays to remove.
    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.

    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed. Note
        that `delete` does not occur in-place. If `axis` is None, `out` is
        a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    append : Append elements at the end of an array.

    Notes
    -----
    Often it is preferable to use a boolean mask. For example:

    >>> mask = np.ones(len(arr), dtype=bool)
    >>> mask[[0,2,4]] = False
    >>> result = arr[mask,...]

    Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
    use of `mask`.

    Examples
    --------
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1,  2,  3,  4],
           [ 9, 10, 11, 12]])
    >>> np.delete(arr, np.s_[::2], 1)
    array([[ 2,  4],
           [ 6,  8],
           [10, 12]])
    >>> np.delete(arr, [1,3,5], None)
    array([ 1,  3,  5,  7,  8,  9, 10, 11, 12])
    """
    # Remember the subclass wrapper (e.g. for np.matrix) so the result can
    # be converted back at the end.
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass
    arr = asarray(arr)
    ndim = arr.ndim
    arrorder = 'F' if arr.flags.fnc else 'C'
    if axis is None:
        # axis=None: operate on the flattened array.
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    if ndim == 0:
        # 2013-09-24, 1.9
        warnings.warn(
            "in the future the special handling of scalars will be removed "
            "from delete and raise an error", DeprecationWarning)
        if wrap:
            return wrap(arr)
        else:
            return arr.copy()
    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)
    if isinstance(obj, slice):
        # Slice case: copy the kept pieces around the deleted range.
        start, stop, step = obj.indices(N)
        xr = range(start, stop, step)
        numtodel = len(xr)
        if numtodel <= 0:
            # Nothing selected for deletion: return a plain copy.
            if wrap:
                return wrap(arr.copy())
            else:
                return arr.copy()
        # Invert if step is negative:
        if step < 0:
            step = -step
            start = xr[-1]
            stop = xr[0] + 1
        newshape[axis] -= numtodel
        new = empty(newshape, arr.dtype, arrorder)
        # copy initial chunk
        if start == 0:
            pass
        else:
            slobj[axis] = slice(None, start)
            new[slobj] = arr[slobj]
        # copy end chunck
        if stop == N:
            pass
        else:
            slobj[axis] = slice(stop-numtodel, None)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(stop, None)
            new[slobj] = arr[slobj2]
        # copy middle pieces
        if step == 1:
            pass
        else:  # use array indexing.
            keep = ones(stop-start, dtype=bool)
            keep[:stop-start:step] = False
            slobj[axis] = slice(start, stop-numtodel)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(start, stop)
            arr = arr[slobj2]
            slobj2[axis] = keep
            new[slobj] = arr[slobj2]
        if wrap:
            return wrap(new)
        else:
            return new
    _obj = obj
    obj = np.asarray(obj)
    # After removing the special handling of booleans and out of
    # bounds values, the conversion to the array can be removed.
    if obj.dtype == bool:
        warnings.warn(
            "in the future insert will treat boolean arrays and array-likes "
            "as boolean index instead of casting it to integer", FutureWarning)
        obj = obj.astype(intp)
    # NOTE: ``long`` here comes from a numpy.compat import at the top of the
    # file (this is Python-2-era numpy source).
    if isinstance(_obj, (int, long, integer)):
        # optimization for a single value
        obj = obj.item()
        if (obj < -N or obj >= N):
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (obj < 0):
            obj += N
        newshape[axis] -= 1
        new = empty(newshape, arr.dtype, arrorder)
        # Copy the part before the deleted index, then the part after it.
        slobj[axis] = slice(None, obj)
        new[slobj] = arr[slobj]
        slobj[axis] = slice(obj, None)
        slobj2 = [slice(None)]*ndim
        slobj2[axis] = slice(obj+1, None)
        new[slobj] = arr[slobj2]
    else:
        # General case: build a boolean keep-mask and fancy-index.
        if obj.size == 0 and not isinstance(_obj, np.ndarray):
            obj = obj.astype(intp)
        if not np.can_cast(obj, intp, 'same_kind'):
            # obj.size = 1 special case always failed and would just
            # give superfluous warnings.
            # 2013-09-24, 1.9
            warnings.warn(
                "using a non-integer array as obj in delete will result in an "
                "error in the future", DeprecationWarning)
            obj = obj.astype(intp)
        keep = ones(N, dtype=bool)
        # Test if there are out of bound indices, this is deprecated
        inside_bounds = (obj < N) & (obj >= -N)
        if not inside_bounds.all():
            # 2013-09-24, 1.9
            warnings.warn(
                "in the future out of bounds indices will raise an error "
                "instead of being ignored by `numpy.delete`.",
                DeprecationWarning)
            obj = obj[inside_bounds]
        positive_indices = obj >= 0
        if not positive_indices.all():
            warnings.warn(
                "in the future negative indices will not be ignored by "
                "`numpy.delete`.", FutureWarning)
            obj = obj[positive_indices]
        keep[obj, ] = False
        slobj[axis] = keep
        new = arr[slobj]
    if wrap:
        return wrap(new)
    else:
        return new
def insert(arr, obj, values, axis=None):
    """
    Insert values along the given axis before the given indices.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : int, slice or sequence of ints
        Object that defines the index or indices before which `values` is
        inserted.

        .. versionadded:: 1.8.0

        Support for multiple insertions when `obj` is a single scalar or a
        sequence with one element (similar to calling insert multiple
        times).
    values : array_like
        Values to insert into `arr`. If the type of `values` is different
        from that of `arr`, `values` is converted to the type of `arr`.
        `values` should be shaped so that ``arr[...,obj,...] = values``
        is legal.
    axis : int, optional
        Axis along which to insert `values`. If `axis` is None then `arr`
        is flattened first.

    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted. Note that `insert`
        does not occur in-place: a new array is returned. If
        `axis` is None, `out` is a flattened array.

    See Also
    --------
    append : Append elements at the end of an array.
    concatenate : Join a sequence of arrays along an existing axis.
    delete : Delete elements from an array.

    Notes
    -----
    Note that for higher dimensional inserts `obj=0` behaves very different
    from `obj=[0]` just like `arr[:,0,:] = values` is different from
    `arr[:,[0],:] = values`.

    Examples
    --------
    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
    >>> np.insert(a, 1, 5)
    array([1, 5, 1, 2, 2, 3, 3])
    >>> np.insert(a, 1, 5, axis=1)
    array([[1, 5, 1],
           [2, 5, 2],
           [3, 5, 3]])
    >>> b = a.flatten()
    >>> np.insert(b, [2, 2], [5, 6])
    array([1, 1, 5, 6, 2, 2, 3, 3])
    >>> np.insert(b, slice(2, 4), [5, 6])
    array([1, 1, 5, 2, 6, 2, 3, 3])
    >>> np.insert(b, [2, 2], [7.13, False]) # type casting
    array([1, 1, 7, 0, 2, 2, 3, 3])
    >>> x = np.arange(8).reshape(2, 4)
    >>> np.insert(x, (1, 3), 999, axis=1)
    array([[  0, 999,   1,   2, 999,   3],
           [  4, 999,   5,   6, 999,   7]])
    """
    # Remember the subclass wrapper (e.g. for np.matrix) so the result can
    # be converted back at the end.
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass
    arr = asarray(arr)
    ndim = arr.ndim
    arrorder = 'F' if arr.flags.fnc else 'C'
    if axis is None:
        # axis=None: operate on the flattened array.
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    else:
        if ndim > 0 and (axis < -ndim or axis >= ndim):
            raise IndexError(
                "axis %i is out of bounds for an array of "
                "dimension %i" % (axis, ndim))
        if (axis < 0):
            axis += ndim
    if (ndim == 0):
        # 2013-09-24, 1.9
        warnings.warn(
            "in the future the special handling of scalars will be removed "
            "from insert and raise an error", DeprecationWarning)
        arr = arr.copy()
        arr[...] = values
        if wrap:
            return wrap(arr)
        else:
            return arr
    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)
    if isinstance(obj, slice):
        # turn it into a range object
        indices = arange(*obj.indices(N), **{'dtype': intp})
    else:
        # need to copy obj, because indices will be changed in-place
        indices = np.array(obj)
        if indices.dtype == bool:
            # See also delete
            warnings.warn(
                "in the future insert will treat boolean arrays and "
                "array-likes as a boolean index instead of casting it to "
                "integer", FutureWarning)
            indices = indices.astype(intp)
            # Code after warning period:
            #if obj.ndim != 1:
            #    raise ValueError('boolean array argument obj to insert '
            #                     'must be one dimensional')
            #indices = np.flatnonzero(obj)
        elif indices.ndim > 1:
            raise ValueError(
                "index array argument obj to insert must be one dimensional "
                "or scalar")
    if indices.size == 1:
        # Single insertion point (possibly broadcasting several values).
        index = indices.item()
        if index < -N or index > N:
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (index < 0):
            index += N
        # There are some object array corner cases here, but we cannot avoid
        # that:
        values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
        if indices.ndim == 0:
            # broadcasting is very different here, since a[:,0,:] = ... behaves
            # very different from a[:,[0],:] = ...! This changes values so that
            # it works likes the second case. (here a[:,0:1,:])
            values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
        numnew = values.shape[axis]
        newshape[axis] += numnew
        new = empty(newshape, arr.dtype, arrorder)
        # Copy the part before the insertion point, the new values, then
        # the part after.
        slobj[axis] = slice(None, index)
        new[slobj] = arr[slobj]
        slobj[axis] = slice(index, index+numnew)
        new[slobj] = values
        slobj[axis] = slice(index+numnew, None)
        slobj2 = [slice(None)] * ndim
        slobj2[axis] = slice(index, None)
        new[slobj] = arr[slobj2]
        if wrap:
            return wrap(new)
        return new
    elif indices.size == 0 and not isinstance(obj, np.ndarray):
        # Can safely cast the empty list to intp
        indices = indices.astype(intp)
    if not np.can_cast(indices, intp, 'same_kind'):
        # 2013-09-24, 1.9
        warnings.warn(
            "using a non-integer array as obj in insert will result in an "
            "error in the future", DeprecationWarning)
        indices = indices.astype(intp)
    # Multiple insertion points: shift each index by the number of earlier
    # insertions (stable sort keeps ties in original order), then scatter.
    indices[indices < 0] += N
    numnew = len(indices)
    order = indices.argsort(kind='mergesort')   # stable sort
    indices[order] += np.arange(numnew)
    newshape[axis] += numnew
    old_mask = ones(newshape[axis], dtype=bool)
    old_mask[indices] = False
    new = empty(newshape, arr.dtype, arrorder)
    slobj2 = [slice(None)]*ndim
    slobj[axis] = indices
    slobj2[axis] = old_mask
    new[slobj] = values
    new[slobj2] = arr
    if wrap:
        return wrap(new)
    return new
def append(arr, values, axis=None):
    """
    Append values to the end of an array.

    Parameters
    ----------
    arr : array_like
        Values are appended to a copy of this array.
    values : array_like
        These values are appended to a copy of `arr`.  It must be of the
        correct shape (the same shape as `arr`, excluding `axis`).  If
        `axis` is not specified, `values` can be any shape and will be
        flattened before use.
    axis : int, optional
        The axis along which `values` are appended.  If `axis` is not
        given, both `arr` and `values` are flattened before use.

    Returns
    -------
    append : ndarray
        A copy of `arr` with `values` appended to `axis`.  Note that
        `append` does not occur in-place: a new array is allocated and
        filled.  If `axis` is None, the result is a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    delete : Delete elements from an array.

    Examples
    --------
    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])

    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])
    """
    arr = asanyarray(arr)
    if axis is not None:
        return concatenate((arr, values), axis=axis)
    # axis=None: both operands are flattened and joined along the single
    # remaining axis.
    flat = arr if arr.ndim == 1 else arr.ravel()
    return concatenate((flat, ravel(values)), axis=flat.ndim - 1)
|
{
"content_hash": "f2b01771e99622e8b50bac6b6044c7a9",
"timestamp": "",
"source": "github",
"line_count": 4383,
"max_line_length": 99,
"avg_line_length": 32.92379648642482,
"alnum_prop": 0.5716364644329718,
"repo_name": "LumPenPacK/NetworkExtractionFromImages",
"id": "6a64ebe85402b156d01cc0e12f9faa64f098c550",
"size": "144305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osx_build/nefi2_osx_amd64_xcode_2015/site-packages/numpy_1.11/numpy/lib/function_base.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1577"
},
{
"name": "C",
"bytes": "3035840"
},
{
"name": "C++",
"bytes": "147394619"
},
{
"name": "CMake",
"bytes": "603"
},
{
"name": "CSS",
"bytes": "4298"
},
{
"name": "FORTRAN",
"bytes": "14321"
},
{
"name": "HTML",
"bytes": "41126"
},
{
"name": "Lex",
"bytes": "20920"
},
{
"name": "Makefile",
"bytes": "350419"
},
{
"name": "Python",
"bytes": "25507066"
},
{
"name": "QMake",
"bytes": "22941"
},
{
"name": "Shell",
"bytes": "19080"
},
{
"name": "Yacc",
"bytes": "248826"
}
],
"symlink_target": ""
}
|
from http import HTTPStatus
class ResponseError(Exception):
    """Base class for exceptions that map directly onto an HTTP response.

    Subclasses may declare ``status``, ``body``, ``reason``, ``log`` and
    ``content_type`` as class attributes; each acts as the default for the
    corresponding constructor argument when the caller does not supply one.
    """

    def __init__(self, message=None, status: HTTPStatus=None, *, body=None, headers=None,
                 correlation_id=None, reason=None, content_type=None, log=False):
        super().__init__(message)
        self.message = message
        cls = type(self)
        # Fall back to class-level defaults declared on subclasses.
        if status is None and hasattr(cls, 'status'):
            status = cls.status
        if body is None and hasattr(cls, 'body'):
            body = cls.body
        if reason is None and hasattr(cls, 'reason'):
            reason = cls.reason
        if log is False and hasattr(cls, 'log'):
            log = cls.log
        if content_type is None and hasattr(cls, 'content_type'):
            content_type = cls.content_type
        # A bare reason with no explicit body becomes a minimal body dict.
        if reason and not body:
            body = {'reason': reason}
        self.status = status
        self.body = body
        self.log = log
        self.headers = headers
        self.correlation_id = correlation_id
        self.reason = reason
        self.content_type = content_type
class ParseError(ResponseError):
    # Every parse failure maps to a 400 response.
    status = HTTPStatus.BAD_REQUEST

    def __init__(self, reason):
        """Create a 400 error; ``reason`` doubles as the exception message.

        Example:
            raise ParseError("Invalid JSON")
        """
        super().__init__(message=reason, reason=reason)
class UnsupportedMediaType(ResponseError):
    """415 response for a request body in a content type we cannot handle."""

    status = HTTPStatus.UNSUPPORTED_MEDIA_TYPE

    def __init__(self, content_type):
        # The offending content type is both the message and part of the
        # client-visible reason.
        reason = f'Unsupported media type "{content_type}" in request'
        super().__init__(message=content_type, reason=reason)
class NotRoutableError(ResponseError):
    """404 response raised when no registered route matches the request."""
    status = HTTPStatus.NOT_FOUND
    reason = 'No route found'
|
{
"content_hash": "60acc4b8561e1992cfa7accb402d41d7",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 89,
"avg_line_length": 30.77777777777778,
"alnum_prop": 0.5974729241877257,
"repo_name": "wasp/waspy",
"id": "b77cd1a59979ddfc682e3244d097c4bdaaa89014",
"size": "1662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "waspy/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "101203"
}
],
"symlink_target": ""
}
|
"""Integration test program for Subpar.
Test bootstrap interaction with __future__ imports and source file encodings.
"""
# Test __future__ imports
from __future__ import print_function

# Test the source file encoding specification above. See PEP 263 for
# details. In the line below, this source file contains a byte
# sequence that is valid latin-1 but not valid utf-8. Specifically,
# between the two single quotes is a single byte 0xE4 (latin-1
# encoding of LATIN SMALL LETTER A WITH DIAERESIS), and _not_ the
# two-byte UTF-8 sequence 0xC3 0xA4.
latin_1_bytes = u'ä'
# If the interpreter honoured the declared encoding, the literal decoded
# to exactly one code point, U+00E4.
assert len(latin_1_bytes) == 1
assert ord(latin_1_bytes[0]) == 0xE4
|
{
"content_hash": "8a3d828280ce074d8cb69f0c71d1e90c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 77,
"avg_line_length": 36,
"alnum_prop": 0.7376543209876543,
"repo_name": "google/subpar",
"id": "03ef2d6bd6a9865fa3a49e658a7e20d69fbbc54e",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/package_boilerplate/main.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "911"
},
{
"name": "Python",
"bytes": "86530"
},
{
"name": "Shell",
"bytes": "8952"
},
{
"name": "Starlark",
"bytes": "25116"
}
],
"symlink_target": ""
}
|
import os
import unittest
from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.parad_j.cspp.parad_j_cspp_recovered_driver import parse
from mi.dataset.driver.parad_j.cspp.resource import RESOURCE_PATH
__author__ = 'Joe Padula'
log = get_logger()
class SampleTest(unittest.TestCase):
    """Smoke test: the recovered parad_j_cspp driver parses a sample file
    without reporting a failure."""

    def test_one(self):
        # Known-good CSPP sample file shipped with the test resources.
        source_file_path = os.path.join(RESOURCE_PATH, '11079364_PPB_PARS.txt')
        particle_data_handler = ParticleDataHandler()
        particle_data_handler = parse(None, source_file_path, particle_data_handler)
        log.debug("SAMPLES: %s", particle_data_handler._samples)
        log.debug("FAILURE: %s", particle_data_handler._failure)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # assertEqual is the supported spelling.
        self.assertEqual(particle_data_handler._failure, False)
if __name__ == '__main__':
    # Run the single smoke test directly, without a unittest runner.
    SampleTest('test_one').test_one()
|
{
"content_hash": "4f055ab71a4ba5ca3cf2da11656f9199",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 84,
"avg_line_length": 28.29032258064516,
"alnum_prop": 0.7058152793614595,
"repo_name": "vipullakhani/mi-instrument",
"id": "f5e8ad1e781b02f4efaf5f62d94ad22ea8977fc1",
"size": "877",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mi/dataset/driver/parad_j/cspp/test/test_parad_j_cspp_recovered_driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "9968191"
}
],
"symlink_target": ""
}
|
"""
"""
import py
def cmdexec(cmd):
    """Return the unicode output of executing ``cmd`` in a separate process.

    Raises a ``cmdexec.Error`` exception if the command failed; the
    exception provides an ``err`` attribute containing the error output
    of the command.

    If the subprocess module does not provide a proper encoding/unicode
    strings, ``sys.getdefaultencoding()`` will be used; if that does not
    exist, 'UTF-8'.
    """
    # NOTE: the original placed this docstring *after* the imports, so it
    # was a plain string expression and never attached to the function;
    # it now works with help(cmdexec).
    import sys
    import subprocess
    process = subprocess.Popen(cmd, shell=True,
                               universal_newlines=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process.communicate()
    if sys.version_info[0] < 3:  # on py3 we get unicode strings, on py2 not
        try:
            default_encoding = sys.getdefaultencoding()  # jython may not have it
        except AttributeError:
            default_encoding = sys.stdout.encoding or 'UTF-8'
        out = unicode(out, process.stdout.encoding or default_encoding)
        err = unicode(err, process.stderr.encoding or default_encoding)
    # communicate() has already waited for the child, so returncode is set;
    # the original's poll() call was just an indirect way to read it.
    status = process.returncode
    if status:
        raise ExecutionFailed(status, status, cmd, out, err)
    return out
class ExecutionFailed(py.error.Error):
    """Raised by cmdexec() when the child process exits with a non-zero
    status; carries the command, its exit status and captured output."""

    def __init__(self, status, systemstatus, cmd, out, err):
        Exception.__init__(self)
        self.status = status
        self.systemstatus = systemstatus
        self.cmd = cmd
        self.err = err
        self.out = out

    def __str__(self):
        # Historical message format, kept verbatim.
        return "ExecutionFailed: %d %s\n%s" % (self.status, self.cmd, self.err)
# export the exception under the name 'py.process.cmdexec.Error'
cmdexec.Error = ExecutionFailed
try:
    # Rebrand the class so tracebacks display the public dotted name
    # rather than the private implementation location.
    ExecutionFailed.__module__ = 'py.process.cmdexec'
    ExecutionFailed.__name__ = 'Error'
except (AttributeError, TypeError):
    # Some interpreters forbid rewriting these attributes; the alias
    # above still works, so this rename is best-effort only.
    pass
|
{
"content_hash": "e6f69d241b918f49504f379b573a9773",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 80,
"avg_line_length": 34.092592592592595,
"alnum_prop": 0.6632265073329712,
"repo_name": "mikewesner-wf/glasshouse",
"id": "98e7ed4089b28690212ff31007a8a2aafd2759c5",
"size": "1841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appengine/lib/py/_process/cmdexec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "CSS",
"bytes": "490924"
},
{
"name": "JavaScript",
"bytes": "854636"
},
{
"name": "Python",
"bytes": "5578834"
},
{
"name": "Shell",
"bytes": "215"
}
],
"symlink_target": ""
}
|
'''Load the latest update for a Twitter user and leave it in an XHTML fragment'''
__author__ = 'dewitt@google.com'
import codecs
import getopt
import sys
import twitter
TEMPLATE = """
<div class="twitter">
<span class="twitter-user"><a href="http://twitter.com/%s">Twitter</a>: </span>
<span class="twitter-text">%s</span>
<span class="twitter-relative-created-at"><a href="http://twitter.com/%s/statuses/%s">Posted %s</a></span>
</div>
"""
def Usage():
print 'Usage: %s [options] twitterid' % __file__
print
print ' This script fetches a users latest twitter update and stores'
print ' the result in a file as an XHTML fragment'
print
print ' Options:'
print ' --help -h : print this help'
print ' --output : the output file [default: stdout]'
def FetchTwitter(user, output):
assert user
statuses = twitter.Api().GetUserTimeline(id=user, count=1)
s = statuses[0]
xhtml = TEMPLATE % (s.user.screen_name, s.text, s.user.screen_name, s.id, s.relative_created_at)
if output:
Save(xhtml, output)
else:
print xhtml
def Save(xhtml, output):
  """Write *xhtml* to *output* as ASCII, XML-escaping non-ASCII chars."""
  with codecs.open(output, mode='w', encoding='ascii',
                   errors='xmlcharrefreplace') as fragment_file:
    fragment_file.write(xhtml)
def main():
  """Parse the command line, then fetch and emit the latest update."""
  try:
    # 'o' needs a trailing ':' so that '-o FILE' actually consumes its
    # argument, matching the documented '--output' long option; without
    # it the short form silently yielded an empty output value.
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'ho:', ['help', 'output='])
  except getopt.GetoptError:
    Usage()
    sys.exit(2)
  try:
    user = args[0]
  except IndexError:  # narrowed from a bare 'except:'; no twitterid given
    Usage()
    sys.exit(2)
  output = None
  for o, a in opts:
    if o in ("-h", "--help"):
      Usage()
      sys.exit(2)
    if o in ("-o", "--output"):
      output = a
  FetchTwitter(user, output)

if __name__ == "__main__":
  main()
|
{
"content_hash": "3029fd90281831adc64121b56a551332",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 108,
"avg_line_length": 24.791044776119403,
"alnum_prop": 0.6225165562913907,
"repo_name": "MosheBerman/brisket-mashup",
"id": "8a386fd586430acb7b2a81d5f71bb41df6edad73",
"size": "1684",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "source/libraries/python-twitter-1.1/examples/twitter-to-xhtml.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "111593"
},
{
"name": "Python",
"bytes": "1368803"
},
{
"name": "Shell",
"bytes": "5115"
}
],
"symlink_target": ""
}
|
"""
Class-based, modern views for elephantblog
==========================================
Add the following code to ``settings.py`` if you want to integrate Elephantblog
through ApplicationContent::
def elephantblog_entry_url_app(self):
from feincms.apps import app_reverse
return app_reverse(
'elephantblog_entry_detail',
'elephantblog',
kwargs={
'year': self.published_on.strftime('%Y'),
'month': self.published_on.strftime('%m'),
'day': self.published_on.strftime('%d'),
'slug': self.slug,
})
def elephantblog_categorytranslation_url_app(self):
from feincms.apps import app_reverse
return app_reverse(
'elephantblog_category_detail',
'elephantblog',
kwargs={
'slug': self.slug,
})
ABSOLUTE_URL_OVERRIDES = {
'elephantblog.entry': elephantblog_entry_url_app,
'elephantblog.categorytranslation':\
elephantblog_categorytranslation_url_app,
}
NOTE! You need to register the app as follows for the application content
snippet::
Page.create_content_type(ApplicationContent, APPLICATIONS=(
('elephantblog', _('Blog'), {'urls': 'elephantblog.urls'}),
))
"""
from __future__ import absolute_import, unicode_literals
from django.conf.urls import patterns, url
from elephantblog.feeds import EntryFeed
from elephantblog import views
def elephantblog_patterns(list_kwargs=None, detail_kwargs=None):
    """
    Returns an instance of ready-to-use URL patterns for the blog.

    ``list_kwargs`` and ``detail_kwargs`` are forwarded to ``as_view()``
    of the archive/list views resp. the detail view.

    In the future, we will have a few configuration parameters here:

    - A parameter to specify a custom mixin for all view classes (or for
      list / detail view classes?)
    - Parameters to specify the language handling (probably some initialization
      arguments for the ``as_view`` methods)
    - The format of the month (three chars or two digits)
    - etc.
    """
    # None sentinels instead of mutable ``{}`` defaults: default dicts are
    # created once at definition time and shared across every call.
    if list_kwargs is None:
        list_kwargs = {}
    if detail_kwargs is None:
        detail_kwargs = {}
    return patterns(
        '',
        url(r'^feed/$', EntryFeed()),
        url(r'^$',
            views.ArchiveIndexView.as_view(**list_kwargs),
            name='elephantblog_entry_archive'),
        url(r'^(?P<year>\d{4})/$',
            views.YearArchiveView.as_view(**list_kwargs),
            name='elephantblog_entry_archive_year'),
        url(r'^(?P<year>\d{4})/(?P<month>\d{2})/$',
            views.MonthArchiveView.as_view(**list_kwargs),
            name='elephantblog_entry_archive_month'),
        url(r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/$',
            views.DayArchiveView.as_view(**list_kwargs),
            name='elephantblog_entry_archive_day'),
        url(r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/'
            r'(?P<slug>[-\w]+)/$',
            views.DateDetailView.as_view(**detail_kwargs),
            name='elephantblog_entry_detail'),
        url(r'^category/(?P<slug>[-\w]+)/$',
            views.CategoryArchiveIndexView.as_view(**list_kwargs),
            name='elephantblog_category_detail'),
    )
# Backwards compatibility: Create a URL patterns object with the default
# configuration
# (consumers that reference 'elephantblog.urls' directly, as shown in the
# module docstring, pick this module-level object up).
urlpatterns = elephantblog_patterns()
|
{
"content_hash": "3954680a5cd10b6b7c75f6317f877b43",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 34.55913978494624,
"alnum_prop": 0.596764156813939,
"repo_name": "sbaechler/feincms-elephantblog",
"id": "8a92e5b4daa74a2070f1ce88200c51195f9c3f8e",
"size": "3214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elephantblog/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "7961"
},
{
"name": "Python",
"bytes": "83176"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
}
|
import os
import sys
import hashlib
import commands
import threading
from six.moves import _thread, range, queue
import six
Lock = threading.Lock()
class WorkerPool(object):
def __init__(self, func, nworker=1):
self.nworker = nworker
self.func = func
self.queue = queue.Queue()
def start(self):
for __ in range(self.nworker):
_thread.start_new_thread(self.do_work, tuple())
def add_task(self, msg):
self.queue.put(msg)
def do_work(self):
while True:
msg = self.queue.get()
self.func(msg)
class TTS(object):
    """Thread-safe singleton facade over a concrete TTS driver.

    On first construction it selects and initialises a driver ('baidu'
    or 'iflytek') from *config* and starts a WorkerPool used to play
    synthesized audio files through mplayer.
    """

    _instance = None
    _inited = False

    def __init__(self, config, method):
        # __init__ runs on every TTS(...) call even though __new__ hands
        # back the shared instance, so guard against re-initialisation.
        if TTS._inited:
            return
        print("Init singleton TTS")
        TTS._inited = True
        self.__ttsdriver = None
        self.__pool = WorkerPool(self.__mplayer)
        self.__pool.start()
        if config is None:  # identity test instead of '== None'
            config = {}
        if method == 'baidu':
            import baidutts
            self.__ttsdriver = baidutts.BaiduTTS(config.get('apikey', ""),
                                                 config.get('secretkey', ""),
                                                 config.get('speed', 5),
                                                 config.get('pitch', 9),
                                                 config.get('volume', 9),
                                                 config.get('person', 3))
        if method == 'iflytek':
            import iflytek
            self.__ttsdriver = iflytek.iflytekTTS(
                config.get('appid', '59b4d5d4'),
                config.get('voice_name', 'xiaowanzi'),
                config.get('speed', 50),
                config.get('volume', 50),
                config.get('pitch', 50))

    def __new__(cls, *args, **kw):
        # Double-checked locking: only the first caller creates the
        # instance; every later call receives the cached one.
        if not cls._instance:
            with Lock:
                if not cls._instance:
                    cls._instance = super(TTS, cls).__new__(cls, *args, **kw)
        return cls._instance

    def __text2tts(self, message):
        # Synthesize *message* into an audio file named after its MD5.
        filename = self.__md5sum(message)
        return self.__ttsdriver.get_tts_audio(message, filename, 'zh')

    def __md5sum(self, contents):
        # Hex MD5 digest used as a stable file name for the audio cache.
        # (local renamed from 'hash', which shadowed the builtin)
        digest = hashlib.md5()
        digest.update(contents)
        return digest.hexdigest()

    def __mplayer(self, msg):
        # WorkerPool callback: play the audio file given in msg[0].
        audio_file = msg[0]
        st, output = commands.getstatusoutput(
            'mplayer -really-quiet -noconsolecontrols -volume 90 '
            '-speed 0.9 {}'.format(audio_file))
        if st != 0:
            print('mplayer output:\n {}'.format(output))

    def __add_to_mplayer(self, f):
        self.__pool.add_task((f, ''))

    def text2play(self, message):
        """Synthesize *message* and return the generated audio file name.

        NOTE(review): asynchronous playback via __add_to_mplayer is
        deliberately left disabled below -- confirm before re-enabling.
        """
        t, f = self.__text2tts(message)
        #self.__add_to_mplayer(f)
        return f
|
{
"content_hash": "1062d529b0bce0712a295b4781932137",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 124,
"avg_line_length": 32.54945054945055,
"alnum_prop": 0.47299122214719785,
"repo_name": "pengzhangdev/slackbot",
"id": "8e8e3ae2b34bf2ed8196e9597d4d161f1ee8e346",
"size": "3151",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "slackbot/plugins/component/ttsdriver/ttsdriver.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "288383"
},
{
"name": "Shell",
"bytes": "3443"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
import os
import cms
# Trove classifiers describing the supported environments; keep in sync
# with INSTALL_REQUIREMENTS below.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    'Topic :: Software Development',
    'Topic :: Software Development :: Libraries :: Application Frameworks',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Framework :: Django',
    'Framework :: Django :: 1.8',
    'Framework :: Django :: 1.9',
]

INSTALL_REQUIREMENTS = [
    'Django>=1.8,<1.10',
    'django-classy-tags>=0.7.2',
    'django-formtools>=1.0',
    'django-treebeard>=4.0.1',
    'django-sekizai>=0.7',
    'djangocms-admin-style>=1.0',
]

# Read the long description eagerly via a context manager so the file
# handle is closed deterministically instead of leaking until GC.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    author='Patrick Lauber',
    author_email='digi@treepy.com',
    name='django-cms',
    version=cms.__version__,
    description='An Advanced Django CMS',
    long_description=LONG_DESCRIPTION,
    url='https://www.django-cms.org/',
    license='BSD License',
    platforms=['OS Independent'],
    classifiers=CLASSIFIERS,
    install_requires=INSTALL_REQUIREMENTS,
    packages=find_packages(exclude=['project', 'project.*']),
    include_package_data=True,
    zip_safe=False,
    test_suite='runtests.main',
)
|
{
"content_hash": "edb4798f29ea3a6334c9d6baca90d247",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 88,
"avg_line_length": 31.215686274509803,
"alnum_prop": 0.6381909547738693,
"repo_name": "bittner/django-cms",
"id": "aa2a030e2eadcca72f8027d203aed5147ed6d649",
"size": "1592",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "143487"
},
{
"name": "HTML",
"bytes": "188292"
},
{
"name": "JavaScript",
"bytes": "1283742"
},
{
"name": "Python",
"bytes": "2205532"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
}
|
import math
from itertools import chain
from rdkit import Chem
from ._base import Descriptor
__all__ = ("PathCount",)
class PathCountBase(Descriptor):
    """Common base class for path-count descriptors.

    Path counts are computed on the hydrogen-suppressed molecular graph
    (explicit_hydrogens is False).
    """

    __slots__ = ()

    explicit_hydrogens = False
class PathCountCache(PathCountBase):
    """Shared computation: enumerate simple paths of one fixed order.

    ``calculate`` returns the pair ``(L, pi)``: the number of simple
    paths of length ``_order`` and the sum, over those paths, of the
    product of bond orders along the path.  :class:`PathCount` depends
    on this so descriptors of equal order share a single computation.
    """

    __slots__ = ("_order", "_bonds")

    def parameters(self):
        # Cache identity: caches created with equal order are shared.
        return (self._order,)

    def __init__(self, order):
        self._order = order

    def _gen_bonds(self):
        # Build a bond-index -> (begin atom id, end atom id) lookup table.
        self._bonds = [
            (b.GetBeginAtomIdx(), b.GetEndAtomIdx()) for b in self.mol.GetBonds()
        ]

    def _bond_ids_to_atom_ids(self, p):
        # Convert a path expressed as bond indices into the ordered
        # sequence of atom ids it visits.  Returns [] for an empty path
        # and a 2-tuple (not a list) for a single-bond path; callers
        # only iterate the result, so the mixed return types are harmless.
        it = iter(p)
        try:
            a0f, a0t = self._bonds[next(it)]
        except StopIteration:
            return []
        try:
            a1f, a1t = self._bonds[next(it)]
        except StopIteration:
            return a0f, a0t
        # Orient the first bond so the atom shared with the second bond
        # comes last; 'current' tracks the far end of the last bond seen.
        if a0f in [a1f, a1t]:
            path = [a0t, a0f]
            current = a1f if a0f == a1t else a1t
        else:
            path = [a0f, a0t]
            current = a1f if a0t == a1t else a1t
        for i in it:
            anf, ant = self._bonds[i]
            path.append(current)
            if anf == current:
                current = ant
            else:
                current = anf
        path.append(current)
        return path

    def calculate(self):
        L = 0   # number of simple paths of the requested order
        pi = 0  # sum of bond-order products over those paths
        self._gen_bonds()
        for path in Chem.FindAllPathsOfLengthN(self.mol, self._order):
            aids = set()
            before = None
            w = 1
            for i in self._bond_ids_to_atom_ids(path):
                if i in aids:
                    # An atom repeats: not a simple path, skip it
                    # (the for/else below only counts unbroken walks).
                    break
                aids.add(i)
                if before is not None:
                    bond = self.mol.GetBondBetweenAtoms(before, i)
                    w *= bond.GetBondTypeAsDouble()
                before = i
            else:
                L += 1
                pi += w
        return L, pi
class PathCount(PathCountBase):
    r"""path count descriptor.

    :type order: int
    :param order: path order(number of bonds in path)

    :type pi: bool
    :param pi: calculate pi-path count

    :type total: bool
    :param total: total path count(1 to order)

    :type log: bool
    :param log: use log scale
    """

    since = "1.0.0"
    __slots__ = ("_order", "_pi", "_total", "_log")

    def description(self):
        return "{}-ordered {}{}path count{}".format(
            self._order,
            "total " if self._total else "",
            "pi-" if self._pi else "",
            " (log scale)" if self._log else "",
        )

    @classmethod
    def preset(cls, version):
        # Default set: MPC2..MPC10, TMPC10, piPC1..piPC10 and TpiPC10
        # (see __str__ for the naming scheme).
        return chain(
            (cls(o, False, False, False) for o in range(2, 11)),
            [cls(10, False, True, False)],
            (cls(o, True, False, True) for o in range(1, 11)),
            [cls(10, True, True, True)],
        )

    def __str__(self):
        base = "T" if self._total else ""
        pi = "piPC" if self._pi else "MPC"
        return "{}{}{}".format(base, pi, self._order)

    def parameters(self):
        return self._order, self._pi, self._total, self._log

    def __init__(self, order=1, pi=False, total=False, log=False):
        assert order >= 0
        self._order = order
        self._pi = pi
        self._total = total
        self._log = log

    def dependencies(self):
        # Share the expensive path enumeration via PathCountCache; the
        # 'total' variant recursively depends on the (order - 1) descriptor
        # so counts accumulate down to order 0.
        deps = {"PC": PathCountCache(self._order)}
        if self._total and self._order > 0:
            deps["acc"] = self.__class__(self._order - 1, self._pi, self._total)
        return deps

    def calculate(self, PC, acc=None):
        # Order 0 is the number of atoms by convention.
        if self._order == 0:
            return self.rtype(self.mol.GetNumAtoms())
        v = PC[1] if self._pi else PC[0]
        if acc is not None:
            v = acc + v  # accumulated count from lower orders (total variant)
        if self._log:
            v = math.log(v + 1)  # log(v + 1) keeps a zero count mapped to 0
        return v

    @property
    def rtype(self):
        r"""Return type.

        * pi = True: :py:class:`float`
        * pi = False: :py:class:`int`
        """
        return float if self._pi else int
|
{
"content_hash": "908741ab429b7b01d826d6a67882243e",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 81,
"avg_line_length": 23.160919540229884,
"alnum_prop": 0.4913151364764268,
"repo_name": "mordred-descriptor/mordred",
"id": "4298e6ef46c8f7a382cfed42e700912fc418ce4b",
"size": "4030",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mordred/PathCount.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "252306"
},
{
"name": "Shell",
"bytes": "4077"
}
],
"symlink_target": ""
}
|
# Smoke test: compare the cudamat pooling kernels (MaxPool/AvgPool)
# against the pure-Python reference implementations on a small random
# image batch and print the mean absolute differences.
import cudamat_conv.cudamat_conv
reload (cudamat_conv.cudamat_conv)
import gnumpy as g
imSizeX = 10
numImages = 2
filterSizeX = 3
numChannels = 3
numGroups = 1
assert numChannels % numGroups == 0
numFilterColors = numChannels / numGroups
numFilters = 16 * numGroups
moduleStride = 1
numModulesX = (imSizeX - filterSizeX + 1)
numModules = numModulesX**2
### TODO: ask Alex about moduleStride and numGroups.
### But ignoring these I'm good to go.
paddingStart = 0 ## try it without padding for now.
numImgColors = numChannels
# create the images
# (shifted positive, with a distinguishable spike at pixel (3, 3))
images = g.randn((numChannels, imSizeX, imSizeX, numImages))+1
images[:, 3, 3, :] = 2
from cudamat_conv import MaxPool, AvgPool
from cudamat_conv.cudamat_conv_py import MaxPool as MaxPool_py, AvgPool as AvgPool_py
t1 = MaxPool(images,
             subsX = 3,
             startX = 0,
             strideX = 2,
             outputsX = imSizeX/2,
             )
t2 = MaxPool_py(images,
             subsX = 3,
             startX = 0,
             strideX = 2,
             outputsX = imSizeX/2
             )
print 'max pooling:'
# The last number is the mean absolute difference between the two
# implementations; it should be ~0 when the kernels agree.
print abs(t1).mean()
print abs(t2).mean()
print abs(t1-t2).mean()
t1 = AvgPool(images,
             subsX = 3,
             startX = 0,
             strideX = 2,
             outputsX = imSizeX/1,
             )
t2 = AvgPool_py(images,
             subsX = 3,
             startX = 0,
             strideX = 2,
             outputsX = imSizeX/1
             )
print 'avg pooling:'
print abs(t1).mean()
print abs(t2).mean()
print abs(t1-t2).mean()
|
{
"content_hash": "518759fbf5e8f5224e420f493853c00b",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 85,
"avg_line_length": 21.08108108108108,
"alnum_prop": 0.5801282051282052,
"repo_name": "hqxu/deepnet",
"id": "41eccffb0d215a0c773d6d5d4e982cca7859bbaf",
"size": "1560",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cudamat_conv/tests/basic5.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import
import uuid
from datetime import datetime
from sqlalchemy import Column, DateTime, String, Integer
from sqlalchemy.schema import Index, UniqueConstraint
from changes.config import db
from changes.constants import Result, Status
from changes.db.types.enum import Enum
from changes.db.types.guid import GUID
from changes.db.types.json import JSONEncodedDict
from changes.db.utils import model_repr
class Task(db.Model):
    """Tracks one asynchronous task, optionally linked to a parent task.

    The (task_name, parent_id, child_id) triple is unique; ``check``
    aggregates the status of all children of a given parent.
    """

    __tablename__ = 'task'
    __table_args__ = (
        Index('idx_task_parent_id', 'parent_id', 'task_name'),
        Index('idx_task_child_id', 'child_id', 'task_name'),
        UniqueConstraint('task_name', 'parent_id', 'child_id', name='unq_task_entity'),
    )

    id = Column(GUID, primary_key=True, default=uuid.uuid4)
    task_name = Column(String(128), nullable=False)
    # Stored in the 'child_id' column; presumably the id of the object the
    # task operates on -- confirm against the task creation code.
    task_id = Column('child_id', GUID, nullable=False)
    parent_id = Column(GUID)
    status = Column(Enum(Status), nullable=False, default=Status.unknown)
    result = Column(Enum(Result), nullable=False, default=Result.unknown)
    num_retries = Column(Integer, nullable=False, default=0)
    date_started = Column(DateTime)
    date_finished = Column(DateTime)
    date_created = Column(DateTime, default=datetime.utcnow)
    date_modified = Column(DateTime, default=datetime.utcnow)
    # Arbitrary JSON payload associated with the task.
    data = Column(JSONEncodedDict)

    __repr__ = model_repr('task_name', 'parent_id', 'child_id', 'status')

    def __init__(self, **kwargs):
        # Populate column defaults immediately (not only on flush) so
        # freshly constructed instances are fully usable in memory.
        super(Task, self).__init__(**kwargs)
        if self.id is None:
            self.id = uuid.uuid4()
        if self.result is None:
            self.result = Result.unknown
        if self.date_created is None:
            self.date_created = datetime.utcnow()
        if self.date_modified is None:
            self.date_modified = self.date_created

    @classmethod
    def check(cls, task_name, parent_id):
        """Return the aggregate Status of all child tasks of *parent_id*.

        >>> if Task.check('my_task', parent_item.id) == Status.finished:
        >>>     print "all child tasks done!"
        """
        # XXX(dcramer): we could make this fast if we're concerned about # of
        # rows by doing two network hops (first check for in progress, then
        # report result)
        child_tasks = list(db.session.query(
            cls.result, Task.status
        ).filter(
            cls.task_name == task_name,
            cls.parent_id == parent_id,
        ))
        if any(r.status != Status.finished for r in child_tasks):
            return Status.in_progress
        return Status.finished
|
{
"content_hash": "ec07fd2afd1c4b20ab07624f34f4d5e6",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 87,
"avg_line_length": 37.39705882352941,
"alnum_prop": 0.642548171451042,
"repo_name": "alex/changes",
"id": "d52f8a2f308896897e0e6b1b1b45c799d964a4f4",
"size": "2543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "changes/models/task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
class OSNetworksV2(object):
    """Falcon-style resource listing all subnets visible to the account."""

    def on_get(self, req, resp, tenant_id):
        """GET handler: set resp.body to {'networks': [...]}.

        Fetches the account's subnets through the SoftLayer client found
        in ``req.env['sl_client']`` and converts each record with
        ``format_network``.  (The redundant ``networks = []`` dead store
        that preceded the list comprehension was removed.)
        """
        client = req.env['sl_client']
        sl_networks = client['Account'].getSubnets(
            mask='id, modifyDate, gateway, networkVlanId, broadcastAddress, '
            'netmask, networkIdentifier, cidr, reverseDomain, note')
        networks = [format_network(network) for network in sl_networks]
        resp.body = {'networks': networks}
class OSNetworkV2(object):
    """Falcon-style resource exposing a single subnet by SoftLayer id."""

    def on_get(self, req, resp, tenant_id, network_id):
        """GET handler: set resp.body to {'network': {...}} for *network_id*."""
        client = req.env['sl_client']
        subnet = client['Network_Subnet'].getObject(
            id=network_id,
            mask='id, modifyDate, gateway, networkVlanId, broadcastAddress, '
            'netmask, networkIdentifier, cidr, reverseDomain, note')
        resp.body = {'network': format_network(subnet)}
def format_network(sl_network):
    """Translate a SoftLayer subnet record into an OpenStack-style dict.

    Optional SoftLayer fields (note, gateway, broadcastAddress,
    reverseDomain) map to None when absent; the remaining fields raise
    KeyError when missing, exactly as before.
    """
    get = sl_network.get
    cidr = '%s/%s' % (sl_network['networkIdentifier'], sl_network['cidr'])
    return {
        'label': get('note'),
        'updated_at': sl_network['modifyDate'],
        'id': sl_network['id'],
        'gateway': get('gateway'),
        'deleted': False,
        'vlan': sl_network['networkVlanId'],
        'broadcast': get('broadcastAddress'),
        'netmask': sl_network['netmask'],
        'cidr': cidr,
        'dns1': get('reverseDomain'),
    }
|
{
"content_hash": "814a2d99d7ebf3fde366f519a321c771",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 77,
"avg_line_length": 39.80555555555556,
"alnum_prop": 0.5910676901605024,
"repo_name": "BillArnold/barnoldjg",
"id": "517f76683782082302b6c0f323daea3b53a8b423",
"size": "1435",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "jumpgate/compute/drivers/sl/networks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "162307"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
}
|
import copy
import logging
from curwmysqladapter import Data
from cms_utils.UtilValidation import validate_timeseries
from cms_utils.UtilInterpolation import interpolate_timeseries
from cms_utils.InterpolationStrategy import InterpolationStrategy
# from ..config import Constants as con
def get_interpolated_timeseries(timeseries, variable):
    """Interpolate *timeseries* with the strategy matching *variable*.

    Precipitation is aggregated by summation, Temperature by averaging.
    Unknown variables are logged as errors and yield an empty list.
    """
    strategies = {
        'Precipitation': InterpolationStrategy.Summation,
        'Temperature': InterpolationStrategy.Average,
    }
    if variable not in strategies:
        logging.error('Unable to handle variable type: %s', variable)
        return []
    return interpolate_timeseries(strategies[variable], timeseries)
def create_processed_timeseries(adapter, stations, duration, opts):
    """Validate, interpolate and persist processed timeseries per station.

    :param adapter: curwmysqladapter-style adapter used for all DB access.
    :param stations: station config dicts with 'name', 'variables', 'units',
        'max_values', 'min_values' and optionally 'run_name'.
    :param duration: dict holding 'start_date_time' / 'end_date_time'.
    :param opts: dict; 'force_insert' upserts over existing processed rows.
    """
    print("""
    *********************************************************
    * Create Processed Data *
    *********************************************************
    """)
    start_date_time = duration.get('start_date_time', None)
    end_date_time = duration.get('end_date_time', None)
    force_insert = opts.get('force_insert', False)
    # Template metadata; station/variable/unit/name are overridden per loop.
    metaData = {
        'station': 'Hanwella',
        'variable': 'Precipitation',
        'unit': 'mm',
        'type': 'Observed',
        'source': 'WeatherStation',
        'name': 'WUnderground',
    }
    for station in stations:
        print('\n**************** STATION **************')
        print('station:', station['name'])
        # Check whether station exists
        is_station_exists = adapter.get_station({'name': station['name']})
        if is_station_exists is None:
            logging.warning('Station %s does not exists. Continue with others', station['name'])
            continue
        meta = copy.deepcopy(metaData)
        meta['station'] = station['name']
        variables = station['variables']
        units = station['units']
        max_values = station['max_values']
        min_values = station['min_values']
        if 'run_name' in station:
            meta['name'] = station['run_name']
        for i in range(0, len(variables)):
            meta['variable'] = variables[i]
            meta['unit'] = units[i]
            # Get Existing Raw Data
            eventId = adapter.get_event_id(meta)
            # NOTE(review): this rebinding shadows the 'opts' *parameter* for
            # the rest of the function; 'force_insert' was extracted above,
            # so behaviour is intact, but the shadowing is fragile.
            opts = {
                'from': start_date_time.strftime("%Y-%m-%d %H:%M:%S"),
                'to': end_date_time.strftime("%Y-%m-%d %H:%M:%S")
            }
            rawTimeseries = adapter.retrieve_timeseries(meta, opts)
            if len(rawTimeseries) and len(rawTimeseries[0]['timeseries']) > 0:
                rawTimeseries = rawTimeseries[0]['timeseries']
            else:
                print('INFO: Timeseries does not have any data on :', end_date_time.strftime("%Y-%m-%d"), rawTimeseries)
                continue
            # Bounds handed to validate_timeseries -- presumably used to
            # reject out-of-range readings; confirm in UtilValidation.
            validationObj = {
                'max_value': max_values[i],
                'min_value': min_values[i],
            }
            validated_timeseries = validate_timeseries(rawTimeseries, validationObj)
            filled_timeseries = get_interpolated_timeseries(validated_timeseries, variables[i])
            # Check whether processed timeseries exists
            new_opts = {
                'from': start_date_time.strftime("%Y-%m-%d %H:%M:%S"),
                'to': end_date_time.strftime("%Y-%m-%d %H:%M:%S"),
                'mode': Data.processed_data,
            }
            existingTimeseries = adapter.retrieve_timeseries(meta, new_opts)
            # Skip series that already have processed data unless forced.
            if len(existingTimeseries) and len(existingTimeseries[0]['timeseries']) > 0 and not force_insert:
                print('\n')
                continue
            print('Interpolated Timeseries::')
            # Show only the first and last two points as a sanity check.
            for l in filled_timeseries[:2] + filled_timeseries[-2:]:
                print(l)
            rowCount = \
                adapter.insert_timeseries(eventId, filled_timeseries, upsert=force_insert, mode=Data.processed_data)
            print('%s rows inserted.\n' % rowCount)
|
{
"content_hash": "f95983d64417f106f6653efb1a84aff6",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 120,
"avg_line_length": 40.673469387755105,
"alnum_prop": 0.5637230306071249,
"repo_name": "gihankarunarathne/cfcwm-cms",
"id": "6bbf1e568c79c9ab3aee3d54f3418e0f2e760c84",
"size": "3986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "observation/obs_processed_data/ObsProcessedData.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "496"
},
{
"name": "Python",
"bytes": "136577"
}
],
"symlink_target": ""
}
|
import unittest
from pyparsing import ParseException
from tests.utils.grammar import get_record_grammar
"""
CWR acknowledgement grammar tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestAcknowledgementGrammar(unittest.TestCase):
def setUp(self):
self.grammar = get_record_grammar('acknowledgement')
def test_valid_full(self):
"""
Tests that the Acknowledgement grammar parses correctly formatted strings.
This test contains all the optional fields.
"""
record = 'ACK0000123400000023201201021020300123401234567AGRTHE CREATION TITLE ABCD1234512345123456ABCD123451234512345720130203AS'
result = self.grammar.parseString(record)[0]
self.assertEqual('ACK', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual(2012, result.creation_date_time.year)
self.assertEqual(1, result.creation_date_time.month)
self.assertEqual(2, result.creation_date_time.day)
self.assertEqual(10, result.creation_date_time.hour)
self.assertEqual(20, result.creation_date_time.minute)
self.assertEqual(30, result.creation_date_time.second)
self.assertEqual(1234, result.original_group_id)
self.assertEqual(1234567, result.original_transaction_sequence_n)
self.assertEqual('AGR', result.original_transaction_type)
self.assertEqual('THE CREATION TITLE', result.creation_title)
self.assertEqual('ABCD1234512345123456', result.submitter_creation_n)
self.assertEqual('ABCD1234512345123457', result.recipient_creation_n)
self.assertEqual(2013, result.processing_date.year)
self.assertEqual(2, result.processing_date.month)
self.assertEqual(3, result.processing_date.day)
self.assertEqual('AS', result.transaction_status)
def test_valid_min(self):
"""
Tests that the Acknowledgement grammar parses correctly formatted strings.
This test contains none of the optional fields.
"""
record = 'ACK0000123400000023201201021020300123401234567AGR 20130203AS'
result = self.grammar.parseString(record)[0]
self.assertEqual('ACK', result.record_type)
self.assertEqual(1234, result.transaction_sequence_n)
self.assertEqual(23, result.record_sequence_n)
self.assertEqual(2012, result.creation_date_time.year)
self.assertEqual(1, result.creation_date_time.month)
self.assertEqual(2, result.creation_date_time.day)
self.assertEqual(10, result.creation_date_time.hour)
self.assertEqual(20, result.creation_date_time.minute)
self.assertEqual(30, result.creation_date_time.second)
self.assertEqual(1234, result.original_group_id)
self.assertEqual(1234567, result.original_transaction_sequence_n)
self.assertEqual('AGR', result.original_transaction_type)
self.assertEqual(None, result.creation_title)
self.assertEqual(None, result.submitter_creation_n)
self.assertEqual(None, result.recipient_creation_n)
self.assertEqual(2013, result.processing_date.year)
self.assertEqual(2, result.processing_date.month)
self.assertEqual(3, result.processing_date.day)
self.assertEqual('AS', result.transaction_status)
class TestAcknowledgementGrammarException(unittest.TestCase):
    """Error cases: malformed input must raise ParseException."""

    def setUp(self):
        self.grammar = get_record_grammar('acknowledgement')

    def test_empty(self):
        """
        An empty string is rejected by the acknowledgement grammar.
        """
        with self.assertRaises(ParseException):
            self.grammar.parseString('')

    def test_invalid(self):
        """An arbitrary non-CWR string is rejected."""
        with self.assertRaises(ParseException):
            self.grammar.parseString('This is an invalid string')
|
{
"content_hash": "e9dde9e81445fbef0ae458b5a3bf4381",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 178,
"avg_line_length": 42.43877551020408,
"alnum_prop": 0.6838182255349844,
"repo_name": "weso/CWR-DataApi",
"id": "8f78aa1500fc69dc65478eb189ee4a15fa5435fa",
"size": "4184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/grammar/factory/record/test_acknowledgement.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3830"
},
{
"name": "Makefile",
"bytes": "2366"
},
{
"name": "Python",
"bytes": "997385"
}
],
"symlink_target": ""
}
|
from clientbase import FaceClientBase
class Person(FaceClientBase):
    """Client wrapper for the face API's person endpoints.

    Only ``create`` and ``delete`` are implemented; the remaining
    operations are unimplemented placeholders.
    """

    def __init__(self):
        super(Person, self).__init__()

    def create(self, person_name, face_id):
        # Create a person with one initial face attached.
        return self.api.person.create(person_name=person_name, face_id=face_id)

    def delete(self, person_name):
        # Delete the person identified by *person_name*.
        return self.api.person.delete(person_name=person_name)

    def add_face(self):
        # TODO: not implemented yet.
        pass

    def remove_face(self):
        # TODO: not implemented yet.
        pass

    def get_info(self):
        # TODO: not implemented yet.
        pass

    def set_info(self):
        # TODO: not implemented yet.
        pass
|
{
"content_hash": "b15650b7597f64eeef16319ac75cb163",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.62109375,
"repo_name": "KellyChan/Python",
"id": "12dfe4b6085803bfbf2e76bc02fddc3dfa39cdc7",
"size": "512",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/facepp/facepp/faceclient/person.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1186772"
},
{
"name": "Batchfile",
"bytes": "79181"
},
{
"name": "C",
"bytes": "36468971"
},
{
"name": "C++",
"bytes": "397352"
},
{
"name": "CSS",
"bytes": "9853"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "52804"
},
{
"name": "Groff",
"bytes": "492261"
},
{
"name": "HTML",
"bytes": "414186"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "JavaScript",
"bytes": "20910"
},
{
"name": "Makefile",
"bytes": "208458"
},
{
"name": "Objective-C",
"bytes": "66324"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "48074049"
},
{
"name": "R",
"bytes": "7906"
},
{
"name": "Shell",
"bytes": "865630"
},
{
"name": "TeX",
"bytes": "646204"
},
{
"name": "VimL",
"bytes": "9546"
},
{
"name": "Visual Basic",
"bytes": "962"
}
],
"symlink_target": ""
}
|
import logging as real_logging
import os
import sys
from telemetry.core import discover
from telemetry.core import local_server
from telemetry.core import memory_cache_http_server
from telemetry.core import network_controller
from telemetry.core import tracing_controller
from telemetry.core import util
from telemetry.internal.platform import (
platform_backend as platform_backend_module)
_host_platform = None

# Remote platform is a dictionary from device ids to remote platform instances.
_remote_platforms = {}


def _InitHostPlatformIfNeeded():
  """Create the module-level host Platform singleton on first use."""
  global _host_platform
  if _host_platform:
    return
  # Instantiate the first backend class that declares itself the host backend.
  backend = next(
      (backend_class() for backend_class in _IterAllPlatformBackendClasses()
       if backend_class.IsPlatformBackendForHost()),
      None)
  if not backend:
    raise NotImplementedError()
  _host_platform = Platform(backend)
def GetHostPlatform():
  """Return the lazily-created Platform for the machine running Telemetry."""
  _InitHostPlatformIfNeeded()
  return _host_platform
def _IterAllPlatformBackendClasses():
  """Yield every PlatformBackend subclass discoverable next to the module."""
  platform_dir = os.path.dirname(
      os.path.realpath(platform_backend_module.__file__))
  # Python 2 API: itervalues() on the discovered {name: class} mapping.
  return discover.DiscoverClasses(
      platform_dir, util.GetTelemetryDir(),
      platform_backend_module.PlatformBackend).itervalues()
def GetPlatformForDevice(device, finder_options, logging=real_logging):
  """ Returns a platform instance for the device.
  Args:
    device: a device.Device instance.
  """
  # Platforms are cached per device guid so repeated calls reuse the
  # same instance.
  if device.guid in _remote_platforms:
    return _remote_platforms[device.guid]
  try:
    # Ask each discovered backend class whether it can drive this device;
    # the first match wins.
    for platform_backend_class in _IterAllPlatformBackendClasses():
      if platform_backend_class.SupportsDevice(device):
        _remote_platforms[device.guid] = (
            platform_backend_class.CreatePlatformForDevice(device,
                                                           finder_options))
        return _remote_platforms[device.guid]
    return None
  except Exception:
    current_exception = sys.exc_info()
    logging.error('Fail to create platform instance for %s.', device.name)
    # Python 2 three-expression raise: re-raise the original exception with
    # its original traceback after logging.
    raise current_exception[0], current_exception[1], current_exception[2]
class Platform(object):
  """The platform that the target browser is running on.
  Provides a limited interface to interact with the platform itself, where
  possible. It's important to note that platforms may not provide a specific
  API, so check with IsFooBar() for availability.
  """
  def __init__(self, platform_backend):
    self._platform_backend = platform_backend
    self._platform_backend.InitPlatformBackend()
    # The backend and this Platform hold references to each other.
    self._platform_backend.SetPlatform(self)
    self._network_controller = network_controller.NetworkController(
        self._platform_backend.network_controller_backend)
    self._tracing_controller = tracing_controller.TracingController(
        self._platform_backend.tracing_controller_backend)
    self._local_server_controller = local_server.LocalServerController(
        self._platform_backend)
    self._is_monitoring_power = False
  @property
  def is_host_platform(self):
    """True iff this Platform represents the host telemetry runs on."""
    return self == GetHostPlatform()
  @property
  def network_controller(self):
    """Control network settings and servers to simulate the Web."""
    return self._network_controller
  @property
  def tracing_controller(self):
    """The TracingController used to record traces on this platform."""
    return self._tracing_controller
  def CanMonitorThermalThrottling(self):
    """Platforms may be able to detect thermal throttling.
    Some fan-less computers go into a reduced performance mode when their heat
    exceeds a certain threshold. Performance tests in particular should use this
    API to detect if this has happened and interpret results accordingly.
    """
    return self._platform_backend.CanMonitorThermalThrottling()
  def IsThermallyThrottled(self):
    """Returns True if the device is currently thermally throttled."""
    return self._platform_backend.IsThermallyThrottled()
  def HasBeenThermallyThrottled(self):
    """Returns True if the device has been thermally throttled."""
    return self._platform_backend.HasBeenThermallyThrottled()
  def GetDeviceTypeName(self):
    """Returns a string description of the Platform device, or None.
    Examples: Nexus 7, Nexus 6, Desktop"""
    return self._platform_backend.GetDeviceTypeName()
  def GetArchName(self):
    """Returns a string description of the Platform architecture.
    Examples: x86_64 (posix), AMD64 (win), armeabi-v7a, x86"""
    return self._platform_backend.GetArchName()
  def GetOSName(self):
    """Returns a string description of the Platform OS.
    Examples: WIN, MAC, LINUX, CHROMEOS"""
    return self._platform_backend.GetOSName()
  def GetOSVersionName(self):
    """Returns a logically sortable, string-like description of the Platform OS
    version.
    Examples: VISTA, WIN7, LION, MOUNTAINLION"""
    return self._platform_backend.GetOSVersionName()
  def GetOSVersionNumber(self):
    """Returns an integer description of the Platform OS major version.
    Examples: On Mac, 13 for Mavericks, 14 for Yosemite."""
    return self._platform_backend.GetOSVersionNumber()
  def CanFlushIndividualFilesFromSystemCache(self):
    """Returns true if the disk cache can be flushed for specific files."""
    return self._platform_backend.CanFlushIndividualFilesFromSystemCache()
  def FlushEntireSystemCache(self):
    """Flushes the OS's file cache completely.
    This function may require root or administrator access."""
    return self._platform_backend.FlushEntireSystemCache()
  def FlushSystemCacheForDirectory(self, directory):
    """Flushes the OS's file cache for the specified directory.
    This function does not require root or administrator access."""
    return self._platform_backend.FlushSystemCacheForDirectory(directory)
  def FlushDnsCache(self):
    """Flushes the OS's DNS cache completely.
    This function may require root or administrator access."""
    return self._platform_backend.FlushDnsCache()
  def LaunchApplication(self, application, parameters=None,
                        elevate_privilege=False):
    """"Launches the given |application| with a list of |parameters| on the OS.
    Set |elevate_privilege| to launch the application with root or admin rights.
    Returns:
      A popen style process handle for host platforms.
    """
    return self._platform_backend.LaunchApplication(
        application, parameters, elevate_privilege=elevate_privilege)
  def IsApplicationRunning(self, application):
    """Returns whether an application is currently running."""
    return self._platform_backend.IsApplicationRunning(application)
  def CanLaunchApplication(self, application):
    """Returns whether the platform can launch the given application."""
    return self._platform_backend.CanLaunchApplication(application)
  def InstallApplication(self, application):
    """Installs the given application."""
    return self._platform_backend.InstallApplication(application)
  def CanCaptureVideo(self):
    """Returns a bool indicating whether the platform supports video capture."""
    return self._platform_backend.CanCaptureVideo()
  def StartVideoCapture(self, min_bitrate_mbps):
    """Starts capturing video.
    Outer framing may be included (from the OS, browser window, and webcam).
    Args:
      min_bitrate_mbps: The minimum capture bitrate in MegaBits Per Second.
          The platform is free to deliver a higher bitrate if it can do so
          without increasing overhead.
    Raises:
      ValueError if the required |min_bitrate_mbps| can't be achieved.
    """
    return self._platform_backend.StartVideoCapture(min_bitrate_mbps)
  def StopVideoCapture(self):
    """Stops capturing video.
    Returns:
      A telemetry.core.video.Video object.
    """
    return self._platform_backend.StopVideoCapture()
  def CanMonitorPower(self):
    """Returns True iff power can be monitored asynchronously via
    StartMonitoringPower() and StopMonitoringPower().
    """
    return self._platform_backend.CanMonitorPower()
  def CanMeasurePerApplicationPower(self):
    """Returns True if the power monitor can measure power for the target
    application in isolation. False if power measurement is for full system
    energy consumption."""
    return self._platform_backend.CanMeasurePerApplicationPower()
  def StartMonitoringPower(self, browser):
    """Starts monitoring power utilization statistics.
    Args:
      browser: The browser to monitor.
    """
    assert self._platform_backend.CanMonitorPower()
    self._platform_backend.StartMonitoringPower(browser)
    self._is_monitoring_power = True
  def StopMonitoringPower(self):
    """Stops monitoring power utilization and returns stats
    Returns:
      None if power measurement failed for some reason, otherwise a dict of
      power utilization statistics containing: {
        # An identifier for the data provider. Allows to evaluate the precision
        # of the data. Example values: monsoon, powermetrics, ds2784
        'identifier': identifier,
        # The instantaneous power (voltage * current) reading in milliwatts at
        # each sample.
        'power_samples_mw':  [mw0, mw1, ..., mwN],
        # The full system energy consumption during the sampling period in
        # milliwatt hours. May be estimated by integrating power samples or may
        # be exact on supported hardware.
        'energy_consumption_mwh': mwh,
        # The target application's energy consumption during the sampling period
        # in milliwatt hours. Should be returned iff
        # CanMeasurePerApplicationPower() return true.
        'application_energy_consumption_mwh': mwh,
        # A platform-specific dictionary of additional details about the
        # utilization of individual hardware components.
        component_utilization: {
          ...
        }
        # Platform-specific data not attributed to any particular hardware
        # component.
        platform_info: {
          # Device-specific onboard temperature sensor.
          'average_temperature_c': c,
          ...
        }
      }
    """
    ret_val = self._platform_backend.StopMonitoringPower()
    self._is_monitoring_power = False
    return ret_val
  def IsMonitoringPower(self):
    """Returns true if power is currently being monitored, false otherwise."""
    # TODO(rnephew): Remove when crbug.com/553601 is solved.
    real_logging.info('IsMonitoringPower: %s', self._is_monitoring_power)
    return self._is_monitoring_power
  def CanMonitorNetworkData(self):
    """Returns true if network data can be retrieved, false otherwise."""
    return self._platform_backend.CanMonitorNetworkData()
  def GetNetworkData(self, browser):
    """Get current network data.
    Returns:
      Tuple of (sent_data, received_data) in kb if data can be found,
      None otherwise.
    """
    assert browser.platform == self
    return self._platform_backend.GetNetworkData(browser)
  def IsCooperativeShutdownSupported(self):
    """Indicates whether CooperativelyShutdown, below, is supported.
    It is not necessary to implement it on all platforms."""
    return self._platform_backend.IsCooperativeShutdownSupported()
  def CooperativelyShutdown(self, proc, app_name):
    """Cooperatively shut down the given process from subprocess.Popen.
    Currently this is only implemented on Windows. See
    crbug.com/424024 for background on why it was added.
    Args:
      proc: a process object returned from subprocess.Popen.
      app_name: on Windows, is the prefix of the application's window
          class name that should be searched for. This helps ensure
          that only the application's windows are closed.
    Returns True if it is believed the attempt succeeded.
    """
    return self._platform_backend.CooperativelyShutdown(proc, app_name)
  def CanTakeScreenshot(self):
    """Returns whether this platform's backend supports taking screenshots."""
    return self._platform_backend.CanTakeScreenshot()
  # TODO(nednguyen): Implement this on Mac, Linux & Win. (crbug.com/369490)
  def TakeScreenshot(self, file_path):
    """ Takes a screenshot of the platform and save to |file_path|.
    Note that this method may not be supported on all platform, so check with
    CanTakeScreenshot before calling this.
    Args:
      file_path: Where to save the screenshot to. If the platform is remote,
        |file_path| is the path on the host platform.
    Returns True if it is believed the attempt succeeded.
    """
    return self._platform_backend.TakeScreenshot(file_path)
  def StartLocalServer(self, server):
    """Starts a LocalServer and associates it with this platform.
    |server.Close()| should be called manually to close the started server.
    """
    self._local_server_controller.StartServer(server)
  @property
  def http_server(self):
    """The running MemoryCacheHTTPServer, or None if none has been started."""
    return self._local_server_controller.GetRunningServer(
        memory_cache_http_server.MemoryCacheHTTPServer, None)
  def SetHTTPServerDirectories(self, paths):
    """Returns True if the HTTP server was started, False otherwise."""
    # A single path may be passed as a bare string (Python 2 basestring).
    if isinstance(paths, basestring):
      paths = set([paths])
    paths = set(os.path.realpath(p) for p in paths)
    # If any path is in a subdirectory of another, remove the subdirectory.
    duplicates = set()
    for parent_path in paths:
      for sub_path in paths:
        if parent_path == sub_path:
          continue
        if os.path.commonprefix((parent_path, sub_path)) == parent_path:
          duplicates.add(sub_path)
    paths -= duplicates
    if self.http_server:
      # Server already serves exactly the requested paths; keep it running.
      if paths and self.http_server.paths == paths:
        return False
      self.http_server.Close()
    if not paths:
      return False
    server = memory_cache_http_server.MemoryCacheHTTPServer(paths)
    self.StartLocalServer(server)
    return True
  def StopAllLocalServers(self):
    """Closes every local server managed by this platform's controller."""
    self._local_server_controller.Close()
  @property
  def local_servers(self):
    """Returns the currently running local servers."""
    return self._local_server_controller.local_servers
|
{
"content_hash": "dca99307dac9ba9322974d0cfffde34d",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 80,
"avg_line_length": 35.51908396946565,
"alnum_prop": 0.7167418869546529,
"repo_name": "Bysmyyr/chromium-crosswalk",
"id": "3199369416dff1922575f0049e4a59fc1bde67f6",
"size": "14121",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/platform.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
from flask_ask import Ask, statement, dialog, elicit, delegate, question
from configobj import ConfigObj
from geopy.geocoders import GoogleV3
from pyproj import Proj, transform
from datetime import date
import logging
import pprint
# Load skill configuration (DB credentials, API keys) from the .env file.
config = ConfigObj("../.env")
app = Flask(__name__)
# Verbose flask-ask logging for development.
log = logging.getLogger("flask_ask")
log.setLevel(logging.DEBUG)
log.info("Lets go!")
# Alexa request-signature verification can be toggled from config.
app.config['ASK_VERIFY_REQUESTS'] = config['verify_requests']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql+mysqldb://%(database_user)s:%(database_password)s@%(database_host)s/%(database_name)s" % config
db = SQLAlchemy(app)
ask = Ask(app, '/')
@ask.intent('AMAZON.StopIntent')
def stop():
    """Handle the built-in stop intent with a short acknowledgement."""
    reply = 'Ok'
    return statement(reply)
@ask.intent('AMAZON.CancelIntent')
def cancel():
    """Handle the built-in cancel intent with a short acknowledgement."""
    reply = 'Ok'
    return statement(reply)
@ask.intent('AMAZON.HelpIntent')
def help():
    """Describe the skill's capabilities and re-prompt the user.

    NOTE: the function name shadows the builtin ``help``; it is only ever
    invoked through the flask-ask intent mapping.
    """
    usage = ('Ottawa Garbage can tell you the next time garbage or recycling will be picked up by asking When is the next pickup date? Or you can give a specific address like, When is the next pickup date for 190 Main Street? You can change your address by saying Change my address to 190 Main Street. You can ask for your current address by asking What is my address. What would you like to do now?')
    return question(usage)
@ask.intent('NextPickupForAddress')
def next_pickup_for_address(address):
    """Tell the user the next pickup date for a spoken (not stored) address."""
    db.engine.connect()
    if address is None:
        # No address slot filled yet: ask for one.
        return dialog(elicit('address', 'For what address?'))
    if ask.request.dialogState != 'COMPLETED':
        # Let Alexa's dialog model keep collecting slots.
        return dialog(delegate())
    normalized = full_address(address)
    location = location_from_address(normalized)
    if location is None:
        return question("I don't know the address " + address + ". Try a different address near by.")
    x, y = position_from_location(location)
    return statement("For " + normalized + ", " + pickup_statement_for(x, y))
@ask.intent('ChangeAddress')
def change_address(address):
    """Replace the stored address for the current Alexa user."""
    db.engine.connect()
    user_id = ask.context.System.user.userId
    if address is None:
        return dialog(elicit('address', 'What is your new address?'))
    if ask.request.dialogState != 'COMPLETED':
        return dialog(delegate())
    saved = set_address_for_user(address, user_id)
    if saved is None:
        return question("I don't know the address " + address + ". Try a different address near by.")
    return statement("Your new address is " + saved)
def set_address_for_user(address, user_id):
    """Geocode *address*, store it for *user_id*, and return the normalized
    address string, or None when geocoding fails.

    Any previously stored address rows for the user are removed first.
    """
    connection = db.engine.connect()
    normalized = full_address(address)
    location = location_from_address(normalized)
    if location is None:
        return None
    x, y = position_from_location(location)
    latlong = str(location.latitude) + ',' + str(location.longitude)
    connection.execute(text("delete from addresses where user_id=:user_id").bindparams(user_id=user_id))
    connection.execute(text("insert into addresses (user_id, address, latlong, position) values (:user_id, :address, :latlong, POINT(:x, :y))"), user_id=user_id, address=normalized, latlong=latlong, x=x, y=y)
    return normalized
def get_xy_for_user(user_id):
    """Return the stored projected (x, y) position for *user_id*.

    NOTE(review): raises if the user has no stored address row — callers
    appear to store an address first; confirm.
    """
    conn = db.engine.connect()
    row = conn.execute(text("select id, st_x(position) x, st_y(position) y from addresses where user_id = :user_id").bindparams(user_id=user_id)).first()
    return row['x'], row['y']
@ask.intent('CurrentAddress')
def current_address():
    """Speak the address currently stored for this Alexa user, if any."""
    connection = db.engine.connect()
    user_id = ask.context.System.user.userId
    row = connection.execute(text("select address from addresses where user_id = :user_id").bindparams(user_id=user_id)).first()
    if row is None:
        return statement('I have no address on file for you at the moment')
    return statement('Your address is ' + row['address'])
@ask.launch
def start_skill():
    """Greet the user on skill launch and offer the main feature."""
    prompt = 'Do you want to know the next pickup date?'
    return question(prompt)
@ask.intent('No')
def no_intent():
    """End the session politely when the user declines."""
    farewell = 'Have a nice day then eh!'
    return statement(farewell)
@ask.intent('NextPickup')
def next_pickup(address):
    """Answer the next pickup date, saving a newly spoken address if given."""
    connection = db.engine.connect()
    user_id = ask.context.System.user.userId
    if address is not None:
        if ask.request.dialogState != 'COMPLETED':
            return dialog(delegate())
        saved = set_address_for_user(address, user_id)
        if saved is None:
            return question("I don't know the address " + address + ". Try a different address near by.")
        x, y = get_xy_for_user(user_id)
        return statement("For " + saved + ", " + pickup_statement_for(x, y))
    # No address spoken: fall back to the stored one, or ask for it.
    rows = connection.execute(text("select id, st_x(position) x, st_y(position) y from addresses where user_id = :user_id").bindparams(user_id=user_id))
    if rows.rowcount == 0:
        return dialog(elicit('address', 'What is your address'))
    row = rows.first()
    return statement(pickup_statement_for(row['x'], row['y']))
def pickup_statement_for(x, y):
    """Build the spoken pickup summary for the route containing point (x, y).

    Looks up the collection route whose polygon contains the point, works out
    this week's and next week's pickup day (shifted by holidays), and returns
    a sentence naming which of garbage/recycling happens when.
    """
    connection = db.engine.connect()
    weekdays = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}
    # Route whose service area polygon contains the projected point.
    pickup_res = connection.execute(text("select * from routes where st_contains(area, point(:x, :y))").bindparams(x=x, y=y)).first()
    # Calendar date of this week's scheduled pickup day (Monday == 0).
    pickup_day_res = connection.execute(text("SELECT date_add(DATE_ADD(CURDATE(), INTERVAL - WEEKDAY(CURDATE()) DAY), interval :days day) dt").bindparams(days=pickup_res['pickup_day'])).first()
    # Holidays earlier in the week push the pickup later by that many days.
    offset = days_to_offset(pickup_day_res['dt'])
    pickup_day = pickup_res['pickup_day'] + offset
    # NOTE(review): result of this query is never used — possibly leftover.
    pickup_with_offset_res = connection.execute(text("select date_add(:date, interval :days day)").bindparams(date=pickup_day_res['dt'], days=offset))
    current_weekday_res = connection.execute(text("select weekday(now()) wd")).first()
    current_weekday = current_weekday_res['wd']
    # Phrase relative to today: past, yesterday, today, tomorrow, or the day name.
    if (pickup_day < current_weekday - 1):
        pickup_str = 'was picked up ' + weekdays[pickup_day] + '.'
    elif (pickup_day == current_weekday - 1):
        pickup_str = 'was picked up yesterday.'
    elif (pickup_day == current_weekday):
        pickup_str = 'was picked up today.'
    elif (pickup_day == current_weekday + 1):
        pickup_str = 'will be picked up tomorrow.'
    else:
        pickup_str = 'will be picked up on ' + weekdays[pickup_day] + '.'
    # Same computation, one week ahead, for the following pickup.
    next_pickup_day_res = connection.execute(text("SELECT date_add(date_add(DATE_ADD(CURDATE(), INTERVAL - WEEKDAY(CURDATE()) DAY), interval 7 day), interval :days day) dt").bindparams(days=pickup_res['pickup_day'])).first()
    log.debug("next pickup date: " + str(next_pickup_day_res['dt']))
    next_offset = days_to_offset(next_pickup_day_res['dt'])
    next_pickup_day = pickup_res['pickup_day'] + next_offset
    next_pickup_str = 'is next ' + weekdays[next_pickup_day] + '.'
    # Garbage and recycling alternate by week parity and route schedule.
    week_number_res = connection.execute(text("select weekofyear(now()) wk")).first()
    week_number = week_number_res['wk']
    pickup_type, next_pickup_type = pickup_type_str(pickup_res['schedule'][0], week_number)
    return pickup_type + " " + pickup_str + " " + next_pickup_type + " " + next_pickup_str
def pickup_type_str(schedule, week_number):
    """Return (this_week_type, next_week_type) for the alternating schedule.

    The base order is recycling then garbage; a route on schedule 'A' flips
    it, and an even ISO week number flips it again.
    """
    order = ['Recycling', 'Garbage']
    if schedule == 'A':
        order.reverse()
    if week_number % 2 == 0:
        order.reverse()
    return order[0], order[1]
def days_to_offset(date):
    """Count holidays earlier in *date*'s week (same year/week, smaller
    weekday), i.e. how many days the pickup slips later."""
    conn = db.engine.connect()
    row = conn.execute(text("select count(*) cnt from holidays where year=year(:date) and week_number = weekofyear(:date) and weekday(:date) > weekday").bindparams(date=date)).first()
    return row['cnt']
def full_address(address):
    """Lower-case *address* and append province/country, inserting 'Ottawa'
    unless the address already ends with a known local place name."""
    cities = ["dalmeny","pana","antrim","corkery","dwyer hill","burritts rapids","ashton","galetta","dunrobin","kinburn","kenmore","fallowfield","edwards","sarsfield","vernon","kars","fitzroy harbour","fitzroy","marionville","vars","munster","carp","navan","north gower","cumberland","metcalfe","constance bay","osgoode","richmond","greely","manotick","orleans","barrhaven","stittsville","bells corners","blackburn hamlet","hunt club","morgan's grant","riverside south","riverview","goulbourn","osgoode","rideau","west carleton","rockcliffe park","cumberland","gloucester","kanata","nepean","vanier","gatineau","huntley","torbolton","ottawa"]
    normalized = address.lower()
    # str.endswith accepts a tuple of suffixes — one C-level check.
    if normalized.endswith(tuple(cities)):
        return normalized + ", Ontario, Canada"
    return normalized + ", Ottawa, Ontario, Canada"
def location_from_address(address):
    """Geocode *address* via Google; returns a geopy location or None."""
    geocoder = GoogleV3(api_key=config['google_maps_api_key'])
    return geocoder.geocode(address)
def position_from_location(location):
    """Project a WGS84 lat/long location into the EPSG:2951 plane used by
    the route polygons; returns (x, y)."""
    source = Proj(init='epsg:4326')
    target = Proj(init='epsg:2951')
    return transform(source, target, location.longitude, location.latitude)
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
|
{
"content_hash": "bfaad2ce0ac479ee1afd0d01f03353a8",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 642,
"avg_line_length": 47.295918367346935,
"alnum_prop": 0.6687162891046387,
"repo_name": "stevemulligan/ottawagarbage",
"id": "617e5745ef39ed61665ebd186f662878200e48db",
"size": "9270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/garbage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2673"
},
{
"name": "Python",
"bytes": "11125"
}
],
"symlink_target": ""
}
|
import json
from django.conf import settings
from django.core.management import BaseCommand
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from django.utils.translation import ugettext_lazy
from odk_logger.models import Instance
class Command(BaseCommand):
    # Management command: re-applies deletion timestamps from MongoDB onto
    # the SQL Instance rows so both stores agree.
    help = ugettext_lazy("Fixes deleted instances by syncing "
                         "deleted items from mongo.")

    def handle(self, *args, **kwargs):
        """Clear SQL-side deletion marks, then re-apply them from mongo."""
        # Reset all sql deletes to None
        # NOTE(review): exclude() with two kwargs skips only rows matching
        # BOTH conditions — confirm inactive forms are meant to be reset too.
        Instance.objects.exclude(
            deleted_at=None, xform__form_active=True).update(deleted_at=None)
        # Get all mongo deletes
        query = '{"$and": [{"_deleted_at": {"$exists": true}}, ' \
                '{"_deleted_at": {"$ne": null}}]}'
        query = json.loads(query)
        xform_instances = settings.MONGO_DB.instances
        cursor = xform_instances.find(query)
        for record in cursor:
            # update sql instance with deleted_at datetime from mongo
            try:
                i = Instance.objects.get(
                    uuid=record["_uuid"], xform__form_active=True)
            except Instance.DoesNotExist:
                continue
            else:
                deleted_at = parse_datetime(record["_deleted_at"])
                # Normalize naive datetimes to UTC before saving.
                if not timezone.is_aware(deleted_at):
                    deleted_at = timezone.make_aware(
                        deleted_at, timezone.utc)
                i.set_deleted(deleted_at)
|
{
"content_hash": "60244608efbd44d3d29d918a956c2ede",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 39.67567567567568,
"alnum_prop": 0.5946866485013624,
"repo_name": "eHealthAfrica/formhub",
"id": "0ec19cd104399411da30715858eba77a8a12630e",
"size": "1538",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "odk_logger/management/commands/sync_deleted_instances_fix.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "60276"
},
{
"name": "HTML",
"bytes": "251331"
},
{
"name": "JavaScript",
"bytes": "722151"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Nginx",
"bytes": "793"
},
{
"name": "Python",
"bytes": "1651739"
},
{
"name": "Shell",
"bytes": "12227"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import keops.models.fields
class Migration(migrations.Migration):
    """Initial schema for the keops base app: Action, Menu, Rule, and the
    ReportAction/WindowAction subtypes of Action.

    Auto-generated Django migration — do not edit operations by hand.
    """

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('auth', '0008_alter_user_username_max_length'),
    ]

    operations = [
        # Base Action table; subclassed below via multi-table inheritance.
        migrations.CreateModel(
            name='Action',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', keops.models.fields.CharField(blank=True, max_length=256, null=True)),
                ('action_type', keops.models.fields.CharField(blank=True, editable=False, max_length=16, null=True)),
                ('usage', keops.models.fields.TextField(blank=True, null=True)),
                ('help', keops.models.fields.TextField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', keops.models.fields.CharField(blank=True, max_length=128, null=True)),
                ('active', models.BooleanField(default=True, verbose_name='active')),
                ('sequence', keops.models.fields.IntegerField(blank=True, default=100, null=True)),
                ('icon', keops.models.fields.CharField(blank=True, max_length=256, null=True)),
                ('url', keops.models.fields.CharField(blank=True, max_length=512, null=True)),
            ],
            options={
                'ordering': ('sequence', 'parent_id', 'id'),
            },
        ),
        migrations.CreateModel(
            name='Rule',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', keops.models.fields.CharField(blank=True, max_length=256, null=True, unique=True)),
                ('model', keops.models.fields.CharField(blank=True, db_index=True, max_length=128, null=True)),
                ('active', models.BooleanField(db_index=True, verbose_name='Active')),
                ('domain', keops.models.fields.TextField(blank=True, null=True, verbose_name='Domain')),
                ('group', keops.models.fields.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='auth.Group', verbose_name='Group')),
            ],
        ),
        # Multi-table-inheritance child of Action (parent_link OneToOne).
        migrations.CreateModel(
            name='ReportAction',
            fields=[
                ('action_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='base.Action')),
            ],
            bases=('base.action',),
        ),
        # Multi-table-inheritance child of Action with window-specific fields.
        migrations.CreateModel(
            name='WindowAction',
            fields=[
                ('action_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='base.Action')),
                ('domain', keops.models.fields.TextField(blank=True, null=True)),
                ('context', keops.models.fields.TextField(blank=True, null=True)),
                ('object_id', keops.models.fields.BigIntegerField(blank=True, null=True)),
                ('target', keops.models.fields.CharField(blank=True, choices=[('current', 'Current Window'), ('new', 'New')], default='current', max_length=16, null=True)),
                ('view_mode', keops.models.fields.CharField(blank=True, default='list,form', max_length=128, null=True)),
                ('view_type', keops.models.fields.CharField(blank=True, choices=[('list', 'List'), ('form', 'Form')], default='form', max_length=16, null=True)),
                ('limit', models.PositiveIntegerField(default=100)),
                ('filter', models.BooleanField(default=False)),
                ('auto_search', models.BooleanField(default=True)),
                ('model', keops.models.fields.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='contenttypes.ContentType')),
                ('source_model', keops.models.fields.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='contenttypes.ContentType')),
            ],
            options={
                'db_table': 'base_window_action',
            },
            bases=('base.action',),
        ),
        migrations.AddField(
            model_name='menu',
            name='action',
            field=keops.models.fields.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='base.Action'),
        ),
        migrations.AddField(
            model_name='menu',
            name='groups',
            field=models.ManyToManyField(blank=True, to='auth.Group'),
        ),
        # Self-referential FK giving menus a tree structure.
        migrations.AddField(
            model_name='menu',
            name='parent',
            field=keops.models.fields.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='base.Menu'),
        ),
    ]
|
{
"content_hash": "38cc3b41c42c11c1f5dbbf82716ddacc",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 188,
"avg_line_length": 54.697916666666664,
"alnum_prop": 0.5913159398209865,
"repo_name": "katrid/keops",
"id": "1b83de9ecf23216c60b487cdb24b7272cc873e61",
"size": "5324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keops/contrib/base/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289902"
},
{
"name": "CoffeeScript",
"bytes": "126960"
},
{
"name": "HTML",
"bytes": "23179"
},
{
"name": "JavaScript",
"bytes": "2721551"
},
{
"name": "Python",
"bytes": "77093"
}
],
"symlink_target": ""
}
|
import six
def create_multi_node_evaluator(actual_evaluator, communicator):
    """Create a multi node evaluator from a normal evaluator.

    Actually this method patches the evaluator to work in multi node
    environment. This method adds several hidden attributes starting
    with `_mn_` prefix.

    Args:
        actual_evaluator: evaluator to be patched
            (e.g., ``chainer.training.extensions.Evaluator``)
        communicator: ChainerMN communicator

    Returns:
        The multi-node patched ``actual_evaluator``.

    .. note:: After patched, original evaluator does not work
        correctly in non-MPI environment.
    """
    # Local import: stdlib ``types.MethodType`` replaces the third-party
    # ``six.create_bound_method`` helper (equivalent on Python 2 and 3).
    import types

    # Keep a handle to the unpatched evaluate() so the wrapper can call it.
    actual_evaluator._mn_original_evaluate = actual_evaluator.evaluate
    actual_evaluator._mn_communicator = communicator

    def new_evaluate(self):
        # Evaluate locally, then average each metric across all workers.
        local_mean_dict = self._mn_original_evaluate()
        global_mean_dict = {
            name:
            self._mn_communicator.allreduce_obj(
                value) / self._mn_communicator.size
            for name, value in sorted(local_mean_dict.items())
        }
        return global_mean_dict

    # Rebind evaluate() so calls on the patched object hit the wrapper.
    actual_evaluator.evaluate = types.MethodType(
        new_evaluate, actual_evaluator)

    return actual_evaluator
|
{
"content_hash": "df9d57057ce2beea23436e7ab17e1e09",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 31.94871794871795,
"alnum_prop": 0.6613162118780096,
"repo_name": "tkerola/chainer",
"id": "4144422710f830298b46bf3e209a7428b4a0cf43",
"size": "1246",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "chainermn/extensions/multi_node_evaluator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3471733"
}
],
"symlink_target": ""
}
|
"""Tests for the file entry implementation using SQLite blob."""
import unittest
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from dfvfs.vfs import sqlite_blob_file_entry
from dfvfs.vfs import sqlite_blob_file_system
from tests import test_lib as shared_test_lib
class SQLiteBlobFileEntryTest(shared_test_lib.BaseTestCase):
"""Tests for the SQLite blob file entry."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['blob.db'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
self._sqlite_blob_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_SQLITE_BLOB, column_name='blobs',
parent=test_os_path_spec, row_condition=('name', '==', 'mmssms.db'),
table_name='myblobs')
self._sqlite_blob_path_spec_2 = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_SQLITE_BLOB, column_name='blobs',
parent=test_os_path_spec, row_index=2, table_name='myblobs')
self._sqlite_blob_path_spec_3 = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_SQLITE_BLOB, column_name='blobs',
parent=test_os_path_spec, row_condition=('name', '==', 4),
table_name='myblobs')
self._sqlite_blob_path_spec_directory = (
path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_SQLITE_BLOB, column_name='blobs',
parent=test_os_path_spec, table_name='myblobs'))
self._file_system = sqlite_blob_file_system.SQLiteBlobFileSystem(
self._resolver_context, self._sqlite_blob_path_spec)
self._file_system.Open()
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def testIntialize(self):
"""Test the __init__ function."""
file_entry = sqlite_blob_file_entry.SQLiteBlobFileEntry(
self._resolver_context, self._file_system, self._sqlite_blob_path_spec)
self.assertIsNotNone(file_entry)
file_entry = sqlite_blob_file_entry.SQLiteBlobFileEntry(
self._resolver_context, self._file_system,
self._sqlite_blob_path_spec_2)
self.assertIsNotNone(file_entry)
# TODO: add tests for _GetDirectory
# TODO: add tests for _GetSubFileEntries
def testName(self):
"""Test name property."""
file_entry = self._file_system.GetFileEntryByPathSpec(
self._sqlite_blob_path_spec)
self.assertTrue(file_entry.name == (
'WHERE name == \'mmssms.db\''))
file_entry = self._file_system.GetFileEntryByPathSpec(
self._sqlite_blob_path_spec_3)
self.assertTrue(file_entry.name == 'WHERE name == 4')
file_entry = self._file_system.GetFileEntryByPathSpec(
self._sqlite_blob_path_spec_directory)
self.assertTrue(file_entry.name == 'myblobs.blobs')
def testSize(self):
"""Test the size property."""
file_entry = sqlite_blob_file_entry.SQLiteBlobFileEntry(
self._resolver_context, self._file_system, self._sqlite_blob_path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.size, 110592)
# TODO: add tests for GetNumberOfRows
def testGetFileEntryByPathSpec(self):
"""Test the get a file entry by path specification functionality."""
file_entry = self._file_system.GetFileEntryByPathSpec(
self._sqlite_blob_path_spec)
self.assertIsNotNone(file_entry)
file_entry = self._file_system.GetFileEntryByPathSpec(
self._sqlite_blob_path_spec_2)
self.assertIsNotNone(file_entry)
def testGetParentFileEntry(self):
"""Tests the GetParentFileEntry function."""
file_entry = self._file_system.GetFileEntryByPathSpec(
self._sqlite_blob_path_spec)
self.assertIsNotNone(file_entry)
parent_file_entry = file_entry.GetParentFileEntry()
self.assertIsNotNone(parent_file_entry)
self.assertEqual(parent_file_entry.name, 'myblobs.blobs')
def testIsFunctions(self):
"""Test the Is? functions."""
file_entry = self._file_system.GetFileEntryByPathSpec(
self._sqlite_blob_path_spec)
self.assertFalse(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertFalse(file_entry.IsDirectory())
self.assertTrue(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
def testSubFileEntries(self):
"""Test the sub file entries iteration functionality."""
file_entry = self._file_system.GetFileEntryByPathSpec(
self._sqlite_blob_path_spec_directory)
self.assertTrue(file_entry.IsDirectory())
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_sub_file_entries, 4)
expected_sub_file_entry_names = [
'OFFSET 0', 'OFFSET 1', 'OFFSET 2', 'OFFSET 3']
sub_file_entry_names = []
for sub_file_entry in file_entry.sub_file_entries:
sub_file_entry_names.append(sub_file_entry.name)
self.assertEqual(
len(sub_file_entry_names), len(expected_sub_file_entry_names))
self.assertEqual(
sorted(sub_file_entry_names), expected_sub_file_entry_names)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "f8f7a259d30014c6a54ceb2522361013",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 35.52564102564103,
"alnum_prop": 0.6975821003247925,
"repo_name": "log2timeline/dfvfs",
"id": "1e14d081dcf30ea5cc907cc43b9b643edf5bf143",
"size": "5588",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/vfs/sqlite_blob_file_entry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14212"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2176548"
},
{
"name": "Shell",
"bytes": "19355"
}
],
"symlink_target": ""
}
|
from telemetry.core import platform
from telemetry.util import wpr_modes
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_finder_exceptions
class ProfileExtender(object):
  """Abstract base class for an object that constructs a Chrome profile."""

  def __init__(self, finder_options):
    """Initializer.

    |finder_options| is an instance of BrowserFinderOptions. When subclass
    implementations of this method inevitably attempt to find and launch a
    browser, they should pass |finder_options| to the relevant methods.

    Several properties of |finder_options| might require direct manipulation by
    subclasses. These are:
      |finder_options.output_profile_path|: The path at which the profile
      should be created.
      |finder_options.browser_options.profile_dir|: If this property is None,
      then a new profile is created. Otherwise, the existing profile is
      appended on to.
    """
    self._finder_options = finder_options
    # A reference to the browser that will be performing all of the tab
    # navigations.
    # This member is initialized during SetUpBrowser().
    self._browser = None

  def Run(self):
    """Creates or extends the profile."""
    # Subclasses must implement the actual profile construction.
    raise NotImplementedError()

  def WebPageReplayArchivePath(self):
    """Returns the path to the WPR archive.

    Can be overridden by subclasses. Returning None (the default) disables
    Web Page Replay entirely (see _SetUpWebPageReplay).
    """
    return None

  @property
  def finder_options(self):
    """The options to use to find and run the browser."""
    return self._finder_options

  @property
  def profile_path(self):
    """The path of the profile that the browser will use while it's running."""
    return self.finder_options.output_profile_path

  @property
  def browser(self):
    # The browser launched by SetUpBrowser(); None before setup and after
    # TearDownBrowser().
    return self._browser

  def EnabledOSList(self):
    """Returns a list of OSes that this extender can run on.

    Can be overridden by subclasses.

    Returns:
      List of OS ('win', 'mac', or 'linux') that this extender can run on.
      None if this extender can run on all platforms.
    """
    return None

  def SetUpBrowser(self):
    """Finds and starts the browser.

    Can be overridden by subclasses. The subclass implementation must call the
    super class implementation.

    Subclasses do not need to call this method. This method is only necessary
    if the subclass needs to start a browser. If a subclass does call this
    method, the subclass must also call TearDownBrowser().
    """
    possible_browser = self._GetPossibleBrowser(self.finder_options)
    # Bail out early when the subclass declared an OS allow-list that does
    # not include the target browser's OS.
    os_name = possible_browser.platform.GetOSName()
    enabled_os_list = self.EnabledOSList()
    if enabled_os_list is not None and os_name not in enabled_os_list:
      raise NotImplementedError(
          'This profile extender on %s is not yet supported'
          % os_name)
    assert possible_browser.supports_tab_control
    assert (platform.GetHostPlatform().GetOSName() in
            ["win", "mac", "linux"])
    # Replay must be configured before the browser is created so the
    # browser starts with the right proxy/replay arguments.
    self._SetUpWebPageReplay(self.finder_options, possible_browser)
    self._browser = possible_browser.Create(self.finder_options)

  def TearDownBrowser(self):
    """Tears down the browser.

    Can be overridden by subclasses. The subclass implementation must call the
    super class implementation.
    """
    # Idempotent: safe to call when no browser was started.
    if self._browser:
      self._browser.Close()
      self._browser = None

  def FetchWebPageReplayArchives(self):
    """Fetches the web page replay archives.

    Can be overridden by subclasses.
    """
    pass

  def _SetUpWebPageReplay(self, finder_options, possible_browser):
    """Sets up Web Page Replay, if necessary."""
    # No archive path means the subclass does not use WPR; nothing to do.
    wpr_archive_path = self.WebPageReplayArchivePath()
    if not wpr_archive_path:
      return
    self.FetchWebPageReplayArchives()
    # The browser options needs to be passed to both the network controller
    # as well as the browser backend.
    browser_options = finder_options.browser_options
    if finder_options.use_live_sites:
      browser_options.wpr_mode = wpr_modes.WPR_OFF
    else:
      browser_options.wpr_mode = wpr_modes.WPR_REPLAY
    network_controller = possible_browser.platform.network_controller
    make_javascript_deterministic = True
    network_controller.SetReplayArgs(
        wpr_archive_path, browser_options.wpr_mode, browser_options.netsim,
        browser_options.extra_wpr_args, make_javascript_deterministic)

  def _GetPossibleBrowser(self, finder_options):
    """Return a possible_browser with the given options."""
    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
      raise browser_finder_exceptions.BrowserFinderException(
          'No browser found.\n\nAvailable browsers:\n%s\n' %
          '\n'.join(browser_finder.GetAllAvailableBrowserTypes(finder_options)))
    # Record the resolved browser type back onto the options so later
    # consumers see the concrete type that was found.
    finder_options.browser_options.browser_type = (
        possible_browser.browser_type)
    return possible_browser
|
{
"content_hash": "6b4653e58fb8e8a4694b6e3526e89a19",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 80,
"avg_line_length": 33.54109589041096,
"alnum_prop": 0.7104349601797019,
"repo_name": "lihui7115/ChromiumGStreamerBackend",
"id": "95b9fb0f55b337bed3254787ec3c7bdc21bd31d2",
"size": "5060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/perf/profile_creators/profile_extender.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9508834"
},
{
"name": "C++",
"bytes": "242598549"
},
{
"name": "CSS",
"bytes": "943747"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27281878"
},
{
"name": "Java",
"bytes": "14561064"
},
{
"name": "JavaScript",
"bytes": "20540839"
},
{
"name": "Makefile",
"bytes": "70864"
},
{
"name": "Objective-C",
"bytes": "1745880"
},
{
"name": "Objective-C++",
"bytes": "10008668"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "482954"
},
{
"name": "Python",
"bytes": "8626890"
},
{
"name": "Shell",
"bytes": "481888"
},
{
"name": "Standard ML",
"bytes": "5106"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
from fabric.api import *
from fabric.contrib.files import *
from fabric.contrib.project import rsync_project
from subprocess import check_output
env.use_ssh_config = True
env.user = 'ubuntu'
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
HOME_DIR = '/home/ubuntu'
DEPLOY_PATH = '%s/cabot' % HOME_DIR
LOG_DIR = '/var/log/cabot/'
VENV_DIR = '%s/venv' % HOME_DIR
BACKUP_DIR = '/tmp/'
PG_DATABASE = 'index'
PG_USERNAME = 'cabot'
PG_PASSWORD = 'cabot' # You should probably change this
def _ensure_dirs():
    """Create required directories on the remote host with open permissions."""
    for directory in [LOG_DIR]:
        sudo('mkdir -p %s' % directory)
        sudo('chmod -R 777 %s' % directory)
def _setup_venv():
    """Create the virtualenv on the remote host if it does not exist yet."""
    with settings(warn_only=True):
        venv_missing = sudo('test -d %s' % VENV_DIR).failed
        if venv_missing:
            sudo('virtualenv %s' % VENV_DIR)
def install_requirements(deploy_path=DEPLOY_PATH):
    """Install the project's Python requirements into the remote virtualenv."""
    pip_command = (
        "{venv}/bin/pip install -r requirements.txt --exists-action=w".format(
            venv=VENV_DIR))
    with cd(deploy_path):
        with prefix("source {venv}/bin/activate".format(venv=VENV_DIR)):
            sudo(pip_command)
def run_migrations(deploy_path=DEPLOY_PATH):
    """Run database sync and migrations on the remote host.

    Runs syncdb followed by the cabotapp migrations; the djcelery
    migration is allowed to fail (warn_only) for backwards compatibility.
    """
    with cd(deploy_path):
        with prefix("source {venv}/bin/activate".format(venv=VENV_DIR)):
            sudo(
                "foreman run python manage.py syncdb -e conf/{env}.env".format(env=env.deploy_version))
            sudo(
                "foreman run python manage.py migrate cabotapp --noinput -e conf/{env}.env".format(env=env.deploy_version))
            # Wrap in failure for legacy reasons
            with settings(warn_only=True):
                sudo(
                    "foreman run python manage.py migrate djcelery --noinput -e conf/{env}.env".format(env=env.deploy_version))
def collect_static(deploy_path=DEPLOY_PATH):
    """Collect and compress the static assets on the remote host."""
    manage_commands = (
        "foreman run python manage.py collectstatic --noinput -e conf/{env}.env",
        "foreman run python manage.py compress -e conf/{env}.env",
    )
    with cd(deploy_path):
        with prefix("source {venv}/bin/activate".format(venv=VENV_DIR)):
            for command in manage_commands:
                sudo(command.format(env=env.deploy_version))
def setup_upstart(deploy_path=DEPLOY_PATH):
    """Export the Procfile processes as upstart services via foreman."""
    with cd(deploy_path):
        # Point at master (i.e. symlinked) path
        procfile = os.path.join(DEPLOY_PATH, 'Procfile')
        env_file = os.path.join(DEPLOY_PATH, 'conf', '%s.env' %
                                env.deploy_version)
        template_file = os.path.join(DEPLOY_PATH, 'upstart')
        sudo('foreman export upstart /etc/init -f {conf} -e {env} -u ubuntu -a cabot -t {tmplt}'.format(
            conf=procfile, env=env_file, tmplt=template_file))
def production():
    """
    Select production instance(s)
    """
    # Fabric will run all subsequent tasks against every host in env.hosts.
    env.hosts = ['cabot.arachnys.com']
def restart():
    """Restart the cabot upstart service, starting it if it was stopped."""
    with settings(warn_only=True):
        restart_failed = sudo('restart cabot').failed
        if restart_failed:
            # 'restart' fails when the service is not running; fall back
            # to 'start' in that case.
            sudo('start cabot')
def stop():
    """Stop the cabot upstart service; failure (already stopped) is ignored."""
    with settings(warn_only=True):
        sudo('stop cabot')
def provision():
    """
    Provision a clean Ubuntu 12.04 instance with dependencies
    """
    # Upload the local public key so the remote setup script can authorise
    # future SSH logins from this machine.
    with open(os.path.expanduser('~/.ssh/id_rsa.pub')) as f:
        local_ssh_key = f.read().strip('\n')
    put('bin/setup_dependencies.sh', '/tmp/setup_dependencies.sh')
    sudo('LOCAL_SSH_KEY="%s" bash /tmp/setup_dependencies.sh' % local_ssh_key)
    # Clean up
    run('rm /tmp/setup_dependencies.sh')
def deploy(deploy_version=None):
    """
    Deploy a new version of code to production or test server.

    Push code to remote server, install requirements, apply migrations,
    collect and compress static assets, export foreman to upstart,
    restart service
    """
    # TODO: replace this with
    # - zip up working directory
    # - upload and unzip into DEPLOY_PATH
    env.deploy_version = deploy_version or 'production'
    # Release directory name: today's date plus the abbreviated git hash
    # of HEAD, e.g. 2014-01-01-abc1234.
    dirname = check_output(
        ["echo \"$(date +'%Y-%m-%d')-$(git log --pretty=format:'%h' -n 1)\""], shell=True).strip('\n ')
    deploy_path = os.path.join(HOME_DIR, dirname)
    run('mkdir -p {}'.format(deploy_path))
    print 'Uploading project to %s' % deploy_path
    rsync_project(
        remote_dir=deploy_path,
        local_dir='./',
        exclude=['.git', 'backups', 'venv',
                 'static/CACHE', '.vagrant', '*.pyc', 'dev.db'],
    )
    with cd(deploy_path):
        _setup_venv()
        create_database()
        install_requirements(deploy_path)
        run_migrations(deploy_path)
        collect_static(deploy_path)
        # This may cause a bit of downtime
        # Atomically repoint the DEPLOY_PATH symlink at the new release.
        run('ln -sfn {new} {current}'.format(
            new=deploy_path,
            current=DEPLOY_PATH
        ))
        setup_upstart(deploy_path)
    restart()
    print "Done!"
def backup():
    """
    Back up database locally

    Dumps the Postgres database to a gzipped file on the remote host and
    downloads it into backups/. Uses the PG_* module constants instead of
    the previously hard-coded 'cabot'/'index' credentials, so changing
    the constants at the top of this file is picked up here too.
    TODO: send backups to s3
    """
    backup_file = 'outfile.sql.gz'
    with cd(BACKUP_DIR):
        run('PGPASSWORD=%s pg_dump -U %s %s | gzip > %s' % (
            PG_PASSWORD, PG_USERNAME, PG_DATABASE, backup_file))
        get(backup_file, 'backups/%(basename)s')
def create_database():
    """Creates role and database"""
    # warn_only: both statements fail harmlessly when the role/database
    # already exist, which makes this task safe to re-run on every deploy.
    with settings(warn_only=True):
        sudo(
            'psql -c "CREATE USER %s WITH NOCREATEDB NOCREATEUSER ENCRYPTED PASSWORD E\'%s\'"' %
            (PG_USERNAME, PG_PASSWORD), user='postgres')
        sudo('psql -c "CREATE DATABASE %s WITH OWNER %s"' %
             (PG_DATABASE, PG_USERNAME), user='postgres')
@parallel
def logs():
    """
    Tail logfiles
    """
    # @parallel: tails logs on all selected hosts at the same time.
    sudo('tail -f {logdir}* /var/log/nginx/*.log'.format(logdir=LOG_DIR))
|
{
"content_hash": "5b22b732feaa9c049236eecc5ae003bc",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 127,
"avg_line_length": 31.924418604651162,
"alnum_prop": 0.6049899836095429,
"repo_name": "labxio/cabot",
"id": "7bc0e844e320b446ef44e2a631a79ec52b89ba77",
"size": "5491",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21910"
},
{
"name": "JavaScript",
"bytes": "1224213"
},
{
"name": "Python",
"bytes": "124963"
},
{
"name": "Shell",
"bytes": "6210"
}
],
"symlink_target": ""
}
|
from Folders import *
|
{
"content_hash": "9b3e5199bda8202882004a29bbfdd4bf",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.7727272727272727,
"repo_name": "tartakynov/enso",
"id": "c654fcffd3ba5615289a3595eefd65276f9b9f7a",
"size": "22",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enso/platform/win32/system/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9734342"
},
{
"name": "C++",
"bytes": "403798"
},
{
"name": "JavaScript",
"bytes": "3338"
},
{
"name": "Objective-C",
"bytes": "15094"
},
{
"name": "Python",
"bytes": "765642"
},
{
"name": "Shell",
"bytes": "235"
}
],
"symlink_target": ""
}
|
class Solution:
    # @param node, a undirected graph node
    # @return a undirected graph node
    def cloneGraph(self, node):
        """Deep-copy the undirected graph reachable from `node`.

        Fixes two defects in the previous version:
        * the neighbor loop ran even for already-cloned nodes, causing
          infinite recursion on any cyclic graph and duplicate neighbors;
        * the memo dict was a class attribute (also shadowing the builtin
          `dict`), so clones leaked between separate calls.
        """
        if not node:
            return None
        memo = {}

        def clone(original):
            copy = memo.get(original)
            if copy is None:
                copy = UndirectedGraphNode(original.label)
                # Register the copy before recursing so cycles terminate.
                memo[original] = copy
                for neighbor in original.neighbors:
                    copy.neighbors.append(clone(neighbor))
            return copy

        return clone(node)
|
{
"content_hash": "b7e76cde5242fdb8b753e66f9c67350d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 68,
"avg_line_length": 30.428571428571427,
"alnum_prop": 0.5868544600938967,
"repo_name": "KickAlgorithm/leetcode",
"id": "189907fdf164b1152c52aeb2bf0f18eef11071e4",
"size": "581",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/131-140/Clone Graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "302274"
},
{
"name": "Java",
"bytes": "2393"
},
{
"name": "Python",
"bytes": "35212"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.test import Client
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
User = get_user_model()
USERNAME = 'test'
PASSWORD = 'test'
EMAIL = 'test@test.com'
NEXT = '/about/'
class ViewTests(TestCase):
    """
    Tests the core views.
    """
    def setUp(self):
        # One known active user plus a fresh test client for every test.
        self.user = User.objects.create_user(USERNAME, EMAIL, PASSWORD)
        self.client = Client()

    def test_login_view(self):
        # The login page renders with its sign-in form.
        response = self.client.get(reverse('account_login'))
        self.assertContains(response, 'Sign In')

    def test_login_valid_user(self):
        # Valid credentials log the user in and redirect to the site root.
        response = self.client.post(reverse('account_login'),
                                    {
                                        'username': USERNAME,
                                        'password': PASSWORD,
                                    })
        self.assertRedirects(response, '/')

    def test_login_valid_user_next(self):
        # A redir_path in the POST overrides the default post-login target.
        response = self.client.post(reverse('account_login'),
                                    {
                                        'username': USERNAME,
                                        'password': PASSWORD,
                                        'redir_path': NEXT,
                                    })
        self.assertRedirects(response, NEXT)

    def test_login_invalid_user(self):
        # A wrong password re-renders the form with an error message.
        response = self.client.post(reverse('account_login'),
                                    {
                                        'username': USERNAME,
                                        'password': 'foo',
                                    })
        self.assertContains(response, 'Invalid username or password.')

    def test_login_inactive_user(self):
        # Correct credentials for a deactivated account are rejected with a
        # distinct message.
        self.user.is_active = False
        self.user.save()
        response = self.client.post(reverse('account_login'),
                                    {
                                        'username': USERNAME,
                                        'password': PASSWORD,
                                    })
        self.assertContains(response, 'Account has been disabled.')

    def test_logout(self):
        # follow=True so we assert against the page shown after the
        # logout redirect.
        response = self.client.get(reverse('account_logout'), follow=True)
        self.assertContains(response, 'Successfully signed out!')
{
"content_hash": "119d9315284ea240671e0539cbb06ccd",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 74,
"avg_line_length": 36.34375,
"alnum_prop": 0.4815133276010318,
"repo_name": "ainmosni/jolly_roger",
"id": "da0af52f58820c1cd09be1fcea472f7012ee93a4",
"size": "2350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jolly_roger/core/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "595"
},
{
"name": "JavaScript",
"bytes": "394"
},
{
"name": "Python",
"bytes": "120812"
},
{
"name": "Shell",
"bytes": "5105"
}
],
"symlink_target": ""
}
|
import random
import hashlib
import smtplib
import socket
import sys
import time
import mailpile.util
from mailpile.util import *
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.config import ssl, socks
from mailpile.mailutils import CleanMessage, MessageAsString
from mailpile.eventlog import Event
from mailpile.safe_popen import Popen, PIPE
from mailpile.vcard import VCardLine
def sha512_512k(data):
    """Return a hex digest of *data*, forcing >= 512kB of SHA-512 input.

    On each iteration one hexdigest (128 bytes) is prepended to the data,
    which also prevents reuse of hash state, giving:

        Total == 128 * (0 + 1 + 2 + ... + 90) + 128 == 128 * 4096 == 524288

    Max memory use is sadly only 10KB or so - hardly memory-hard. :-)
    Oh well! I'm no cryptographer, and yes, we should probably just
    be using scrypt.

    Accepts bytes or text; text is UTF-8 encoded first, so the function
    behaves identically on Python 2 and 3 and no longer breaks on unicode
    input. For byte strings the result is unchanged from the old code.
    """
    sha512 = hashlib.sha512
    if not isinstance(data, bytes):
        data = data.encode('utf-8')
    for i in range(0, 91):
        # hexdigest() is text; encode to ASCII so the concatenation stays
        # in the bytes domain (a no-op on Python 2 str).
        data = sha512(data).hexdigest().encode('ascii') + data
    return sha512(data).hexdigest()
def sha512_512kCheck(challenge, bits, solution):
    """Return True if *solution* collides with *challenge* to *bits* bits.

    The digest of 'solution-challenge' must start with bits/4 zero
    hex characters.
    """
    zero_nibbles = bits // 4
    digest = sha512_512k('-'.join([solution, challenge]))
    return digest.startswith('0' * zero_nibbles)
def sha512_512kCollide(challenge, bits, callback1k=None):
    """Brute-force a 'j-i' prefix whose digest has bits/4 leading zeros.

    callback1k, if given, is invoked once per outer batch of 1024
    attempts with the batch index. Returns the 'j-i' solution string,
    or None if the search space is exhausted.
    """
    target = '0' * (bits // 4)
    for i in xrange(1, 0x10000):
        if callback1k is not None:
            callback1k(i)
        challenge_suffix = '-'.join([str(i), challenge])
        for j in xrange(0, 1024):
            candidate = '-'.join([str(j), challenge_suffix])
            if sha512_512k(candidate).startswith(target):
                # Only the 'j-i' prefix of the candidate is the solution.
                return '-'.join(candidate.split('-')[:2])
    return None
SMTORP_HASHCASH_RCODE = 450
SMTORP_HASHCASH_PREFIX = 'Please collide'
SMTORP_HASHCASH_FORMAT = (SMTORP_HASHCASH_PREFIX +
' %(bits)d,%(challenge)s or retry. See: %(url)s')
def SMTorP_HashCash(rcpt, msg, callback1k=None):
    """Solve the hashcash challenge from an SMTorP 450 server response.

    Parses the '<bits>,<challenge>' pair out of *msg* (which starts with
    SMTORP_HASHCASH_PREFIX) and returns '<rcpt>##<solution>' to be
    retried as the recipient address.
    """
    bits_challenge_etc = msg[len(SMTORP_HASHCASH_PREFIX):].strip()
    bits, challenge = bits_challenge_etc.split()[0].split(',', 1)

    def cb(*args, **kwargs):
        # Yield to other threads between hashing batches, then forward
        # progress to the caller's callback, if any.
        play_nice_with_threads()
        if callback1k:
            callback1k(*args, **kwargs)

    return '%s##%s' % (rcpt, sha512_512kCollide(challenge, int(bits),
                                                callback1k=cb))
def _AddSocksHooks(cls, SSL=False):
    """Subclass the given smtplib class to allow pluggable socket classes.

    Passing socket_cls to connect() makes it possible to route the
    connection through e.g. a SOCKS/Tor socket instead of a plain one.
    """
    class Socksified(cls):
        def _get_socket(self, host, port, timeout):
            # NOTE(review): `timeout` is accepted but not applied to the
            # socket here - confirm whether that is intentional.
            new_socket = self.socket()
            new_socket.connect((host, port))
            if SSL and ssl is not None:
                new_socket = ssl.wrap_socket(new_socket,
                                             self.keyfile, self.certfile)
                self.file = smtplib.SSLFakeFile(new_socket)
            return new_socket

        def connect(self, host='localhost', port=0, socket_cls=None):
            # Default to the standard socket; callers may supply e.g. a
            # SOCKS-wrapped socket class for Tor connections.
            self.socket = socket_cls or socket.socket
            return cls.connect(self, host=host, port=port)

    return Socksified
# Plain-text SMTP with the pluggable-socket (SOCKS/Tor) hooks applied.
class SMTP(_AddSocksHooks(smtplib.SMTP)):
    pass

if ssl is not None:
    # SSL-wrapped SMTP with the same hooks.
    class SMTP_SSL(_AddSocksHooks(smtplib.SMTP_SSL, SSL=True)):
        pass
else:
    # Without the ssl module SMTP-over-SSL is unavailable; alias the
    # plain class so imports elsewhere keep working.
    SMTP_SSL = SMTP
class SendMailError(IOError):
    """Raised when a message could not be delivered.

    The optional *details* mapping carries structured information about
    the failure (e.g. SMTP error codes) and is exposed as `error_info`.
    """
    def __init__(self, msg, details=None):
        super(SendMailError, self).__init__(msg)
        self.error_info = details or {}
def _RouteTuples(session, from_to_msg_ev_tuples, test_route=None):
    """Expand (frm, to, msg, events) tuples into per-route delivery tuples.

    Recipients that any event already records as delivered are skipped;
    the remaining recipients are grouped by the textual route used to
    reach them. Returns a list of (frm, route, recipients, msg, events).
    If *test_route* is given it overrides the per-sender configuration.
    """
    tuples = []
    for frm, to, msg, events in from_to_msg_ev_tuples:
        dest = {}
        for recipient in to:
            # If any of the events thinks this message has been delivered,
            # then don't try to send it again.
            frm_to = '>'.join([frm, recipient])
            for ev in (events or []):
                if ev.private_data.get(frm_to, False):
                    recipient = None
                    break
            if recipient:
                route = {"protocol": "",
                         "username": "",
                         "password": "",
                         "command": "",
                         "host": "",
                         "port": 25
                         }
                if test_route:
                    route.update(test_route)
                else:
                    route.update(session.config.get_sendmail(frm, [recipient]))
                if route["command"]:
                    txtroute = "|%(command)s" % route
                else:
                    # FIXME: This is dumb, makes it hard to handle usernames
                    #        or passwords with funky characters in them :-(
                    txtroute = "%(protocol)s://%(username)s:%(password)s@" \
                               + "%(host)s:%(port)d"
                    txtroute %= route
                # Group recipients sharing the same route into one delivery.
                dest[txtroute] = dest.get(txtroute, [])
                dest[txtroute].append(recipient)
        for route in dest:
            tuples.append((frm, route, dest[route], msg, events))
    return tuples
def SendMail(session, msg_mid, from_to_msg_ev_tuples,
             test_only=False, test_route=None):
    """Deliver one or more messages over their configured routes.

    Args:
      session: active Mailpile session (config, UI, event log).
      msg_mid: metadata ID of the message, recorded in contact history.
      from_to_msg_ev_tuples: (from, recipients, message, events) tuples.
      test_only: if True, only run the connect/login sequence.
      test_route: optional route dict overriding per-sender configuration.

    Per-recipient delivery state is tracked in each event's private_data,
    so retried sends skip recipients that already succeeded. Returns True
    on success; raises SendMailError on failure.

    Fixes over the previous version:
      * use random.shuffle() instead of sorting by a random key (which is
        not a valid shuffle);
      * smtp_do_or_die no longer clobbers its descriptive message with the
        server reply, so failures report both;
      * the DATA-rejected fail() call was missing its required `events`
        argument and raised TypeError instead of SendMailError;
      * "recpient" typo fixed in the recipient-rejected error message.
    """
    routes = _RouteTuples(session, from_to_msg_ev_tuples,
                          test_route=test_route)

    # Randomize order of routes, so we don't always try the broken
    # one first. Any failure will bail out, but we do keep track of
    # our successes via. the event, so eventually everything sendable
    # should get sent.
    random.shuffle(routes)

    # Update initial event state before we go through and start
    # trying to deliver stuff.
    for frm, sendmail, to, msg, events in routes:
        for ev in (events or []):
            for rcpt in to:
                ev.private_data['>'.join([frm, rcpt])] = False

    for frm, sendmail, to, msg, events in routes:
        for ev in events:
            ev.data['recipients'] = len(ev.private_data.keys())
            ev.data['delivered'] = len([k for k in ev.private_data
                                        if ev.private_data[k]])

    def mark(msg, events, log=True):
        # Update UI status and (optionally) persist progress on the events.
        for ev in events:
            ev.flags = Event.RUNNING
            ev.message = msg
            if log:
                session.config.event_log.log_event(ev)
        session.ui.mark(msg)

    def fail(msg, events, details=None):
        # Record the error on the events, then abort with SendMailError.
        mark(msg, events, log=True)
        for ev in events:
            ev.data['last_error'] = msg
        raise SendMailError(msg, details=details)

    def smtp_do_or_die(msg, events, method, *args, **kwargs):
        # Keep the descriptive `msg` separate from the server reply so the
        # failure reports both (the old code overwrote `msg`).
        rc, smtp_msg = method(*args, **kwargs)
        if rc != 250:
            fail(msg + ' (%s %s)' % (rc, smtp_msg), events,
                 details={'smtp_error': '%s: %s' % (rc, smtp_msg)})

    # Do the actual delivering...
    for frm, sendmail, to, msg, events in routes:
        frm_vcard = session.config.vcards.get_vcard(frm)
        update_to_vcards = msg and msg["x-mp-internal-pubkeys-attached"]

        if 'sendmail' in session.config.sys.debug:
            sys.stderr.write(_('SendMail: from %s (%s), to %s via %s\n'
                               ) % (frm,
                                    frm_vcard and frm_vcard.random_uid or '',
                                    to, sendmail.split('@')[-1]))
        sm_write = sm_close = lambda: True

        mark(_('Connecting to %s') % sendmail.split('@')[-1], events)

        if sendmail.startswith('|'):
            # Pipe the message to an external command (e.g. sendmail).
            sendmail %= {"rcpt": ",".join(to)}
            cmd = sendmail[1:].strip().split()
            proc = Popen(cmd, stdin=PIPE, long_running=True)
            sm_startup = None
            sm_write = proc.stdin.write

            def sm_close():
                proc.stdin.close()
                rv = proc.wait()
                if rv != 0:
                    fail(_('%s failed with exit code %d') % (cmd, rv), events,
                         details={'failed_command': cmd,
                                  'exit_code': rv})

            sm_cleanup = lambda: [proc.stdin.close(), proc.wait()]
            # FIXME: Update session UI with progress info
            for ev in events:
                ev.data['proto'] = 'subprocess'
                ev.data['command'] = cmd[0]

        elif (sendmail.startswith('smtp:') or
                sendmail.startswith('smtorp:') or
                sendmail.startswith('smtpssl:') or
                sendmail.startswith('smtptls:')):
            proto = sendmail.split(':', 1)[0]
            host, port = sendmail.split(':', 1
                                        )[1].replace('/', '').rsplit(':', 1)
            smtp_ssl = proto in ('smtpssl', )  # FIXME: 'smtorp'
            if '@' in host:
                userpass, host = host.rsplit('@', 1)
                user, pwd = userpass.split(':', 1)
            else:
                user = pwd = None

            for ev in events:
                ev.data['proto'] = proto
                ev.data['host'] = host
                ev.data['auth'] = bool(user and pwd)

            if 'sendmail' in session.config.sys.debug:
                sys.stderr.write(_('SMTP connection to: %s:%s as %s\n'
                                   ) % (host, port, user or '(anon)'))

            server = (smtp_ssl and SMTP_SSL or SMTP
                      )(local_hostname='mailpile.local', timeout=25)

            def sm_startup():
                if 'sendmail' in session.config.sys.debug:
                    server.set_debuglevel(1)
                if proto == 'smtorp':
                    server.connect(host, int(port),
                                   socket_cls=session.config.get_tor_socket())
                else:
                    server.connect(host, int(port))
                if not smtp_ssl:
                    # We always try to enable TLS, even if the user just
                    # requested plain-text smtp. But we only throw errors
                    # if the user asked for encryption.
                    try:
                        server.starttls()
                    except:
                        if sendmail.startswith('smtptls'):
                            raise InsecureSmtpError()
                if user and pwd:
                    try:
                        server.login(user.encode('utf-8'), pwd.encode('utf-8'))
                    except UnicodeDecodeError:
                        fail(_('Bad character in username or password'),
                             events,
                             details={'authentication_error': True})
                    except smtplib.SMTPAuthenticationError:
                        fail(_('Invalid username or password'), events,
                             details={'authentication_error': True})

                smtp_do_or_die(_('Sender rejected by SMTP server'),
                               events, server.mail, frm)
                for rcpt in to:
                    # `reply` used to be named `msg`, shadowing the message
                    # object in sibling closures; renamed for clarity.
                    rc, reply = server.rcpt(rcpt)
                    if (rc == SMTORP_HASHCASH_RCODE and
                            reply.startswith(SMTORP_HASHCASH_PREFIX)):
                        rc, reply = server.rcpt(SMTorP_HashCash(rcpt, reply))
                    if rc != 250:
                        fail(_('Server rejected recipient: %s') % rcpt,
                             events)
                rcode, rmsg = server.docmd('DATA')
                if rcode != 354:
                    # BUG FIX: `events` argument was missing here, which
                    # raised TypeError instead of SendMailError.
                    fail(_('Server rejected DATA: %s %s') % (rcode, rmsg),
                         events)

            def sm_write(data):
                # Dot-stuff lines starting with '.' per RFC 5321.
                for line in data.splitlines(True):
                    if line.startswith('.'):
                        server.send('.')
                    server.send(line)

            def sm_close():
                server.send('\r\n.\r\n')
                smtp_do_or_die(_('Error spooling mail'),
                               events, server.getreply)

            def sm_cleanup():
                if hasattr(server, 'sock'):
                    server.close()
        else:
            fail(_('Invalid sendmail command/SMTP server: %s') % sendmail,
                 events)

        try:
            # Run the entire connect/login sequence in a single timer, but
            # give it plenty of time in case the network is lame.
            if sm_startup:
                RunTimed(300, sm_startup)

            if test_only:
                return True

            mark(_('Preparing message...'), events)
            msg_string = MessageAsString(CleanMessage(session.config, msg))
            total = len(msg_string)
            while msg_string:
                if mailpile.util.QUITTING:
                    raise TimedOut(_('Quitting'))
                mark(('Sending message... (%d%%)'
                      ) % (100 * (total - len(msg_string)) / total), events,
                     log=False)
                RunTimed(120, sm_write, msg_string[:20480])
                msg_string = msg_string[20480:]
            RunTimed(30, sm_close)

            mark(_n('Message sent, %d byte',
                    'Message sent, %d bytes',
                    total
                    ) % total, events)
            for ev in events:
                for rcpt in to:
                    vcard = session.config.vcards.get_vcard(rcpt)
                    if vcard:
                        vcard.record_history('send', time.time(), msg_mid)
                        if frm_vcard:
                            vcard.prefer_sender(rcpt, frm_vcard)
                        if update_to_vcards:
                            vcard.gpgshared = int(time.time())
                        vcard.save()
                    ev.private_data['>'.join([frm, rcpt])] = True
                ev.data['bytes'] = total
                ev.data['delivered'] = len([k for k in ev.private_data
                                            if ev.private_data[k]])
        finally:
            sm_cleanup()
    return True
|
{
"content_hash": "c3fceeb2d59847a04ea15bce7f7fc822",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 79,
"avg_line_length": 37.90958904109589,
"alnum_prop": 0.4983739249837392,
"repo_name": "jparyani/Mailpile",
"id": "eb1a7980f9afaade527c7150059b497f78b48a02",
"size": "13837",
"binary": false,
"copies": "2",
"ref": "refs/heads/sandstorm",
"path": "mailpile/smtp_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "130573"
},
{
"name": "Cap'n Proto",
"bytes": "1254"
},
{
"name": "JavaScript",
"bytes": "577920"
},
{
"name": "Makefile",
"bytes": "6768"
},
{
"name": "Python",
"bytes": "1310077"
},
{
"name": "Shell",
"bytes": "19360"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import argparse
import os
import re
import subprocess
REPO_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
CLI = argparse.ArgumentParser("Set the package version information")
CLI.add_argument(
"target",
help="The part of the current version to bump.",
choices=('major', 'minor', 'patch', None),
nargs='?',
default=None,
)
CLI.add_argument(
"-f",
"--version-file",
help="The file containing the __version__ field.",
default=os.path.join(REPO_BASE, 'cpy2py', 'meta.py'),
)
CLI.add_argument(
"-t",
"--tag-message",
help="Message for an annotated git tag. Required for minor and major version bumps.",
)
# BUG FIX: minor and patch components are now optional ('?'). The old
# pattern required all three, which made the zero-padding in
# read_version() dead code and rejected versions such as "1.2".
VERSION_STR_RE = r'^__version__\s*=\s*"([0-9]+(?:[.][0-9]+(?:[.][0-9]+)?)?)"(.*)$'


def read_version(version_file):
    """Read the current version number from *version_file*.

    Returns a [major, minor, patch] list of ints; components missing
    from the file default to 0. Raises ValueError when no __version__
    assignment is found or it has more than three components.
    """
    with open(version_file, 'r') as vfile:
        for line in vfile:
            # Match once and reuse the result (previously the regex was
            # evaluated twice per matching line).
            match = re.match(VERSION_STR_RE, line)
            if match:
                version_str = match.group(1)
                break
        else:
            raise ValueError("No version information in '%s'" % version_file)
    if version_str.count('.') > 2:
        raise ValueError("Version string '%s' does not match <major>.<minor>.<patch> scheme" % version_str)
    # Pad with zeros so the result always has exactly three components.
    return [int(mver) for mver in version_str.split('.')] + [0] * (2 - version_str.count('.'))
def format_version(version):
    """Render a version sequence such as [1, 2, 3] as a dotted string."""
    return '.'.join(map(str, version))
def bump_version(version, target):
    """Return a copy of *version* with the *target* component bumped.

    A None target leaves the version untouched; bumping a component
    resets the less significant ones to zero. Raises ValueError for an
    unknown target.
    """
    if target is None:
        return version
    if target == 'major':
        return [(version[0] + 1), 0, 0]
    if target == 'minor':
        return [version[0], (version[1] + 1), 0]
    if target == 'patch':
        return version[:2] + [version[2] + 1]
    raise ValueError
def write_version(version_file, new_version):
    """Update the version file with a new version number.

    Writes to a '<file>.vtp' temp file, copies ownership/permissions from
    the original, then atomically renames it into place. Only the
    __version__ line is rewritten; everything else is copied verbatim.
    """
    version_file_tmp = version_file + '.vtp'
    with open(version_file, 'r') as in_file, open(version_file_tmp, 'w') as out_file:
        for line in in_file:
            # Match once per line and reuse the result (the old code ran
            # the regex a second time on matching lines).
            match = re.match(VERSION_STR_RE, line)
            if match:
                line = '__version__ = "%s"%s\n' % (
                    format_version(new_version), match.group(2))
            out_file.write(line)
    # rename when done writing
    stat = os.stat(version_file)
    os.chmod(version_file_tmp, stat.st_mode)
    os.chown(version_file_tmp, stat.st_uid, stat.st_gid)
    os.rename(version_file_tmp, version_file)
def make_commit(version_file, new_version, message=None):
    """Commit the updated version file.

    The commit message is 'v<version>' plus, if given, *message* on a
    second line. Any other staged changes are unstaged first so only the
    version file lands in the commit.
    """
    commit_message = 'v' + format_version(new_version)
    if message:
        commit_message += '\n' + message
    # make sure version is committed
    subprocess.check_call([
        'git', 'reset', 'HEAD'
    ])
    subprocess.check_call([
        'git', 'add', version_file
    ])
    # make commit
    subprocess.check_call([
        'git', 'commit', '-m', commit_message
    ])
def make_version_tag_commit(new_version, message):
    """Create an annotated git tag 'v<version>' with *message*.

    Returns the tag name that was created.
    """
    tag = 'v' + format_version(new_version)
    subprocess.check_call([
        'git', 'tag',
        '-a', tag,
        '-m', message
    ])
    return tag
def main():
    """Read the current version, optionally bump it, then commit and tag."""
    options = CLI.parse_args()
    version = read_version(options.version_file)
    print('current version:', format_version(version))
    if options.target is None:
        return
    # bugfix: was ('major', 'manior'), so minor bumps silently skipped this
    # check; minor/major bumps create an annotated tag, which needs a message
    if options.target in ('major', 'minor') and not options.tag_message:
        raise ValueError("Must specify a tag message when bumping minor or major version.")
    new_version = bump_version(version, options.target)
    write_version(options.version_file, new_version)
    print('updated version:', format_version(new_version))
    make_commit(options.version_file, new_version, options.tag_message)
    print('commited version')
    if options.tag_message:
        tag = make_version_tag_commit(new_version, options.tag_message)
        print('added tag:', tag)
# Script entry point: run the bump only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
{
"content_hash": "2f7a0322134c1a57b7c50944d16c3a99",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 107,
"avg_line_length": 32.286821705426355,
"alnum_prop": 0.6156062424969988,
"repo_name": "maxfischer2781/cpy2py",
"id": "a7fce00ae08e6eb128c4eb6860447d30853a5f73",
"size": "4792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cpy2py_dev_tools/set_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7860"
},
{
"name": "Python",
"bytes": "230870"
}
],
"symlink_target": ""
}
|
"""Create VM with a single disk.
Creates a Persistent Disk. Then creates an instance that attaches
that Persistent Disk as a data disk.
"""
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'


def GenerateConfig(context):
  """Create instance with disks.

  Args:
    context: Deployment Manager context; reads env['deployment'],
      env['project'] and properties['zone'].

  Returns:
    A dict with a 'resources' list: the persistent disk first, then the
    instance that attaches it as a data disk.
  """
  deployment = context.env['deployment']
  project = context.env['project']
  zone = context.properties['zone']
  datadisk = 'datadisk-' + deployment

  # Shared URI prefix for zonal resources: .../projects/<p>/zones/<z>
  zone_prefix = COMPUTE_URL_BASE + 'projects/' + project + '/zones/' + zone

  disk = {
      'type': 'compute.v1.disk',
      'name': datadisk,
      'properties': {
          'zone': zone,
          'sizeGb': 10,
          # Disk type is a full URI. pd-standard is used here but
          # pd-ssd can be used as well.
          'type': zone_prefix + '/diskTypes/pd-standard'
      }
  }

  vm = {
      'type': 'compute.v1.instance',
      'name': 'vm-' + deployment,
      'properties': {
          'zone': zone,
          'machineType': zone_prefix + '/machineTypes/f1-micro',
          'metadata': {
              'items': [{
                  # For more ways to use startup scripts on an instance, see:
                  # https://cloud.google.com/compute/docs/startupscript
                  'key': 'startup-script',
                  'value': '#!/bin/bash\npython -m SimpleHTTPServer 8080'
              }]
          },
          'disks': [{
              'deviceName': 'boot',
              'type': 'PERSISTENT',
              'boot': True,
              'autoDelete': True,
              'initializeParams': {
                  'diskName': 'disk-' + deployment,
                  'sourceImage': (COMPUTE_URL_BASE + 'projects/' +
                                  'debian-cloud/global/' +
                                  'images/family/debian-9')
              }
          }, {
              # The data disk. The deviceName can be anything, but by
              # convention matches the resource name; the guest sees it as
              # /dev/disk/by-id/google-<deviceName>.
              'deviceName': 'datadisk',
              'type': 'PERSISTENT',
              'source': '$(ref.' + datadisk + '.selfLink)',
              'autoDelete': True
          }],
          'networkInterfaces': [{
              'network': (COMPUTE_URL_BASE + 'projects/' + project +
                          '/global/networks/default'),
              'accessConfigs': [{
                  'name': 'External NAT',
                  'type': 'ONE_TO_ONE_NAT'
              }]
          }]
      }
  }

  return {'resources': [disk, vm]}
|
{
"content_hash": "5ef73ae4b4e310fc3a68f733cbf1c0cf",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 39.35064935064935,
"alnum_prop": 0.463036303630363,
"repo_name": "aljim/deploymentmanager-samples",
"id": "e39c1fc0266c5b7d8a618e5914f0f0bf8f76b857",
"size": "3627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/v2/single_vm/python/vm_template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "6428"
},
{
"name": "HTML",
"bytes": "106754"
},
{
"name": "JavaScript",
"bytes": "70015"
},
{
"name": "Makefile",
"bytes": "4430"
},
{
"name": "Python",
"bytes": "443622"
},
{
"name": "Shell",
"bytes": "251698"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Add created_at/updated_at timestamp fields to the Project model."""

    dependencies = [
        ('volunteerapp', '0013_auto_20171117_0045'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='created_at',
            # auto_now_add needs a one-off default to backfill existing rows;
            # the timestamp below is when this migration was generated.
            field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2017, 11, 23, 7, 25, 38, 162969, tzinfo=utc)),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='project',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
{
"content_hash": "88aca98bdb34584d6807133215f23cc6",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 130,
"avg_line_length": 27.53846153846154,
"alnum_prop": 0.6089385474860335,
"repo_name": "mclark4386/volunteer_connection",
"id": "3f10b1459915e690215f0722abd93deaacf53276",
"size": "789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "volunteerapp/migrations/0014_auto_20171123_0725.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "457"
},
{
"name": "HTML",
"bytes": "10556"
},
{
"name": "Python",
"bytes": "29608"
}
],
"symlink_target": ""
}
|
__author__ = 'Bitvis AS'
__copyright__ = "Copyright 2017, Bitvis AS"
__version__ = "1.1.1"
__email__ = "support@bitvis.no"
import os
# Horizontal rule written into the generated VHDL files as a section divider.
division_line = "--========================================================================================================================"
class Channel:
  """A VVC channel: a name plus an ordered list of queue names.

  Every channel starts with the mandatory "cmd" queue.
  """

  def __init__(self, name):
    self.name = name
    self.queue_names = ["cmd"]

  def add_queue(self, name):
    """Register one additional queue on this channel."""
    self.queue_names.append(name)

  def len_of_queue(self):
    """Return the queue count, including the default "cmd" queue."""
    return len(self.queue_names)
def print_linefeed(file_handle):
  """Write a single empty line to the output file."""
  file_handle.write('\n')
def fill_with_n_spaces(used_spaces, required_spaces):
  """Return padding spaces to align output at *required_spaces* columns.

  Args:
    used_spaces: number of character positions already consumed.
    required_spaces: target column width.

  Returns:
    A string of (required_spaces - used_spaces) spaces, or "" when the
    used width already meets or exceeds the target.
  """
  # string multiplication replaces the original char-by-char append loop
  if required_spaces >= used_spaces:
    return " " * (required_spaces - used_spaces)
  return ""
def is_input_vhdl_legal(requested_vvc_name):
  """Check whether *requested_vvc_name* is usable as a VHDL identifier.

  Prints a diagnostic and returns False on illegal characters, empty input,
  or a leading underscore/digit. Prints only a warning (and still returns
  True) when the name exceeds the default C_LOG_SCOPE_WIDTH of 14.
  """
  # "\\´" keeps the original character pair (backslash + acute accent);
  # the original "\´" was an invalid escape sequence (DeprecationWarning).
  illegal_chars = set("<->!¤%&/()=?`\\´}][{€$£@ ^¨~'*;:.,|§\" ")
  if any((c in illegal_chars) for c in requested_vvc_name):
    print("Input contains illegal VHDL characters. Please try again.")
    return False
  name_length = len(requested_vvc_name)  # was requested_vvc_name.__len__()
  if name_length < 1:
    print("Input too short. Please try again.")
    return False
  if name_length > 14:
    print("WARNING: Name exceeds default maximum name length, defined in UVVM Utility Library constant C_LOG_SCOPE_WIDTH")
    print("         - Please increase C_LOG_SCOPE_WIDTH in the adaptations_pkg.vhd")
  if (requested_vvc_name[0] == '_') or (requested_vvc_name[0].isdigit()):
    print("Input must start with a letter")
    return False
  return True
def is_multi_channel_vvc():
  """Ask the user whether the VVC uses multiple channels; return 'y' or 'n'."""
  while True:
    choice = input("\rUse multiple, concurrent channels for this VVC? [y/n]: ").lower()
    if choice in ('y', 'n'):
      return choice
    print("Input not accepted. Please use either y or n")
def get_number_of_channels():
  """Prompt until the user enters an integer in [2, 99] and return it."""
  while True:
    reply = input("\rSet the number of concurrent channels to use [2-99]: ")
    try:
      count = int(reply)
    except ValueError:
      print("Input was not an integer!")
      continue
    if count < 2:
      print("Selected number " + str(count) + " is too small. Please use a number between 2 and 99")
    elif count > 99:
      print("Selected number " + str(count) + " is too large. Please use a number between 2 and 99")
    else:
      return count
def get_channel_name():
  """Prompt for a channel name until a VHDL-legal one is entered."""
  # loop replaces the original retry-by-recursion; same prompts and result
  while True:
    channel_name = input("\rPlease enter a channel name (e.g. tx or rx): ")
    if is_input_vhdl_legal(channel_name.lower()):
      return channel_name
def is_multi_queue_channel():
  """Ask the user whether the channel uses multiple queues; return 'y' or 'n'."""
  while True:
    choice = input("\rUse multiple queues for this channel? [y/n]: ").lower()
    if choice in ('y', 'n'):
      return choice
    print("Input not accepted. Please use either y or n")
def get_number_of_queues():
  """Prompt until the user enters an integer in [2, 99] (cmd queue included)."""
  while True:
    reply = input("\rSet the number of concurrent queues to use [2-99], included cmd queue: ")
    try:
      count = int(reply)
    except ValueError:
      print("Input was not an integer!")
      continue
    if count < 2:
      print("Selected number " + str(count) + " is too small. Please use a number between 2 and 99")
    elif count > 99:
      print("Selected number " + str(count) + " is too large. Please use a number between 2 and 99")
    else:
      return count
def get_queue_name():
  """Prompt for a queue name until a VHDL-legal one is entered."""
  # loop replaces the original retry-by-recursion; same prompts and result
  while True:
    queue_name = input("\rPlease enter a queue name (e.g. read): ")
    if is_input_vhdl_legal(queue_name.lower()):
      return queue_name
def get_vvc_name():
  """Prompt for the VVC name until a VHDL-legal one is entered."""
  # loop replaces the original retry-by-recursion; same prompts and result
  while True:
    vvc_name = input("\rPlease enter the VVC Name (e.g. SBI, UART, axilite): ")
    if is_input_vhdl_legal(vvc_name.lower()):
      return vvc_name
def add_vvc_header(file_handle):
  """Write the generated-by banner at the top of an output VVC file."""
  banner = [
      division_line,
      "-- This VVC was generated with Bitvis VVC Generator",
      division_line,
      "",
      "",
  ]
  for line in banner:
    file_handle.write(line + "\n")
def add_leaf_includes(file_handle, vvc_name):
  """Write the library/context/use clauses for a generated leaf VVC."""
  # empty strings become the blank lines print_linefeed used to emit
  includes = [
      "library ieee;",
      "use ieee.std_logic_1164.all;",
      "use ieee.numeric_std.all;",
      "",
      "library uvvm_util;",
      "context uvvm_util.uvvm_util_context;",
      "",
      "library uvvm_vvc_framework;",
      "use uvvm_vvc_framework.ti_vvc_framework_support_pkg.all;",
      "",
      "use work." + vvc_name.lower() + "_bfm_pkg.all;",
      "use work.vvc_methods_pkg.all;",
      "use work.vvc_cmd_pkg.all;",
      "use work.td_target_support_pkg.all;",
      "use work.td_vvc_entity_support_pkg.all;",
      "use work.td_cmd_queue_pkg.all;",
      "use work.td_result_queue_pkg.all;",
      "",
      division_line,
  ]
  for line in includes:
    file_handle.write(line + "\n")
def add_wrapper_includes(file_handle, vvc_name):
  """Write the library/context/use clauses for a generated VVC wrapper."""
  # empty strings become the blank lines print_linefeed used to emit
  includes = [
      "library ieee;",
      "use ieee.std_logic_1164.all;",
      "use ieee.numeric_std.all;",
      "",
      "library uvvm_util;",
      "context uvvm_util.uvvm_util_context;",
      "",
      "use work." + vvc_name.lower() + "_bfm_pkg.all;",
      "",
      division_line,
  ]
  for line in includes:
    file_handle.write(line + "\n")
def add_vvc_entity(file_handle, vvc_name, vvc_channel):
  """Write the VHDL entity declaration (generics and port skeleton).

  Here *vvc_channel* is a channel-name string, or "NA" for a single-channel
  VVC; it selects the entity name and whether a GC_CHANNEL generic and a
  per-channel interface example are emitted.
  """
  if vvc_channel != "NA":
    file_handle.write("entity "+vvc_name.lower()+"_"+vvc_channel.lower()+"_vvc is\n")
  else:
    file_handle.write("entity "+vvc_name.lower()+"_vvc is\n")
  file_handle.write("  generic (\n")
  file_handle.write("    --<USER_INPUT> Insert interface specific generic constants here\n")
  file_handle.write("    -- Example: \n")
  file_handle.write("    -- GC_ADDR_WIDTH                            : integer range 1 to C_VVC_CMD_ADDR_MAX_LENGTH;\n")
  file_handle.write("    -- GC_DATA_WIDTH                            : integer range 1 to C_VVC_CMD_DATA_MAX_LENGTH;\n")
  file_handle.write("    GC_INSTANCE_IDX                          : natural;\n")
  if vvc_channel != "NA":
    file_handle.write("    GC_CHANNEL                               : t_channel;\n")
  # fill_with_n_spaces keeps the ':' column aligned for any VVC name length
  file_handle.write("    GC_"+vvc_name.upper()+"_BFM_CONFIG"+fill_with_n_spaces(vvc_name.__len__(),26)+
                    ": t_"+vvc_name.lower()+"_bfm_config"+fill_with_n_spaces(vvc_name.__len__(),13)+
                    ":= C_"+vvc_name.upper()+"_BFM_CONFIG_DEFAULT;\n")
  file_handle.write("    GC_CMD_QUEUE_COUNT_MAX                   : natural := 1000;\n")
  file_handle.write("    GC_CMD_QUEUE_COUNT_THRESHOLD             : natural := 950;\n")
  file_handle.write("    GC_CMD_QUEUE_COUNT_THRESHOLD_SEVERITY    : t_alert_level := WARNING;\n")
  file_handle.write("    GC_RESULT_QUEUE_COUNT_MAX                : natural := 1000;\n")
  file_handle.write("    GC_RESULT_QUEUE_COUNT_THRESHOLD          : natural := 950;\n")
  file_handle.write("    GC_RESULT_QUEUE_COUNT_THRESHOLD_SEVERITY : t_alert_level := WARNING\n")
  file_handle.write("  );\n")
  file_handle.write("  port (\n")
  file_handle.write("    --<USER_INPUT> Insert BFM interface signals here\n")
  file_handle.write("    -- Example: \n")
  if(vvc_channel == "NA"):
    file_handle.write("    -- "+vvc_name.lower()+"_vvc_if"+fill_with_n_spaces(vvc_name.__len__(),21)+
                      ": inout t_"+vvc_name.lower()+"_if := init_"+vvc_name.lower()+
                      "_if_signals(GC_ADDR_WIDTH, GC_DATA_WIDTH); \n")
  else:
    file_handle.write("    -- "+vvc_name.lower()+"_"+vvc_channel.lower()+"_vvc_if" +
                      fill_with_n_spaces(vvc_name.__len__()+vvc_channel.__len__(), 20) +
                      ": inout t_"+vvc_name.lower()+"_"+vvc_channel.lower()+"_if := init_"+vvc_name.lower()+
                      "_"+vvc_channel.lower()+"_if_signals(GC_ADDR_WIDTH, GC_DATA_WIDTH); \n")
  file_handle.write("    -- VVC control signals: \n")
  file_handle.write("    -- rst                         : in std_logic; -- Optional VVC Reset\n")
  file_handle.write("    clk                        : in std_logic\n")
  file_handle.write("  );\n")
  if vvc_channel != "NA":
    file_handle.write("end entity "+vvc_name.lower()+"_"+vvc_channel.lower()+"_vvc;\n")
  else:
    file_handle.write("end entity "+vvc_name.lower()+"_vvc;\n")
  print_linefeed(file_handle)
  file_handle.write(division_line+"\n")
  file_handle.write(division_line+"\n")
def add_architecture_declaration(file_handle, vvc_name, vvc_channel):
  """Write the architecture declarative region of the VVC.

  Here *vvc_channel* is a Channel object (name + queue_names); one extra
  set of busy/increasing/idx signals and one extra shared queue variable
  is emitted per queue beyond the default "cmd" queue.
  """
  len_of_queue = vvc_channel.len_of_queue()
  if vvc_channel.name != "NA":
    file_handle.write("architecture behave of "+vvc_name.lower()+"_"+vvc_channel.name.lower()+"_vvc is\n")
  else:
    file_handle.write("architecture behave of "+vvc_name.lower()+"_vvc is\n")
  print_linefeed(file_handle)
  file_handle.write("  constant C_SCOPE      : string       := C_VVC_NAME & \",\" & to_string(GC_INSTANCE_IDX);\n")
  file_handle.write("  constant C_VVC_LABELS : t_vvc_labels := assign_vvc_labels(C_SCOPE, C_VVC_NAME,")
  if vvc_channel.name == "NA":
    file_handle.write(" GC_INSTANCE_IDX, NA);\n")
  else:
    file_handle.write(" GC_INSTANCE_IDX, GC_CHANNEL);\n")
  print_linefeed(file_handle)
  file_handle.write("  signal executor_is_busy       : boolean := false;\n")
  file_handle.write("  signal queue_is_increasing    : boolean := false;\n")
  file_handle.write("  signal last_cmd_idx_executed  : natural := 0;\n")
  if len_of_queue > 1:
    # one handshake signal set per additional queue (index 0 is "cmd")
    for i in range(1, len_of_queue):
      file_handle.write("  signal "+vvc_channel.queue_names[i]+"_is_busy       : boolean := false;\n")
      file_handle.write("  signal "+vvc_channel.queue_names[i]+"_queue_is_increasing    : boolean := false;\n")
      file_handle.write("  signal last_"+vvc_channel.queue_names[i]+"_idx_executed  : natural := 0;\n")
  file_handle.write("  signal terminate_current_cmd  : t_flag_record;\n")
  print_linefeed(file_handle)
  file_handle.write("  -- Instantiation of the element dedicated Queue\n")
  file_handle.write("  shared variable command_queue : work.td_cmd_queue_pkg.t_generic_queue;\n")
  if len_of_queue > 1:
    for i in range(1, len_of_queue):
      file_handle.write("  shared variable "+vvc_channel.queue_names[i]+"_queue : work.td_cmd_queue_pkg.t_generic_queue;\n")
  file_handle.write("  shared variable result_queue  : work.td_result_queue_pkg.t_generic_queue;\n")
  print_linefeed(file_handle)
  if vvc_channel.name == "NA":
    file_handle.write("  alias vvc_config : t_vvc_config is shared_"+vvc_name.lower()+"_vvc_config(GC_INSTANCE_IDX);\n")
    file_handle.write("  alias vvc_status : t_vvc_status is shared_"+vvc_name.lower()+"_vvc_status(GC_INSTANCE_IDX);\n")
    file_handle.write("  alias transaction_info : t_transaction_info is "
                      +"shared_"+vvc_name.lower()+"_transaction_info(GC_INSTANCE_IDX);\n")
  else:
    file_handle.write("  alias vvc_config : t_vvc_config is shared_"+vvc_name.lower()+"_vvc_config(GC_CHANNEL, GC_INSTANCE_IDX);\n")
    file_handle.write("  alias vvc_status : t_vvc_status is shared_"+vvc_name.lower()+"_vvc_status(GC_CHANNEL, GC_INSTANCE_IDX);\n")
    file_handle.write("  alias transaction_info : t_transaction_info is "
                      +"shared_"+vvc_name.lower()+"_transaction_info(GC_CHANNEL, GC_INSTANCE_IDX);\n")
  print_linefeed(file_handle)
  file_handle.write("begin\n")
  print_linefeed(file_handle)
  print_linefeed(file_handle)
def add_wrapper_architecture_declaration(file_handle, vvc_name):
  """Open the wrapper architecture body for the given VVC name."""
  # direct "\n" writes replace the print_linefeed() helper calls
  file_handle.write("architecture struct of " + vvc_name.lower() + "_vvc is\n")
  file_handle.write("\n")
  file_handle.write("begin\n")
  file_handle.write("\n")
def add_wrapper_architecture_end(file_handle):
  """Close the wrapper architecture body."""
  # single write producing the same blank line / end / blank line sequence
  file_handle.write("\nend struct;\n\n")
def add_vvc_constructor(file_handle, vvc_name):
  """Write the call to the common VVC constructor support procedure."""
  file_handle.write(division_line+"\n")
  file_handle.write("-- Constructor\n")
  file_handle.write("-- - Set up the defaults and show constructor if enabled\n")
  file_handle.write(division_line+"\n")
  file_handle.write("  work.td_vvc_entity_support_pkg.vvc_constructor(C_SCOPE, GC_INSTANCE_IDX, vvc_config, command_queue, result_queue, "+
                    "GC_"+vvc_name.upper()+"_BFM_CONFIG,\n")
  file_handle.write("                                                 GC_CMD_QUEUE_COUNT_MAX, GC_CMD_QUEUE_COUNT_THRESHOLD, "+
                    "GC_CMD_QUEUE_COUNT_THRESHOLD_SEVERITY,\n")
  file_handle.write("                                                 GC_RESULT_QUEUE_COUNT_MAX, GC_RESULT_QUEUE_COUNT_THRESHOLD, "+
                    "GC_RESULT_QUEUE_COUNT_THRESHOLD_SEVERITY);\n")
  file_handle.write(division_line+"\n")
  print_linefeed(file_handle)
  print_linefeed(file_handle)
def add_vvc_interpreter(file_handle, vvc_channel):
  """Write the VVC command-interpreter process.

  Here *vvc_channel* is a Channel object; one extra await-completion call
  is emitted per queue beyond the default "cmd" queue, and the channel name
  ("NA" vs. a real channel) selects how shared_vvc_last_received_cmd_idx
  is indexed.
  """
  len_of_queue = vvc_channel.len_of_queue()
  file_handle.write(division_line+"\n")
  file_handle.write("-- Command interpreter\n")
  file_handle.write("-- - Interpret, decode and acknowledge commands from the central sequencer\n")
  file_handle.write(division_line+"\n")
  file_handle.write("  cmd_interpreter : process\n")
  file_handle.write("     variable v_cmd_has_been_acked : boolean; -- Indicates if acknowledge_cmd() has been called for the current shared_vvc_cmd\n")
  file_handle.write("     variable v_local_vvc_cmd      : t_vvc_cmd_record := C_VVC_CMD_DEFAULT;\n")
  file_handle.write("  begin\n")
  print_linefeed(file_handle)
  file_handle.write("    -- 0. Initialize the process prior to first command\n")
  file_handle.write("    work.td_vvc_entity_support_pkg.initialize_interpreter(terminate_current_cmd, global_awaiting_completion);\n")
  file_handle.write("    -- initialise shared_vvc_last_received_cmd_idx for channel and instance\n")
  if vvc_channel.name == "NA":
    file_handle.write("    shared_vvc_last_received_cmd_idx(NA, GC_INSTANCE_IDX) := 0;\n")
  else:
    file_handle.write("    shared_vvc_last_received_cmd_idx(GC_CHANNEL, GC_INSTANCE_IDX) := 0;\n")
  print_linefeed(file_handle)
  file_handle.write("    -- Then for every single command from the sequencer\n")
  file_handle.write("    loop  -- basically as long as new commands are received\n")
  print_linefeed(file_handle)
  file_handle.write("      -- 1. wait until command targeted at this VVC. Must match VVC name, instance and channel"+
                    " (if applicable)\n")
  file_handle.write("      --    releases global semaphore\n")
  file_handle.write("      -------------------------------------------------------------------------\n")
  file_handle.write("      work.td_vvc_entity_support_pkg.await_cmd_from_sequencer(C_VVC_LABELS, vvc_config, THIS_VVCT, "+
                    "VVC_BROADCAST, global_vvc_busy, global_vvc_ack, shared_vvc_cmd, v_local_vvc_cmd);\n")
  file_handle.write("      v_cmd_has_been_acked := false; -- Clear flag\n")
  file_handle.write("      -- update shared_vvc_last_received_cmd_idx with received command index\n")
  if vvc_channel.name == "NA":
    file_handle.write("      shared_vvc_last_received_cmd_idx(NA, GC_INSTANCE_IDX) := v_local_vvc_cmd.cmd_idx;\n")
  else:
    file_handle.write("      shared_vvc_last_received_cmd_idx(GC_CHANNEL, GC_INSTANCE_IDX) := v_local_vvc_cmd.cmd_idx;\n")
  print_linefeed(file_handle)
  file_handle.write("      -- 2a. Put command on the queue if intended for the executor\n")
  file_handle.write("      -------------------------------------------------------------------------\n")
  file_handle.write("      if v_local_vvc_cmd.command_type = QUEUED then\n")
  file_handle.write("        work.td_vvc_entity_support_pkg.put_command_on_queue(v_local_vvc_cmd, command_queue, vvc_status, "+
                    "queue_is_increasing);\n")
  print_linefeed(file_handle)
  file_handle.write("      -- 2b. Otherwise command is intended for immediate response\n")
  file_handle.write("      -------------------------------------------------------------------------\n")
  file_handle.write("      elsif v_local_vvc_cmd.command_type = IMMEDIATE then\n")
  file_handle.write("        case v_local_vvc_cmd.operation is\n")
  print_linefeed(file_handle)
  file_handle.write("          when AWAIT_COMPLETION =>\n")
  file_handle.write("            -- Await completion of all commands in the cmd_executor queue\n")
  file_handle.write("            work.td_vvc_entity_support_pkg.interpreter_await_completion(v_local_vvc_cmd, command_queue, "
                    "vvc_config, executor_is_busy, C_VVC_LABELS, last_cmd_idx_executed);\n")
  if len_of_queue > 1:
    # one await-completion call per additional queue (index 0 is "cmd")
    for i in range(1, len_of_queue):
      queue_name = vvc_channel.queue_names[i]
      file_handle.write("            -- Await completion of all commands in the "+queue_name+" queue\n")
      file_handle.write("            work.td_vvc_entity_support_pkg.interpreter_await_completion(v_local_vvc_cmd, "+queue_name+"_queue, "
                        "vvc_config, "+queue_name+"_is_busy, C_VVC_LABELS, last_"+queue_name+"_idx_executed);\n")
  print_linefeed(file_handle)
  file_handle.write("          when AWAIT_ANY_COMPLETION =>\n")
  file_handle.write("            if not v_local_vvc_cmd.gen_boolean then\n")
  file_handle.write("              -- Called with lastness = NOT_LAST: Acknowledge immediately to let the sequencer continue\n")
  file_handle.write("              work.td_target_support_pkg.acknowledge_cmd(global_vvc_ack,v_local_vvc_cmd.cmd_idx);\n")
  file_handle.write("              v_cmd_has_been_acked := true;\n")
  file_handle.write("            end if;\n")
  file_handle.write("            work.td_vvc_entity_support_pkg.interpreter_await_any_completion(v_local_vvc_cmd, command_queue, vvc_config, "+
                    "executor_is_busy, C_VVC_LABELS, last_cmd_idx_executed, global_awaiting_completion);\n")
  print_linefeed(file_handle)
  file_handle.write("          when DISABLE_LOG_MSG =>\n")
  file_handle.write("            uvvm_util.methods_pkg.disable_log_msg(v_local_vvc_cmd.msg_id, vvc_config.msg_id_panel"
                    +", to_string(v_local_vvc_cmd.msg) & format_command_idx(v_local_vvc_cmd), C_SCOPE, v_local_vvc_cmd.quietness);\n")
  print_linefeed(file_handle)
  file_handle.write("          when ENABLE_LOG_MSG =>\n")
  file_handle.write("            uvvm_util.methods_pkg.enable_log_msg(v_local_vvc_cmd.msg_id, vvc_config.msg_id_panel"+
                    ", to_string(v_local_vvc_cmd.msg) & format_command_idx(v_local_vvc_cmd), C_SCOPE, v_local_vvc_cmd.quietness);\n")
  print_linefeed(file_handle)
  file_handle.write("          when FLUSH_COMMAND_QUEUE =>\n")
  file_handle.write("            work.td_vvc_entity_support_pkg.interpreter_flush_command_queue(v_local_vvc_cmd, command_queue"+
                    ", vvc_config, vvc_status, C_VVC_LABELS);\n")
  print_linefeed(file_handle)
  file_handle.write("          when TERMINATE_CURRENT_COMMAND =>\n")
  file_handle.write("            work.td_vvc_entity_support_pkg.interpreter_terminate_current_command(v_local_vvc_cmd, "+
                    "vvc_config, C_VVC_LABELS, terminate_current_cmd);\n")
  print_linefeed(file_handle)
  file_handle.write("          -- when FETCH_RESULT =>\n")
  file_handle.write("          --   work.td_vvc_entity_support_pkg.interpreter_fetch_result(result_queue, v_local_vvc_cmd, "+
                    "vvc_config, C_VVC_LABELS, last_cmd_idx_executed, shared_vvc_response);\n")
  print_linefeed(file_handle)
  file_handle.write("          when others =>\n")
  file_handle.write("            tb_error(\"Unsupported command received for IMMEDIATE execution: '\" & "+
                    "to_string(v_local_vvc_cmd.operation) & \"'\", C_SCOPE);\n")
  print_linefeed(file_handle)
  file_handle.write("        end case;\n")
  print_linefeed(file_handle)
  file_handle.write("      else\n")
  file_handle.write("        tb_error(\"command_type is not IMMEDIATE or QUEUED\", C_SCOPE);\n")
  file_handle.write("      end if;\n")
  print_linefeed(file_handle)
  file_handle.write("      -- 3. Acknowledge command after runing or queuing the command\n")
  file_handle.write("      -------------------------------------------------------------------------\n")
  file_handle.write("      if not v_cmd_has_been_acked then\n")
  file_handle.write("        work.td_target_support_pkg.acknowledge_cmd(global_vvc_ack,v_local_vvc_cmd.cmd_idx);\n")
  file_handle.write("      end if;\n")
  print_linefeed(file_handle)
  file_handle.write("    end loop;\n")
  file_handle.write("  end process;\n")
  file_handle.write(division_line+"\n")
  print_linefeed(file_handle)
  print_linefeed(file_handle)
  print_linefeed(file_handle)
def add_vvc_executor(file_handle, vvc_channel):
len_of_queue = vvc_channel.len_of_queue()
file_handle.write(division_line+"\n")
file_handle.write("-- Command executor\n")
file_handle.write("-- - Fetch and execute the commands\n")
file_handle.write(division_line+"\n")
file_handle.write(" cmd_executor : process\n")
file_handle.write(" variable v_cmd : t_vvc_cmd_record;\n")
file_handle.write(" -- variable v_read_data : t_vvc_result; -- See vvc_cmd_pkg\n")
file_handle.write(" variable v_timestamp_start_of_current_bfm_access : time := 0 ns;\n")
file_handle.write(" variable v_timestamp_start_of_last_bfm_access : time := 0 ns;\n")
file_handle.write(" variable v_timestamp_end_of_last_bfm_access : time := 0 ns;\n")
file_handle.write(" variable v_command_is_bfm_access : boolean;\n")
file_handle.write(" -- variable v_normalised_addr : unsigned(GC_ADDR_WIDTH-1 downto 0) := (others => '0');\n")
file_handle.write(" -- variable v_normalised_data : std_logic_vector(GC_DATA_WIDTH-1 downto 0) := (others => '0');\n")
file_handle.write(" begin\n")
print_linefeed(file_handle)
file_handle.write(" -- 0. Initialize the process prior to first command\n")
file_handle.write(" -------------------------------------------------------------------------\n")
file_handle.write(" work.td_vvc_entity_support_pkg.initialize_executor(terminate_current_cmd);\n")
file_handle.write(" loop\n")
print_linefeed(file_handle)
file_handle.write(" -- 1. Set defaults, fetch command and log\n")
file_handle.write(" -------------------------------------------------------------------------\n")
file_handle.write(" work.td_vvc_entity_support_pkg.fetch_command_and_prepare_executor(v_cmd, command_queue, vvc_config"+
", vvc_status, queue_is_increasing, executor_is_busy, C_VVC_LABELS);\n")
print_linefeed(file_handle)
file_handle.write(" -- Reset the transaction info for waveview\n")
file_handle.write(" transaction_info := C_TRANSACTION_INFO_DEFAULT;\n")
file_handle.write(" transaction_info.operation := v_cmd.operation;\n")
file_handle.write(" transaction_info.msg := pad_string(to_string(v_cmd.msg)"
", ' ', transaction_info.msg'length);\n")
print_linefeed(file_handle)
file_handle.write(" -- Check if command is a BFM access\n")
file_handle.write(" --<USER_INPUT> Replace this if statement with a check of the current v_cmd.operation, in "
"order to set v_cmd_is_bfm_access to true if this is a BFM access command\n")
file_handle.write(" -- Example:\n")
file_handle.write(" -- if v_cmd.operation = WRITE or v_cmd.operation = READ or v_cmd.operation = CHECK or v_cmd.operation = POLL_UNTIL then \n")
file_handle.write(" if true then -- Replace this line with actual check\n")
file_handle.write(" v_command_is_bfm_access := true;\n")
file_handle.write(" else\n")
file_handle.write(" v_command_is_bfm_access := false;\n")
file_handle.write(" end if;\n")
print_linefeed(file_handle)
file_handle.write(" -- Insert delay if needed\n")
file_handle.write(" work.td_vvc_entity_support_pkg.insert_inter_bfm_delay_if_requested(vvc_config"
" => vvc_config,\n")
file_handle.write(" "
"command_is_bfm_access => v_command_is_bfm_access,\n")
file_handle.write(" "
"timestamp_start_of_last_bfm_access => v_timestamp_start_of_last_bfm_access,\n")
file_handle.write(" "
"timestamp_end_of_last_bfm_access => v_timestamp_end_of_last_bfm_access,\n")
file_handle.write(" "
"scope => C_SCOPE);\n")
print_linefeed(file_handle)
file_handle.write(" if v_command_is_bfm_access then\n")
file_handle.write(" v_timestamp_start_of_current_bfm_access := now;\n")
file_handle.write(" end if;\n")
print_linefeed(file_handle)
file_handle.write(" -- 2. Execute the fetched command\n")
file_handle.write(" -------------------------------------------------------------------------\n")
file_handle.write(" case v_cmd.operation is -- Only operations in the dedicated record are relevant\n")
print_linefeed(file_handle)
file_handle.write(" -- VVC dedicated operations\n")
file_handle.write(" --===================================\n")
print_linefeed(file_handle)
file_handle.write(" --<USER_INPUT>: Insert BFM procedure calls here\n")
file_handle.write(" -- Example:\n")
file_handle.write(" -- when WRITE =>\n")
file_handle.write(" -- v_normalised_addr := normalize_and_check(v_cmd.addr, v_normalised_addr, ALLOW_WIDER_NARROWER, \"addr\", \"shared_vvc_cmd.addr\", \""+vvc_name.lower()+"_write() called with to wide address. \" & v_cmd.msg);\n")
file_handle.write(" -- v_normalised_data := normalize_and_check(v_cmd.data, v_normalised_data, ALLOW_WIDER_NARROWER, \"data\", \"shared_vvc_cmd.data\", \""+vvc_name.lower()+"_write() called with to wide data. \" & v_cmd.msg);\n")
file_handle.write(" -- -- Add info to the transaction_for_waveview_struct if needed\n")
file_handle.write(" -- transaction_info.data(GC_DATA_WIDTH - 1 downto 0) := "
"v_normalised_data;\n")
file_handle.write(" -- transaction_info.addr(GC_ADDR_WIDTH - 1 downto 0) := "
"v_normalised_addr;\n")
file_handle.write(" -- -- Call the corresponding procedure in the BFM package.\n")
file_handle.write(" -- "+vvc_name.lower()+"_write(addr_value => v_normalised_addr,\n")
file_handle.write(" -- data_value => v_normalised_data,\n")
file_handle.write(" -- msg => format_msg(v_cmd),\n")
file_handle.write(" -- clk => clk,\n")
file_handle.write(" -- "+vvc_name.lower()+"_if => "+vvc_name.lower()+"_vvc_if,\n")
file_handle.write(" -- scope => C_SCOPE,\n")
file_handle.write(" -- msg_id_panel => vvc_config.msg_id_panel,\n")
file_handle.write(" -- config => vvc_config.bfm_config);\n")
print_linefeed(file_handle)
if len_of_queue > 1:
file_handle.write(" -- -- Eksample of pipelined read, eg. Avalon interface.\n")
else:
file_handle.write(" -- -- If the result from the BFM call is to be stored, e.g. in a read call, "
"use the additional procedure illustrated in this read example\n")
file_handle.write(" -- when READ =>\n")
file_handle.write(" -- v_normalised_addr := normalize_and_check(v_cmd.addr, v_normalised_addr, ALLOW_WIDER_NARROWER, \"addr\", \"shared_vvc_cmd.addr\", \""+vvc_name.lower()+"_write() called with to wide address. \" & v_cmd.msg);\n")
file_handle.write(" -- -- Add info to the transaction_for_waveview_struct if needed\n")
file_handle.write(" -- transaction_info.addr(GC_ADDR_WIDTH - 1 downto 0) := "
"v_normalised_addr;\n")
file_handle.write(" -- -- Call the corresponding procedure in the BFM package.\n")
if len_of_queue > 1:
file_handle.write(" -- if vvc_config.use_read_pipeline then\n")
file_handle.write(" -- -- Stall until response command queue is no longer full\n")
file_handle.write(" -- while command_response_queue.get_count(VOID) > vvc_config.num_pipeline_stages loop\n")
file_handle.write(" -- wait for vvc_config.bfm_config.clock_period;\n")
file_handle.write(" -- end loop;\n")
file_handle.write(" -- avalon_mm_read_request( addr_value => v_normalised_addr,\n")
file_handle.write(" -- msg => format_msg(v_cmd),\n")
file_handle.write(" -- clk => clk,\n")
file_handle.write(" -- avalon_mm_if => avalon_mm_vvc_master_if,\n")
file_handle.write(" -- scope => C_SCOPE,\n")
file_handle.write(" -- msg_id_panel => vvc_config.msg_id_panel,\n")
file_handle.write(" -- config => vvc_config.bfm_config);\n")
file_handle.write(" -- work.td_vvc_entity_support_pkg.put_command_on_queue(v_cmd, command_response_queue, vvc_status, response_queue_is_increasing);\n")
print_linefeed(file_handle)
file_handle.write(" -- else\n")
file_handle.write(" -- avalon_mm_read( addr_value => v_normalised_addr,\n")
file_handle.write(" -- data_value => v_read_data(GC_DATA_WIDTH-1 downto 0),\n")
file_handle.write(" -- msg => format_msg(v_cmd),\n")
file_handle.write(" -- clk => clk,\n")
file_handle.write(" -- avalon_mm_if => avalon_mm_vvc_master_if,\n")
file_handle.write(" -- scope => C_SCOPE,\n")
file_handle.write(" -- msg_id_panel => vvc_config.msg_id_panel,\n")
file_handle.write(" -- config => vvc_config.bfm_config);\n")
file_handle.write(" -- -- Store the result\n")
file_handle.write(" -- work.td_vvc_entity_support_pkg.store_result(result_queue => result_queue,\n")
file_handle.write(" -- cmd_idx => v_cmd.cmd_idx,\n")
file_handle.write(" -- result => v_read_data );\n")
file_handle.write(" -- end if;\n")
else:
file_handle.write(" -- "+vvc_name.lower()+"_read(addr_value => v_normalised_addr,\n")
file_handle.write(" -- data_value => v_read_data,\n")
file_handle.write(" -- msg => format_msg(v_cmd),\n")
file_handle.write(" -- clk => clk,\n")
file_handle.write(" -- "+vvc_name.lower()+"_if => "+vvc_name.lower()+"_vvc_if,\n")
file_handle.write(" -- scope => C_SCOPE,\n")
file_handle.write(" -- msg_id_panel => vvc_config.msg_id_panel,\n")
file_handle.write(" -- config => vvc_config.bfm_config);\n")
file_handle.write(" -- -- Store the result\n")
file_handle.write(" -- work.td_vvc_entity_support_pkg.store_result(instance_idx => GC_INSTANCE_IDX,\n")
file_handle.write(" -- cmd_idx => v_cmd.cmd_idx,\n")
file_handle.write(" -- data => v_read_data);\n")
print_linefeed(file_handle)
print_linefeed(file_handle)
file_handle.write(" -- UVVM common operations\n")
file_handle.write(" --===================================\n")
file_handle.write(" when INSERT_DELAY =>\n")
file_handle.write(" log(ID_INSERTED_DELAY, \"Running: \" & to_string(v_cmd.proc_call) & \" \" & "
"format_command_idx(v_cmd), C_SCOPE, vvc_config.msg_id_panel);\n")
file_handle.write(" if v_cmd.gen_integer_array(0) = -1 then\n")
file_handle.write(" -- Delay specified using time\n")
file_handle.write(" wait until terminate_current_cmd.is_active = '1' for v_cmd.delay;\n")
file_handle.write(" else\n")
file_handle.write(" -- Delay specified using integer\n")
file_handle.write(" wait until terminate_current_cmd.is_active = '1' for v_cmd.gen_integer_array(0) * vvc_config.bfm_config.clock_period;\n")
file_handle.write(" end if;\n")
print_linefeed(file_handle)
file_handle.write(" when others =>\n")
file_handle.write(" tb_error(\"Unsupported local command received for execution: '\" & "
"to_string(v_cmd.operation) & \"'\", C_SCOPE);\n")
file_handle.write(" end case;\n")
print_linefeed(file_handle)
file_handle.write(" if v_command_is_bfm_access then\n")
file_handle.write(" v_timestamp_end_of_last_bfm_access := now;\n")
file_handle.write(" v_timestamp_start_of_last_bfm_access := v_timestamp_start_of_current_bfm_access;\n")
file_handle.write(" if ((vvc_config.inter_bfm_delay.delay_type = TIME_START2START) and \n")
file_handle.write(" ((now - v_timestamp_start_of_current_bfm_access) > vvc_config.inter_bfm_delay.delay_in_time)) then\n")
file_handle.write(" alert(vvc_config.inter_bfm_delay.inter_bfm_delay_violation_severity, \"BFM access exceeded specified "
"start-to-start inter-bfm delay, \" & \n")
file_handle.write(" to_string(vvc_config.inter_bfm_delay.delay_in_time) & \".\", C_SCOPE);\n")
file_handle.write(" end if;\n")
file_handle.write(" end if;\n")
print_linefeed(file_handle)
file_handle.write(" -- Reset terminate flag if any occurred\n")
file_handle.write(" if (terminate_current_cmd.is_active = '1') then\n")
file_handle.write(" log(ID_CMD_EXECUTOR, \"Termination request received\", C_SCOPE, "
"vvc_config.msg_id_panel);\n")
file_handle.write(" uvvm_vvc_framework.ti_vvc_framework_support_pkg.reset_flag(terminate_current_cmd);\n")
file_handle.write(" end if;\n")
print_linefeed(file_handle)
file_handle.write(" last_cmd_idx_executed <= v_cmd.cmd_idx;\n")
file_handle.write(" -- Reset the transaction info for waveview\n")
file_handle.write(" transaction_info := C_TRANSACTION_INFO_DEFAULT;\n")
print_linefeed(file_handle)
file_handle.write(" end loop;\n")
file_handle.write(" end process;\n")
file_handle.write(division_line+"\n")
print_linefeed(file_handle)
print_linefeed(file_handle)
print_linefeed(file_handle)
def add_vvc_pipeline_step(file_handle, queue_name):
    """Write a pipelined-step executor process for the given queue to the
    generated VVC architecture.

    The emitted process fetches commands from the <queue_name> queue and is
    intended for split-transaction handling (e.g. pipelined read responses
    on an Avalon-MM style interface), as shown in the commented examples.

    Args:
        file_handle: writable file object receiving the generated VHDL code.
        queue_name:  base name of the pipelined queue; used to derive the
                     process label, queue identifier and the related
                     busy / queue-is-increasing flag names.
    """
    file_handle.write(division_line+"\n")
    file_handle.write("-- Pipelined step\n")
    file_handle.write("-- - Fetch and execute the commands in the "+queue_name+" queue\n")
    file_handle.write(division_line+"\n")
    file_handle.write("  "+queue_name+"_executor : process\n")
    file_handle.write("    variable v_cmd                : t_vvc_cmd_record;\n")
    file_handle.write("    -- variable v_read_data          : t_vvc_result; -- See vvc_cmd_pkg\n")
    file_handle.write("    -- variable v_normalised_addr    : unsigned(GC_ADDR_WIDTH-1 downto 0)         := (others => '0');\n")
    file_handle.write("    -- variable v_normalised_data    : std_logic_vector(GC_DATA_WIDTH-1 downto 0) := (others => '0');\n")
    file_handle.write("  begin\n")
    file_handle.write("    -- Set the "+queue_name+" queue up with the same settings as the command queue\n")
    file_handle.write("    "+queue_name+"_queue.set_scope(C_SCOPE & \":"+queue_name.upper()+"\");\n")
    file_handle.write("    "+queue_name+"_queue.set_queue_count_max(vvc_config.cmd_queue_count_max);\n")
    file_handle.write("    "+queue_name+"_queue.set_queue_count_threshold(vvc_config.cmd_queue_count_threshold);\n")
    file_handle.write("    "+queue_name+"_queue.set_queue_count_threshold_severity(vvc_config.cmd_queue_count_threshold_severity);\n")
    file_handle.write("    wait for 0 ns;  -- Wait for "+queue_name+" queue to initialize completely\n")
    print_linefeed(file_handle)
    file_handle.write("    loop\n")
    file_handle.write("      -- Fetch commands\n")
    file_handle.write("      -------------------------------------------------------------------------\n")
    file_handle.write("      work.td_vvc_entity_support_pkg.fetch_command_and_prepare_executor(v_cmd, "+queue_name+"_queue, vvc_config"+
                      ", vvc_status, "+queue_name+"_queue_is_increasing, "+queue_name+"_is_busy, C_VVC_LABELS);\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("      -- Execute the fetched command\n")
    file_handle.write("      -------------------------------------------------------------------------\n")
    file_handle.write("      case v_cmd.operation is  -- Only operations in the dedicated record are relevant\n")
    file_handle.write("        --<USER_INPUT>: Insert BFM procedure calls here\n")
    file_handle.write("        -- Example of pipelined step used for read operations on the Avalon interface:\n")
    file_handle.write("        -- when READ =>\n")
    file_handle.write("        --   -- Initiate read response\n")
    file_handle.write("        --   avalon_mm_read_response(addr_value    => v_normalised_addr,\n")
    file_handle.write("        --                           data_value    => v_read_data(GC_DATA_WIDTH-1 downto 0),\n")
    file_handle.write("        --                           msg           => format_msg(v_cmd),\n")
    file_handle.write("        --                           clk           => clk,\n")
    file_handle.write("        --                           avalon_mm_if  => avalon_mm_vvc_master_if,\n")
    file_handle.write("        --                           scope         => C_SCOPE,\n")
    file_handle.write("        --                           msg_id_panel  => vvc_config.msg_id_panel,\n")
    file_handle.write("        --                           config        => vvc_config.bfm_config);\n")
    file_handle.write("        --   -- Store the result\n")
    file_handle.write("        --   work.td_vvc_entity_support_pkg.store_result(result_queue => result_queue,\n")
    file_handle.write("        --                                               cmd_idx      => v_cmd.cmd_idx,\n")
    file_handle.write("        --                                               data         => v_read_data);\n")
    print_linefeed(file_handle)
    file_handle.write("        -- when CHECK =>\n")
    file_handle.write("        --   -- Initiate check response\n")
    file_handle.write("        --   avalon_mm_check_response(addr_value    => v_normalised_addr,\n")
    file_handle.write("        --                            data_exp      => v_normalised_data,\n")
    file_handle.write("        --                            msg           => format_msg(v_cmd),\n")
    file_handle.write("        --                            clk           => clk,\n")
    file_handle.write("        --                            avalon_mm_if  => avalon_mm_vvc_master_if,\n")
    file_handle.write("        --                            alert_level   => v_cmd.alert_level,\n")
    file_handle.write("        --                            scope         => C_SCOPE,\n")
    file_handle.write("        --                            msg_id_panel  => vvc_config.msg_id_panel,\n")
    file_handle.write("        --                            config        => vvc_config.bfm_config);\n")
    print_linefeed(file_handle)
    # Bug fix: the generated "when others =>" line was missing its newline,
    # which fused it with the following tb_error(...) line in the output VHDL.
    file_handle.write("          when others =>\n")
    file_handle.write("            tb_error(\"Unsupported local command received for execution: '\" & to_string(v_cmd.operation) & \"'\", C_SCOPE);\n")
    print_linefeed(file_handle)
    file_handle.write("      end case;\n")
    print_linefeed(file_handle)
    file_handle.write("      last_read_response_idx_executed <= v_cmd.cmd_idx;\n")
    print_linefeed(file_handle)
    file_handle.write("    end loop;\n")
    print_linefeed(file_handle)
    file_handle.write("  end process;\n")
    print_linefeed(file_handle)
    file_handle.write(division_line+"\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    print_linefeed(file_handle)
def add_vvc_terminator(file_handle):
    """Emit the command termination handler section of the generated VVC
    architecture (the flag_handler instantiation for terminate_current_cmd)."""
    section_lines = (
        division_line,
        "-- Command termination handler",
        "-- - Handles the termination request record (sets and resets terminate flag on request)",
        division_line,
        "  cmd_terminator : uvvm_vvc_framework.ti_vvc_framework_support_pkg.flag_handler(terminate_current_cmd);"
        " -- flag: is_active, set, reset",
        division_line,
    )
    for section_line in section_lines:
        file_handle.write(section_line + "\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
def add_end_of_architecture(file_handle):
    """Terminate the generated VVC architecture body with 'end behave;'."""
    file_handle.write("end behave;\n")
    for _ in range(2):
        print_linefeed(file_handle)
def add_leaf_vvc_entity(file_handle, vvc_name, channel):
    """Emit the instantiation of one leaf VVC entity (one channel) inside the
    generated VVC wrapper, including its generic and port maps."""
    name_lc = vvc_name.lower()
    name_uc = vvc_name.upper()
    chan_lc = channel.lower()
    chan_uc = channel.upper()
    print_linefeed(file_handle)
    file_handle.write(f"  -- {name_uc} {chan_uc} VVC\n")
    file_handle.write(f"  i1_{name_lc}_{chan_lc}: entity work.{name_lc}_{chan_lc}_vvc\n")
    file_handle.write("  generic map(\n")
    file_handle.write("  --<USER_INPUT> Insert interface specific generic constants here\n")
    file_handle.write("  -- Example: \n")
    file_handle.write("  -- GC_DATA_WIDTH                         => GC_DATA_WIDTH,\n")
    file_handle.write("    GC_INSTANCE_IDX                       => GC_INSTANCE_IDX,\n")
    file_handle.write(f"    GC_CHANNEL                            => {chan_uc},\n")
    file_handle.write(f"    GC_{name_uc}_BFM_CONFIG{fill_with_n_spaces(len(vvc_name), 28)}=> GC_{name_uc}_BFM_CONFIG,\n")
    file_handle.write("    GC_CMD_QUEUE_COUNT_MAX                => GC_CMD_QUEUE_COUNT_MAX,\n")
    file_handle.write("    GC_CMD_QUEUE_COUNT_THRESHOLD          => GC_CMD_QUEUE_COUNT_THRESHOLD,\n")
    file_handle.write("    GC_CMD_QUEUE_COUNT_THRESHOLD_SEVERITY => GC_CMD_QUEUE_COUNT_THRESHOLD_SEVERITY\n")
    file_handle.write("  )\n")
    file_handle.write("  port map(\n")
    file_handle.write("  --<USER_INPUT> Please insert the proper interface needed for this leaf VVC\n")
    file_handle.write("  -- Example:\n")
    file_handle.write(f"  -- {name_lc}_vvc_{chan_lc}         => {name_lc}_vvc_if.{name_lc}_vvc_{chan_lc},\n")
    file_handle.write("  -- rst                   => rst,  -- Optional VVC Reset\n")
    file_handle.write("    clk                   => clk\n")
    file_handle.write("  );\n")
    print_linefeed(file_handle)
def add_vvc_cmd_pkg_includes(file_handle):
    """Emit the library/context clauses at the top of the generated
    vvc_cmd_pkg, one blank line after each clause group."""
    clause_groups = (
        ("library ieee;",
         "use ieee.std_logic_1164.all;",
         "use ieee.numeric_std.all;"),
        ("library uvvm_util;",
         "context uvvm_util.uvvm_util_context;"),
        ("library uvvm_vvc_framework;",
         "use uvvm_vvc_framework.ti_vvc_framework_support_pkg.all;"),
    )
    for group in clause_groups:
        for clause in group:
            file_handle.write(clause + "\n")
        print_linefeed(file_handle)
    file_handle.write(division_line + "\n")
    file_handle.write(division_line + "\n")
def add_vvc_cmd_pkg_header(file_handle):
    """Write the declarative region of the generated vvc_cmd_pkg: the
    t_operation enum, the command record type and its default, the result
    types, and the shared variables used for VVC <-> sequencer handshaking.

    Args:
        file_handle: writable file object receiving the generated VHDL code.
    """
    file_handle.write("package vvc_cmd_pkg is\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    # t_operation: common VVC operations plus a user-extension hook for the
    # BFM-specific operations (e.g. TRANSMIT/RECEIVE/EXPECT).
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- t_operation\n")
    file_handle.write("  -- - VVC and BFM operations\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  type t_operation is (\n")
    file_handle.write("    NO_OPERATION,\n")
    file_handle.write("    AWAIT_COMPLETION,\n")
    file_handle.write("    AWAIT_ANY_COMPLETION,\n")
    file_handle.write("    ENABLE_LOG_MSG,\n")
    file_handle.write("    DISABLE_LOG_MSG,\n")
    file_handle.write("    FLUSH_COMMAND_QUEUE,\n")
    file_handle.write("    FETCH_RESULT,\n")
    file_handle.write("    INSERT_DELAY,\n")
    file_handle.write("    TERMINATE_CURRENT_COMMAND\n")
    file_handle.write("    --<USER_INPUT> Expand this type with enums for BFM procedures.\n")
    file_handle.write("    -- Example: \n")
    file_handle.write("    -- TRANSMIT, RECEIVE, EXPECT\n")
    file_handle.write("  );\n")
    print_linefeed(file_handle)
    # Size constants: note C_VVC_CMD_DATA_MAX_LENGTH is emitted *uncommented*
    # (unlike the ADDR example) because t_vvc_result below depends on it.
    file_handle.write("  --<USER_INPUT> Create constants for the maximum sizes to use in this VVC.\n")
    file_handle.write("  -- You can create VVCs with smaller sizes than these constants, but not larger.\n")
    file_handle.write("  -- For example, given a VVC with parallel data bus and address bus, constraints should be "
                      "added for maximum data length\n")
    file_handle.write("  -- and address length \n")
    file_handle.write("  -- Example:\n")
    file_handle.write("  constant C_VVC_CMD_DATA_MAX_LENGTH   : natural := 8;\n")
    file_handle.write("  -- constant C_VVC_CMD_ADDR_MAX_LENGTH   : natural := 8;\n")
    file_handle.write("  constant C_VVC_CMD_STRING_MAX_LENGTH : natural := 300;\n")
    print_linefeed(file_handle)
    # Command record transported from sequencer to VVC (user fields + the
    # fields every VVC needs for the common framework operations).
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- t_vvc_cmd_record\n")
    file_handle.write("  -- - Record type used for communication with the VVC\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  type t_vvc_cmd_record is record\n")
    file_handle.write("    -- VVC dedicated fields\n")
    file_handle.write("    --<USER_INPUT> Insert all data types needed to transport data to the BFM here.\n")
    file_handle.write("    -- This includes data field, address field, constraints (e.g. timeout), etc.\n")
    file_handle.write("    -- Example: \n")
    file_handle.write("    -- data                : std_logic_vector(C_VVC_CMD_DATA_MAX_LENGTH-1 downto 0);\n")
    file_handle.write("    -- max_receptions      : integer;\n")
    file_handle.write("    -- timeout             : time;\n")
    file_handle.write("    -- Common VVC fields\n")
    file_handle.write("    operation           : t_operation;\n")
    file_handle.write("    proc_call           : string(1 to C_VVC_CMD_STRING_MAX_LENGTH);\n")
    file_handle.write("    msg                 : string(1 to C_VVC_CMD_STRING_MAX_LENGTH);\n")
    file_handle.write("    cmd_idx             : natural;\n")
    file_handle.write("    command_type        : t_immediate_or_queued;\n")
    file_handle.write("    msg_id              : t_msg_id;\n")
    file_handle.write("    gen_integer_array   : t_integer_array(0 to 1); -- Increase array length if needed\n")
    file_handle.write("    gen_boolean         : boolean; -- Generic boolean\n")
    file_handle.write("    timeout             : time;\n")
    file_handle.write("    alert_level         : t_alert_level;\n")
    file_handle.write("    delay               : time;\n")
    file_handle.write("    quietness           : t_quietness;\n")
    file_handle.write("  end record;\n")
    print_linefeed(file_handle)
    file_handle.write("  constant C_VVC_CMD_DEFAULT : t_vvc_cmd_record := (\n")
    file_handle.write("    --<USER_INPUT> Set the fields you added to the t_vvc_cmd_record above to their default "
                      "value here\n")
    file_handle.write("    -- Example:\n")
    file_handle.write("    -- data                => (others => '0'),\n")
    file_handle.write("    -- max_receptions      => 1,\n")
    file_handle.write("    -- timeout             => 0 ns,\n")
    file_handle.write("    -- Common VVC fields\n")
    file_handle.write("    operation           => NO_OPERATION,\n")
    file_handle.write("    proc_call           => (others => NUL),\n")
    file_handle.write("    msg                 => (others => NUL),\n")
    file_handle.write("    cmd_idx             => 0,\n")
    file_handle.write("    command_type        => NO_COMMAND_TYPE,\n")
    file_handle.write("    msg_id              => NO_ID,\n")
    file_handle.write("    gen_integer_array   => (others => -1),\n")
    file_handle.write("    gen_boolean         => false,\n")
    file_handle.write("    timeout             => 0 ns,\n")
    file_handle.write("    alert_level         => FAILURE,\n")
    file_handle.write("    delay               => 0 ns,\n")
    file_handle.write("    quietness           => NON_QUIET\n")
    file_handle.write("  );\n")
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- shared_vvc_cmd\n")
    file_handle.write("  -- - Shared variable used for transmitting VVC commands\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  shared variable shared_vvc_cmd : t_vvc_cmd_record := C_VVC_CMD_DEFAULT;\n")
    print_linefeed(file_handle)
    # Result transport types: t_vvc_result is what store_result/fetch_result
    # move between the VVC executor and the sequencer.
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- t_vvc_result, t_vvc_result_queue_element, t_vvc_response and shared_vvc_response :\n")
    file_handle.write("  -- \n")
    file_handle.write("  -- - Used for storing the result of a BFM procedure called by the VVC,\n")
    file_handle.write("  --   so that the result can be transported from the VVC to for example a sequencer via\n")
    file_handle.write("  --   fetch_result() as described in VVC_Framework_common_methods_QuickRef\n")
    file_handle.write("  -- \n")
    file_handle.write("  -- - t_vvc_result includes the return value of the procedure in the BFM.\n")
    file_handle.write("  --   It can also be defined as a record if multiple values shall be transported from the BFM\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  subtype  t_vvc_result is std_logic_vector(C_VVC_CMD_DATA_MAX_LENGTH-1 downto 0);\n")
    print_linefeed(file_handle)
    file_handle.write("  type t_vvc_result_queue_element is record\n")
    file_handle.write("    cmd_idx       : natural;   -- from UVVM handshake mechanism\n")
    file_handle.write("    result        : t_vvc_result;\n")
    file_handle.write("  end record;\n")
    print_linefeed(file_handle)
    file_handle.write("  type t_vvc_response is record\n")
    file_handle.write("    fetch_is_accepted  : boolean;\n")
    file_handle.write("    transaction_result : t_transaction_result;\n")
    file_handle.write("    result             : t_vvc_result;\n")
    file_handle.write("  end record;\n")
    print_linefeed(file_handle)
    file_handle.write("  shared variable shared_vvc_response : t_vvc_response;\n")
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- t_last_received_cmd_idx : \n")
    file_handle.write("  -- - Used to store the last queued cmd in vvc interpreter.\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  type t_last_received_cmd_idx is array (t_channel range <>,natural range <>) of integer;\n")
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- shared_vvc_last_received_cmd_idx\n")
    file_handle.write("  -- - Shared variable used to get last queued index from vvc to sequencer\n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  shared variable shared_vvc_last_received_cmd_idx : t_last_received_cmd_idx"
                      "(t_channel'left to t_channel'right, 0 to C_MAX_VVC_INSTANCE_NUM) := (others => (others => -1));\n")
    print_linefeed(file_handle)
    file_handle.write("end package vvc_cmd_pkg;\n")
    print_linefeed(file_handle)
def add_vvc_cmd_pkg_body(file_handle):
    """Emit the (empty) package body of the generated vvc_cmd_pkg."""
    print_linefeed(file_handle)
    for body_line in ("package body vvc_cmd_pkg is\n",
                      "end package body vvc_cmd_pkg;\n"):
        file_handle.write(body_line)
    print_linefeed(file_handle)
def add_methods_pkg_includes(file_handle, vvc_name):
    """Emit the library/context/use clauses at the top of the generated
    vvc_methods_pkg, including the VVC's own BFM package."""
    bfm_pkg_name = vvc_name.lower() + "_bfm_pkg"
    clause_groups = (
        ("library ieee;",
         "use ieee.std_logic_1164.all;",
         "use ieee.numeric_std.all;"),
        ("library uvvm_util;",
         "context uvvm_util.uvvm_util_context;"),
        ("library uvvm_vvc_framework;",
         "use uvvm_vvc_framework.ti_vvc_framework_support_pkg.all;"),
        ("use work." + bfm_pkg_name + ".all;",
         "use work.vvc_cmd_pkg.all;",
         "use work.td_target_support_pkg.all;"),
    )
    for group in clause_groups:
        for clause in group:
            file_handle.write(clause + "\n")
        print_linefeed(file_handle)
    file_handle.write(division_line + "\n")
    file_handle.write(division_line + "\n")
def add_methods_pkg_header(file_handle, vvc_name, vvc_channels):
    """Write the declarative region of the generated vvc_methods_pkg:
    VVC name/target constants, the t_vvc_config / t_vvc_status /
    t_transaction_info records with their defaults and shared-variable
    arrays, plus commented example procedure declarations.

    Args:
        file_handle:  writable file object receiving the generated VHDL code.
        vvc_name:     VVC name, used to derive constant/alias/procedure names.
        vvc_channels: list of channels; with a single channel the shared
                      arrays are one-dimensional, otherwise a t_channel
                      dimension is added.
    """
    file_handle.write("package vvc_methods_pkg is\n")
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- Types and constants for the "+vvc_name.upper()+" VVC \n")
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  constant C_VVC_NAME : string := \""+vvc_name.upper()+"_VVC\";\n")
    print_linefeed(file_handle)
    # len() instead of the non-idiomatic vvc_name.__len__() (same value).
    file_handle.write("  signal "+vvc_name.upper()+"_VVCT"+fill_with_n_spaces(len(vvc_name),13)+
                      ": t_vvc_target_record := set_vvc_target_defaults(C_VVC_NAME);\n")
    file_handle.write("  alias  THIS_VVCT         : t_vvc_target_record is "+vvc_name.upper()+"_VVCT;\n")
    file_handle.write("  alias  t_bfm_config is t_"+vvc_name.lower()+"_bfm_config;\n")
    print_linefeed(file_handle)
    file_handle.write("  -- Type found in UVVM-Util types_pkg\n")
    file_handle.write("  constant C_"+vvc_name.upper()+"_INTER_BFM_DELAY_DEFAULT : t_inter_bfm_delay := (\n")
    file_handle.write("    delay_type                         => NO_DELAY,\n")
    file_handle.write("    delay_in_time                      => 0 ns,\n")
    file_handle.write("    inter_bfm_delay_violation_severity => WARNING\n")
    file_handle.write("  );\n")
    print_linefeed(file_handle)
    file_handle.write("  type t_vvc_config is\n")
    file_handle.write("  record\n")
    file_handle.write("    inter_bfm_delay                       : t_inter_bfm_delay;-- Minimum delay between BFM "+
                      "accesses from the VVC. If parameter delay_type is set to NO_DELAY, BFM accesses will be back to back, i.e. no delay.\n")
    file_handle.write("    cmd_queue_count_max                   : natural;        -- Maximum pending number in command "+
                      "queue before queue is full. Adding additional commands will result in an ERROR.\n")
    file_handle.write("    cmd_queue_count_threshold             : natural;        -- An alert with severity 'cmd_queue_count_threshold_severity' "+
                      "will be issued if command queue exceeds this count. Used for early warning if command queue is almost full. Will be ignored if set to 0.\n")
    file_handle.write("    cmd_queue_count_threshold_severity    : t_alert_level;  -- Severity of alert to be initiated if exceeding cmd_queue_count_threshold\n")
    file_handle.write("    result_queue_count_max                : natural;\n")
    file_handle.write("    result_queue_count_threshold_severity : t_alert_level;\n")
    file_handle.write("    result_queue_count_threshold          : natural;\n")
    file_handle.write("    bfm_config                            : t_"+vvc_name.lower()+"_bfm_config;  -- Configuration for the BFM. See BFM quick reference\n")
    file_handle.write("    msg_id_panel                          : t_msg_id_panel; -- VVC dedicated message ID panel\n")
    file_handle.write("  end record;\n")
    print_linefeed(file_handle)
    # Single-channel VVCs index config/status/transaction arrays by instance
    # only; multi-channel VVCs add a t_channel dimension.
    if len(vvc_channels) == 1:
        file_handle.write("  type t_vvc_config_array is array (natural range <>) of t_vvc_config;\n")
    else:
        file_handle.write("  type t_vvc_config_array is array (t_channel range <>, natural range <>) of t_vvc_config;\n")
    print_linefeed(file_handle)
    file_handle.write("  constant C_"+vvc_name.upper()+"_VVC_CONFIG_DEFAULT : t_vvc_config := (\n")
    file_handle.write("    inter_bfm_delay                       => C_"+vvc_name.upper()+"_INTER_BFM_DELAY_DEFAULT,\n")
    file_handle.write("    cmd_queue_count_max                   => C_CMD_QUEUE_COUNT_MAX, -- from adaptation package\n")
    file_handle.write("    cmd_queue_count_threshold             => C_CMD_QUEUE_COUNT_THRESHOLD,\n")
    file_handle.write("    cmd_queue_count_threshold_severity    => C_CMD_QUEUE_COUNT_THRESHOLD_SEVERITY,\n")
    file_handle.write("    result_queue_count_max                => C_RESULT_QUEUE_COUNT_MAX,\n")
    file_handle.write("    result_queue_count_threshold_severity => C_RESULT_QUEUE_COUNT_THRESHOLD_SEVERITY,\n")
    file_handle.write("    result_queue_count_threshold          => C_RESULT_QUEUE_COUNT_THRESHOLD,\n")
    file_handle.write("    bfm_config                            => C_"+vvc_name.upper()+"_BFM_CONFIG_DEFAULT,\n")
    file_handle.write("    msg_id_panel                          => C_VVC_MSG_ID_PANEL_DEFAULT\n")
    file_handle.write("  );\n")
    print_linefeed(file_handle)
    file_handle.write("  type t_vvc_status is\n")
    file_handle.write("  record\n")
    file_handle.write("    current_cmd_idx  : natural;\n")
    file_handle.write("    previous_cmd_idx : natural;\n")
    file_handle.write("    pending_cmd_cnt  : natural;\n")
    file_handle.write("  end record;\n")
    print_linefeed(file_handle)
    if len(vvc_channels) == 1:
        file_handle.write("  type t_vvc_status_array is array (natural range <>) of t_vvc_status;\n")
    else:
        file_handle.write("  type t_vvc_status_array is array (t_channel range <>, natural range <>) of t_vvc_status;\n")
    print_linefeed(file_handle)
    file_handle.write("  constant C_VVC_STATUS_DEFAULT : t_vvc_status := (\n")
    file_handle.write("    current_cmd_idx  => 0,\n")
    file_handle.write("    previous_cmd_idx => 0,\n")
    file_handle.write("    pending_cmd_cnt  => 0\n")
    file_handle.write("  );\n")
    print_linefeed(file_handle)
    file_handle.write("  -- Transaction information to include in the wave view during simulation\n")
    file_handle.write("  type t_transaction_info is\n")
    file_handle.write("  record\n")
    file_handle.write("    operation       : t_operation;\n")
    file_handle.write("    msg             : string(1 to C_VVC_CMD_STRING_MAX_LENGTH);\n")
    file_handle.write("    --<USER_INPUT> Fields that could be useful to track in the waveview can be placed in this "
                      "record.\n")
    file_handle.write("    -- Example:\n")
    file_handle.write("    -- addr            : unsigned(C_VVC_CMD_ADDR_MAX_LENGTH-1 downto 0);\n")
    file_handle.write("    -- data            : std_logic_vector(C_VVC_CMD_DATA_MAX_LENGTH-1 downto 0);\n")
    file_handle.write("  end record;\n")
    print_linefeed(file_handle)
    if len(vvc_channels) == 1:
        file_handle.write("  type t_transaction_info_array is array (natural range <>) of "
                          "t_transaction_info;\n")
    else:
        file_handle.write("  type t_transaction_info_array is array (t_channel range <>, "
                          "natural range <>) of t_transaction_info;\n")
    print_linefeed(file_handle)
    file_handle.write("  constant C_TRANSACTION_INFO_DEFAULT : t_transaction_info := (\n")
    file_handle.write("    --<USER_INPUT> Set the data fields added to the t_transaction_info record to \n")
    file_handle.write("    -- their default values here.\n")
    file_handle.write("    -- Example:\n")
    file_handle.write("    -- addr                => (others => '0'),\n")
    file_handle.write("    -- data                => (others => '0'),\n")
    # Bug fix: a stray trailing comma after this call previously turned the
    # statement into a discarded one-element tuple.
    file_handle.write("    operation           =>  NO_OPERATION,\n")
    file_handle.write("    msg                 => (others => ' ')\n")
    file_handle.write("  );\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    if len(vvc_channels) == 1:
        file_handle.write("  shared variable shared_"+vvc_name.lower()+"_vvc_config : t_vvc_config_array(0 to "
                          "C_MAX_VVC_INSTANCE_NUM-1) := (others => C_"+vvc_name.upper()+"_VVC_CONFIG_DEFAULT);\n")
        file_handle.write("  shared variable shared_"+vvc_name.lower()+"_vvc_status : t_vvc_status_array(0 to "
                          "C_MAX_VVC_INSTANCE_NUM-1) := (others => C_VVC_STATUS_DEFAULT);\n")
        file_handle.write("  shared variable shared_"+vvc_name.lower()+"_transaction_info : "
                          "t_transaction_info_array(0 to C_MAX_VVC_INSTANCE_NUM-1) := "
                          "(others => C_TRANSACTION_INFO_DEFAULT);\n")
    else:
        file_handle.write("  shared variable shared_"+vvc_name.lower()+"_vvc_config : t_vvc_config_array(t_channel'left"
                          " to t_channel'right, 0 to C_MAX_VVC_INSTANCE_NUM-1) := (others => (others => "
                          "C_"+vvc_name.upper()+"_VVC_CONFIG_DEFAULT));\n")
        file_handle.write("  shared variable shared_"+vvc_name.lower()+"_vvc_status : t_vvc_status_array(t_channel'left"
                          " to t_channel'right, 0 to C_MAX_VVC_INSTANCE_NUM-1) := (others => (others => "
                          "C_VVC_STATUS_DEFAULT));\n")
        file_handle.write("  shared variable shared_"+vvc_name.lower()+"_transaction_info : "
                          "t_transaction_info_array(t_channel'left to t_channel'right, 0 to "
                          "C_MAX_VVC_INSTANCE_NUM-1) := (others => (others => C_TRANSACTION_INFO_DEFAULT));\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- Methods dedicated to this VVC \n")
    file_handle.write("  -- - These procedures are called from the testbench in order to queue BFM calls \n")
    file_handle.write("  --   in the VVC command queue. The VVC will store and forward these calls to the\n")
    # Typo fix in the generated comment: "from" -> "front".
    file_handle.write("  --   "+vvc_name.upper()+" BFM when the command is at the front of the VVC command queue.\n")
    file_handle.write("  "+division_line+"\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("  --<USER_INPUT> Please insert the VVC procedure declarations here \n")
    file_handle.write("  --Example with single VVC channel: \n")
    file_handle.write("  -- procedure "+vvc_name.lower()+"_write(\n")
    file_handle.write("  --   signal VVCT               : inout t_vvc_target_record;\n")
    file_handle.write("  --   constant vvc_instance_idx : in integer;\n")
    file_handle.write("  --   constant addr             : in unsigned;\n")
    file_handle.write("  --   constant data             : in std_logic_vector;\n")
    file_handle.write("  --   constant msg              : in string\n")
    file_handle.write("  -- );\n")
    print_linefeed(file_handle)
    file_handle.write("  --Example with multiple VVC channels: \n")
    file_handle.write("  -- procedure "+vvc_name.lower()+"_write(\n")
    file_handle.write("  --   signal VVCT               : inout t_vvc_target_record;\n")
    file_handle.write("  --   constant vvc_instance_idx : in integer;\n")
    file_handle.write("  --   constant channel          : in t_channel;\n")
    file_handle.write("  --   constant addr             : in unsigned;\n")
    file_handle.write("  --   constant data             : in std_logic_vector;\n")
    file_handle.write("  --   constant msg              : in string\n")
    file_handle.write("  -- );\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("end package vvc_methods_pkg;\n")
    print_linefeed(file_handle)
def add_methods_pkg_body(file_handle, vvc_name):
    """Write the body of the generated vvc_methods_pkg: commented example
    implementations (single-channel write, multi-channel receive) showing
    how testbench procedures forward commands to the VVC executor.

    Args:
        file_handle: writable file object receiving the generated VHDL code.
        vvc_name:    VVC name, used in the example procedure names.
    """
    print_linefeed(file_handle)
    file_handle.write("package body vvc_methods_pkg is\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("  "+division_line+"\n")
    file_handle.write("  -- Methods dedicated to this VVC\n")
    file_handle.write("  "+division_line+"\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    # All remaining output is a commented-out template for the user to adapt.
    file_handle.write("  --<USER_INPUT> Please insert the VVC procedure implementations here.\n")
    file_handle.write("  -- These procedures will be used to forward commands to the VVC executor, which will\n")
    file_handle.write("  -- call the corresponding BFM procedures. \n")
    file_handle.write("  -- Example using single channel:\n")
    file_handle.write("  -- procedure "+vvc_name.lower()+"_write( \n")
    file_handle.write("  --   signal VVCT               : inout t_vvc_target_record;\n")
    file_handle.write("  --   constant vvc_instance_idx : in integer;\n")
    file_handle.write("  --   constant addr             : in unsigned;\n")
    file_handle.write("  --   constant data             : in std_logic_vector;\n")
    file_handle.write("  --   constant msg              : in string\n")
    file_handle.write("  -- ) is\n")
    file_handle.write("  --   constant proc_name : string := \""+vvc_name.lower()+"_write\";\n")
    file_handle.write("  --   constant proc_call : string := proc_name & \"(\" & to_string(VVCT, "
                      "vvc_instance_idx)  -- First part common for all\n")
    file_handle.write("  --            & \", \" & to_string(addr, HEX, AS_IS, INCL_RADIX) & \", \" & "
                      "to_string(data, HEX, AS_IS, INCL_RADIX) & \")\";\n")
    file_handle.write("  --   constant v_normalised_addr : unsigned(C_VVC_CMD_ADDR_MAX_LENGTH-1 downto 0) := \n"
                      "  --       normalize_and_check(addr, shared_vvc_cmd.addr, ALLOW_WIDER_NARROWER, \"addr\", \"shared_vvc_cmd.addr\", "
                      "proc_call & \" called with to wide addr. \" & msg);\n")
    file_handle.write("  --   constant v_normalised_data : std_logic_vector(C_VVC_CMD_DATA_MAX_LENGTH-1 downto 0) := \n"
                      "  --       normalize_and_check(data, shared_vvc_cmd.data, ALLOW_WIDER_NARROWER, \"data\", \"shared_vvc_cmd.data\", "
                      "proc_call & \" called with to wide data. \" & msg);\n")
    file_handle.write("  -- begin\n")
    file_handle.write("  --   -- Create command by setting common global 'VVCT' signal record and dedicated VVC 'shared_vvc_cmd' record\n")
    file_handle.write("  --   -- locking semaphore in set_general_target_and_command_fields to gain exclusive right to VVCT and shared_vvc_cmd\n")
    file_handle.write("  --   -- semaphore gets unlocked in await_cmd_from_sequencer of the targeted VVC\n")
    file_handle.write("  --   set_general_target_and_command_fields(VVCT, vvc_instance_idx, proc_call, msg, "
                      "QUEUED, WRITE);\n")
    file_handle.write("  --   shared_vvc_cmd.addr := v_normalised_addr;\n")
    file_handle.write("  --   shared_vvc_cmd.data := v_normalised_data;\n")
    file_handle.write("  --   send_command_to_vvc(VVCT);\n")
    file_handle.write("  -- end procedure;\n")
    print_linefeed(file_handle)
    file_handle.write("  -- Example using multiple channels:\n")
    file_handle.write("  -- procedure "+vvc_name.lower()+"_receive(\n")
    file_handle.write("  --   signal VVCT               : inout t_vvc_target_record;\n")
    file_handle.write("  --   constant vvc_instance_idx : in integer;\n")
    file_handle.write("  --   constant channel          : in t_channel;\n")
    file_handle.write("  --   constant msg              : in string;\n")
    file_handle.write("  --   constant alert_level      : in t_alert_level := ERROR\n")
    file_handle.write("  -- ) is\n")
    file_handle.write("  --   constant proc_name : string := \""+vvc_name.lower()+"_receive\";\n")
    file_handle.write("  --   constant proc_call : string := proc_name & \"(\" & "
                      "to_string(VVCT, vvc_instance_idx, channel) & \")\";\n")
    file_handle.write("  -- begin\n")
    file_handle.write("  --   -- Create command by setting common global 'VVCT' signal record and dedicated VVC 'shared_vvc_cmd' record\n")
    file_handle.write("  --   -- locking semaphore in set_general_target_and_command_fields to gain exclusive right to VVCT and shared_vvc_cmd\n")
    file_handle.write("  --   -- semaphore gets unlocked in await_cmd_from_sequencer of the targeted VVC\n")
    file_handle.write("  --   set_general_target_and_command_fields(VVCT, vvc_instance_idx, channel, proc_call, msg, "
                      "QUEUED, RECEIVE);\n")
    file_handle.write("  --   shared_vvc_cmd.operation := RECEIVE;\n")
    file_handle.write("  --   shared_vvc_cmd.alert_level := alert_level;\n")
    file_handle.write("  --   send_command_to_vvc(VVCT);\n")
    file_handle.write("  -- end procedure;\n")
    print_linefeed(file_handle)
    print_linefeed(file_handle)
    file_handle.write("end package body vvc_methods_pkg;\n")
def add_bfm_pkg_includes(file_handle):
  """Write the VHDL library/context clauses for the BFM package to file_handle."""
  file_handle.write("library ieee;\n")
  file_handle.write("use ieee.std_logic_1164.all;\n")
  file_handle.write("use ieee.numeric_std.all;\n")
  print_linefeed(file_handle)
  file_handle.write("library uvvm_util;\n")
  file_handle.write("context uvvm_util.uvvm_util_context;\n")
  print_linefeed(file_handle)
  # Two divider comment lines separate the include section from the package.
  file_handle.write(division_line+"\n")
  file_handle.write(division_line+"\n")
def add_bfm_pkg_header(file_handle, vvc_name):
  """Write the <vvc_name>_bfm_pkg package declaration to file_handle.

  Emits the scope constant plus commented-out templates (marked
  --<USER_INPUT>) for the BFM signal interface record, the BFM config
  record and its default constant, and the BFM procedure declarations.
  """
  file_handle.write("package "+vvc_name.lower()+"_bfm_pkg is\n")
  print_linefeed(file_handle)
  file_handle.write("  " + division_line+"\n")
  file_handle.write("  -- Types and constants for "+vvc_name.upper()+" BFM \n")
  file_handle.write("  " + division_line+"\n")
  file_handle.write("  constant C_SCOPE : string := \""+vvc_name.upper()+" BFM\";\n")
  print_linefeed(file_handle)
  # Template: optional record bundling the DUT-facing BFM signals.
  file_handle.write("  -- Optional interface record for BFM signals\n")
  file_handle.write("  -- type t_"+vvc_name.lower()+"_if is record\n")
  file_handle.write("    --<USER_INPUT> Insert all BFM signals here\n")
  file_handle.write("    -- Example:\n")
  file_handle.write("    --   cs    : std_logic;        -- to dut\n")
  file_handle.write("    --   addr  : unsigned;         -- to dut\n")
  file_handle.write("    --   rena  : std_logic;        -- to dut\n")
  file_handle.write("    --   wena  : std_logic;        -- to dut\n")
  file_handle.write("    --   wdata : std_logic_vector; -- to dut\n")
  file_handle.write("    --   ready : std_logic;        -- from dut\n")
  file_handle.write("    --   rdata : std_logic_vector; -- from dut\n")
  file_handle.write("  -- end record;\n")
  print_linefeed(file_handle)
  # Template: BFM configuration record; only clock_period is mandatory.
  file_handle.write("  -- Configuration record to be assigned in the test harness.\n")
  file_handle.write("  type t_"+vvc_name.lower()+"_bfm_config is\n")
  file_handle.write("  record\n")
  file_handle.write("    --<USER_INPUT> Insert all BFM config parameters here\n")
  file_handle.write("    -- Example:\n")
  file_handle.write("    -- max_wait_cycles          : integer;\n")
  file_handle.write("    -- max_wait_cycles_severity : t_alert_level;\n")
  file_handle.write("    -- id_for_bfm               : t_msg_id;\n")
  file_handle.write("    -- id_for_bfm_wait          : t_msg_id;\n")
  file_handle.write("    -- id_for_bfm_poll          : t_msg_id;\n")
  file_handle.write("    clock_period             : time;  -- Needed in the VVC\n")
  file_handle.write("  end record;\n")
  print_linefeed(file_handle)
  # Template: default value constant matching the config record above.
  file_handle.write("  -- Define the default value for the BFM config\n")
  file_handle.write("  constant C_"+vvc_name.upper()+"_BFM_CONFIG_DEFAULT : t_"+vvc_name.lower()+"_bfm_config := (\n")
  file_handle.write("    --<USER_INPUT> Insert defaults for all BFM config parameters here\n")
  file_handle.write("    -- Example:\n")
  file_handle.write("    -- max_wait_cycles          => 10,\n")
  file_handle.write("    -- max_wait_cycles_severity => failure,\n")
  file_handle.write("    -- id_for_bfm               => ID_BFM,\n")
  file_handle.write("    -- id_for_bfm_wait          => ID_BFM_WAIT,\n")
  file_handle.write("    -- id_for_bfm_poll          => ID_BFM_POLL,\n")
  file_handle.write("    clock_period             => 10 ns\n")
  file_handle.write("  );\n")
  print_linefeed(file_handle)
  print_linefeed(file_handle)
  file_handle.write("  " + division_line+"\n")
  file_handle.write("  -- BFM procedures \n")
  file_handle.write("  " + division_line+"\n")
  print_linefeed(file_handle)
  print_linefeed(file_handle)
  file_handle.write("  --<USER_INPUT> Insert BFM procedure declarations here, e.g. read and write operations\n")
  file_handle.write("  -- It is recommended to also have an init function which sets the BFM signals to their "
                    "default state\n")
  print_linefeed(file_handle)
  print_linefeed(file_handle)
  file_handle.write("end package "+vvc_name.lower()+"_bfm_pkg;\n")
  print_linefeed(file_handle)
  print_linefeed(file_handle)
  file_handle.write(division_line+"\n")
  file_handle.write(division_line+"\n")
def add_bfm_pkg_body(file_handle, vvc_name):
  """Write the (empty) <vvc_name>_bfm_pkg package body skeleton to file_handle."""
  print_linefeed(file_handle)
  file_handle.write("package body "+vvc_name.lower()+"_bfm_pkg is\n")
  print_linefeed(file_handle)
  print_linefeed(file_handle)
  # The user fills in the actual BFM procedure bodies here.
  file_handle.write("  --<USER_INPUT> Insert BFM procedure implementation here.\n")
  print_linefeed(file_handle)
  print_linefeed(file_handle)
  file_handle.write("end package body "+vvc_name.lower()+"_bfm_pkg;\n")
  print_linefeed(file_handle)
def generate_bfm_skeleton(vvc_name):
  """Generate output/<vvc_name>_bfm_pkg.vhd, a skeleton BFM package file."""
  # 'with' guarantees the file is closed even if a helper raises
  # (the original leaked the handle on an exception).
  with open("output/"+vvc_name.lower()+"_bfm_pkg.vhd", 'w') as f:
    add_vvc_header(f)
    add_bfm_pkg_includes(f)
    add_bfm_pkg_header(f, vvc_name)
    add_bfm_pkg_body(f, vvc_name)
def generate_vvc_methods_pkg_file(vvc_name, vvc_channels):
  """Generate output/vvc_methods_pkg.vhd for the given VVC name/channels."""
  # 'with' guarantees the file is closed even if a helper raises
  # (the original leaked the handle on an exception).
  with open("output/vvc_methods_pkg.vhd", 'w') as f:
    add_vvc_header(f)
    add_methods_pkg_includes(f, vvc_name)
    add_methods_pkg_header(f,vvc_name,vvc_channels)
    add_methods_pkg_body(f, vvc_name)
def generate_vvc_cmd_pkg_file():
  """Generate output/vvc_cmd_pkg.vhd (VVC command record package)."""
  # 'with' guarantees the file is closed even if a helper raises
  # (the original leaked the handle on an exception).
  with open("output/vvc_cmd_pkg.vhd", 'w') as f:
    add_vvc_header(f)
    add_vvc_cmd_pkg_includes(f)
    add_vvc_cmd_pkg_header(f)
    add_vvc_cmd_pkg_body(f)
def generate_vvc_file(vvc_name, vvc_channels):
  """Generate the VVC architecture file(s).

  One file is written per channel in vvc_channels (named
  output/<vvc>_<channel>_vvc.vhd, or output/<vvc>_vvc.vhd for the
  single-channel "NA" case).  When there is more than one channel, an
  additional wrapper entity file output/<vvc>_vvc.vhd is generated that
  instantiates the leaf VVCs.
  """
  # Create main VVC, or leaf VVCs if multiple channels
  for channel in vvc_channels:
    num_of_queues = channel.len_of_queue()
    if channel.name == "NA":
      vvc_file_name = "output/"+vvc_name.lower()+"_vvc.vhd"
    else:
      vvc_file_name = "output/"+vvc_name.lower()+"_"+channel.name.lower()+"_vvc.vhd"
    # 'with' guarantees the file is closed even if a helper raises
    # (the original leaked the handle on an exception).
    with open(vvc_file_name, 'w') as f:
      add_vvc_header(f)
      add_leaf_includes(f,vvc_name)
      add_vvc_entity(f,vvc_name,channel.name)
      add_architecture_declaration(f, vvc_name, channel)
      add_vvc_constructor(f, vvc_name)
      add_vvc_interpreter(f, channel)
      add_vvc_executor(f, channel)
      if (num_of_queues > 1):
        # Queue 0 is handled by the executor; each extra queue gets
        # its own pipeline step.
        for i in range(1, num_of_queues):
          add_vvc_pipeline_step(f, channel.queue_names[i])
      add_vvc_terminator(f)
      add_end_of_architecture(f)

  # Create wrapper if multiple channels (len() instead of __len__()).
  if len(vvc_channels) != 1:
    vvc_file_name = "output/"+vvc_name.lower()+"_vvc.vhd"
    with open(vvc_file_name, 'w') as f:
      add_vvc_header(f)
      add_wrapper_includes(f,vvc_name)
      add_vvc_entity(f,vvc_name,"NA")
      add_wrapper_architecture_declaration(f, vvc_name)
      for channel in vvc_channels:
        add_leaf_vvc_entity(f, vvc_name, channel.name)
      add_wrapper_architecture_end(f)
# Entry point for the vvc_generator script
if __name__ == '__main__':
  vvc_name = "not_set"
  number_of_channels = 1
  vvc_channels = []

  # Interactively gather the VVC name and its channel/queue structure.
  vvc_name = get_vvc_name()
  if is_multi_channel_vvc() == 'y':
    number_of_channels = get_number_of_channels()
    for i in range(number_of_channels):
      channel = Channel(get_channel_name())
      if is_multi_queue_channel() == 'y':
        number_of_queues = get_number_of_queues()
        if number_of_queues > 1:
          # Queue 0 is implicit; only the extra queues need names.
          for i in range(1, number_of_queues):
            channel.add_queue(get_queue_name())
      vvc_channels.append(channel)
  else:
    # Single-channel VVCs use the reserved channel name "NA".
    channel = Channel("NA")
    if is_multi_queue_channel() == 'y':
      number_of_queues = get_number_of_queues()
      if number_of_queues > 1:
        for i in range(1, number_of_queues):
          channel.add_queue(get_queue_name())
    vvc_channels.append(channel)

  # Make sure the output directory exists before writing any files.
  if not os.path.exists("output"):
    os.makedirs("output")

  generate_vvc_file(vvc_name, vvc_channels)
  generate_vvc_cmd_pkg_file()
  generate_vvc_methods_pkg_file(vvc_name, vvc_channels)
  generate_bfm_skeleton(vvc_name)

  print("\nThe vvc_generator script is now finished")
  print("The generated VVC can be found in the output folder")
|
{
"content_hash": "d7e492ce3f142a29739362b79d97be88",
"timestamp": "",
"source": "github",
"line_count": 1344,
"max_line_length": 244,
"avg_line_length": 59.53497023809524,
"alnum_prop": 0.5860151221645942,
"repo_name": "AndyMcC0/UVVM_All",
"id": "b1d65f858a5827ef1c93c880535cfbf2854543cb",
"size": "80022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uvvm_vvc_framework/script/vvc_generator/vvc_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90083"
},
{
"name": "Stata",
"bytes": "111075"
},
{
"name": "Tcl",
"bytes": "28575"
},
{
"name": "VHDL",
"bytes": "1365631"
}
],
"symlink_target": ""
}
|
# Python 2 example script (uses the print statement and urllib2).
# 'dict' here is the PyXB-generated binding module for the DictService
# WSDL, not the builtin type.
import dict
import urllib2

import pyxb_114.utils.domutils as domutils
from xml.dom import minidom

# Get the list of dictionaries available from the service.
port_uri = 'http://services.aonaware.com/DictService/DictService.asmx'
uri = port_uri + '/DictionaryList'
dle_xml = urllib2.urlopen(uri).read()
dle_dom = domutils.StringToDOM(dle_xml)
dle = dict.ArrayOfDictionary.createFromDOM(dle_dom)

op_path = '/DictionaryInfo'
for d in dle.Dictionary:
    # Create a REST-style query to retrieve the information about this dictionary.
    uri = '%s%s?dictId=%s' % (port_uri, op_path, d.Id)
    resp = urllib2.urlopen(uri).read()
    # The response is a simple type derived from string, so we can
    # just extract and print it.
    di_resp = dict.CreateFromDOM(domutils.StringToDOM(resp))
    # Do the "encode" garbage because one of these dictionaries has a
    # non-ASCII character
    print "%s (%s)\n%s\n" % (d.Name.encode('utf-8'), d.Id.encode('utf-8'), di_resp.encode('utf-8'))
|
{
"content_hash": "9eff538f90232ff908a6a9f8571c24d8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 99,
"avg_line_length": 42.69565217391305,
"alnum_prop": 0.7158859470468432,
"repo_name": "msherry/PyXB-1.1.4",
"id": "93c3777359743991d7844c84b1f82dcd0f1d5d66",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dictionary/showdict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6307"
},
{
"name": "Python",
"bytes": "1521054"
},
{
"name": "Shell",
"bytes": "23730"
}
],
"symlink_target": ""
}
|
"""
A simple example showing how to process RDFa from the web
"""
from rdflib import Graph
if __name__ == '__main__':
    g = Graph()
    # Fetch the page and extract the RDFa statements embedded in it.
    g.parse('http://www.worldcat.org/title/library-of-babel/oclc/44089369', format='rdfa')

    print "Books found:"

    # SPARQL over the parsed graph: English-titled schema:Book entries.
    for row in g.query("""SELECT ?title ?author WHERE {
       [ a schema:Book ;
           schema:author [ rdfs:label ?author ] ;
           schema:name ?title ]
       FILTER (LANG(?title) = 'en') } """):
        print "%s by %s"%(row.title, row.author)
|
{
"content_hash": "09b22a02fbdd074806b6cc246132ad7e",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 90,
"avg_line_length": 23.318181818181817,
"alnum_prop": 0.5769980506822612,
"repo_name": "marma/rdflib",
"id": "02112ce3c73498a7a712bb27ffc796dce645c46f",
"size": "513",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "examples/rdfa_example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "145"
},
{
"name": "HTML",
"bytes": "120202"
},
{
"name": "Python",
"bytes": "1446885"
},
{
"name": "Ruby",
"bytes": "28544"
},
{
"name": "Shell",
"bytes": "1052"
}
],
"symlink_target": ""
}
|
"""
============================================
:mod:`selectors` -- Parent selection methods
============================================
This module provides pre-defined selectors for evolutionary computations.
All selector functions have the following arguments:
- *random* -- the random number generator object
- *population* -- the population of individuals
- *args* -- a dictionary of keyword arguments
Each selector function returns the list of selected individuals.
.. note::
The *population* is really a shallow copy of the actual population of
the evolutionary computation. This means that any activities like
sorting will not affect the actual population.
.. Copyright 2012 Inspired Intelligence Initiative
.. This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
.. This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.. You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
.. module:: selectors
.. moduleauthor:: Aaron Garrett <aaron.lee.garrett@gmail.com>
"""
def default_selection(random, population, args):
    """Return the population unchanged.

    This is the identity selection scheme used as the default for an
    evolutionary computation: every individual counts as selected.  The
    *random* generator and *args* dictionary are accepted only for
    interface compatibility with the other selectors and are unused.
    """
    return population
def truncation_selection(random, population, args):
    """Deterministically select the best individuals from the population.

    The (shallow-copied) population is sorted in place from best to
    worst and the leading slice is returned.  No randomness is involved.

    Optional keyword arguments in args:

    - *num_selected* -- the number of individuals to be selected
      (default len(population))

    """
    count = args.setdefault('num_selected', len(population))
    # Best-first ordering; mutating the shallow copy is harmless.
    population.sort(reverse=True)
    best = population[:count]
    return best
def uniform_selection(random, population, args):
    """Sample individuals uniformly at random, with replacement.

    Each of the ``num_selected`` picks is an independent uniform draw
    from the population, so the same individual may appear more than
    once in the result.

    Optional keyword arguments in args:

    - *num_selected* -- the number of individuals to be selected
      (default 1)

    """
    num_selected = args.setdefault('num_selected', 1)
    last_index = len(population) - 1
    return [population[random.randint(0, last_index)]
            for _ in range(num_selected)]
def fitness_proportionate_selection(random, population, args):
    """Return fitness proportionate sampling of individuals from the population.

    Individuals are chosen stochastically with probability proportional
    to their ``fitness`` attribute ("roulette wheel" selection).  Not
    valid for minimization problems.  Sorts the (shallow-copied)
    population in place, best first.

    .. Arguments:
       random -- the random number generator object
       population -- the population of individuals
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *num_selected* -- the number of individuals to be selected (default 1)

    """
    num_selected = args.setdefault('num_selected', 1)
    len_pop = len(population)
    # Placeholder wheel, overwritten below for the supported fitness
    # sign combinations.
    psum = [i for i in range(len_pop)]
    pop_max_fit = (max(population)).fitness
    pop_min_fit = (min(population)).fitness
    # If we're actually doing minimization,
    # fitness proportionate selection is not defined.
    if pop_max_fit < pop_min_fit:
        raise ValueError('Fitness proportionate selection is not valid for minimization.')
    # Set up the roulette wheel
    if pop_max_fit == pop_min_fit:
        # All fitnesses equal: a uniform wheel.
        psum = [(index + 1) / float(len_pop) for index in range(len_pop)]
    elif (pop_max_fit > 0 and pop_min_fit >= 0) or (pop_max_fit <= 0 and pop_min_fit < 0):
        # All-nonnegative or all-negative fitness: cumulative fitness,
        # normalized by the total so the wheel ends at 1.0.
        population.sort(reverse=True)
        psum[0] = population[0].fitness
        for i in range(1, len_pop):
            psum[i] = population[i].fitness + psum[i-1]
        for i in range(len_pop):
            psum[i] /= float(psum[len_pop-1])
    # NOTE(review): when fitness values mix signs (max > 0 > min) neither
    # branch above runs and psum keeps its raw 0..len-1 ramp, which is
    # not a normalized wheel -- presumably such input is unsupported;
    # confirm with the maintainers.
    # Select the individuals
    selected = []
    for _ in range(num_selected):
        cutoff = random.random()
        # Binary search for the first wheel slot whose cumulative
        # probability exceeds the cutoff.
        lower = 0
        upper = len_pop - 1
        while(upper >= lower):
            mid = (lower + upper) // 2
            if psum[mid] > cutoff:
                upper = mid - 1
            else:
                lower = mid + 1
        # Clamp against floating-point edge cases at the wheel's ends.
        lower = max(0, min(len_pop-1, lower))
        selected.append(population[lower])
    return selected
def rank_selection(random, population, args):
    """Return a rank-based sampling of individuals from the population.

    Selection probability is proportional to an individual's rank in the
    sorted population rather than its raw fitness, so the scheme works
    for both maximization and minimization problems.  The
    (shallow-copied) population is sorted in place, worst first.

    Optional keyword arguments in args:

    - *num_selected* -- the number of individuals to be selected (default 1)

    """
    num_selected = args.setdefault('num_selected', 1)
    size = len(population)
    population.sort()
    # Cumulative roulette wheel built from the rank weights
    # 1/den, 2/den, ..., size/den with den = 1 + 2 + ... + size.
    denominator = (size * (size + 1)) / 2.0
    wheel = [(rank + 1) / denominator for rank in range(size)]
    for i in range(1, size):
        wheel[i] += wheel[i - 1]
    chosen = []
    for _ in range(num_selected):
        spin = random.random()
        # Binary search for the first slot whose cumulative weight
        # exceeds the spin value.
        low, high = 0, size - 1
        while high >= low:
            mid = (low + high) // 2
            if wheel[mid] > spin:
                high = mid - 1
            else:
                low = mid + 1
        # Clamp against floating-point edge cases at the wheel's ends.
        chosen.append(population[max(0, min(size - 1, low))])
    return chosen
def tournament_selection(random, population, args):
    """Return a tournament sampling of individuals from the population.

    For each of the ``num_selected`` picks, ``tournament_size``
    individuals are drawn without replacement and the best of them wins.
    A tournament larger than the population is clamped to the population
    size.

    Optional keyword arguments in args:

    - *num_selected* -- the number of individuals to be selected (default 1)
    - *tournament_size* -- the tournament size (default 2)

    """
    num_selected = args.setdefault('num_selected', 1)
    tournament_size = args.setdefault('tournament_size', 2)
    # A tournament can never involve more individuals than exist.
    tournament_size = min(tournament_size, len(population))
    return [max(random.sample(population, tournament_size))
            for _ in range(num_selected)]
|
{
"content_hash": "cf94901c2fe401c0eeb457e0362852cd",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 90,
"avg_line_length": 36.48547717842324,
"alnum_prop": 0.6303878084840214,
"repo_name": "saulshanabrook/pushgp.py",
"id": "c9e9a8d80a1528435d929baf89d6681aeb1dce55",
"size": "8793",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "inspyred/ec/selectors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "277825"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
import horizon
from horizon.dashboards.project import dashboard
class AccessAndSecurity(horizon.Panel):
    """Horizon panel for the project's Access & Security views."""
    # Translated display name shown in the dashboard navigation.
    name = _("Access & Security")
    # URL slug for this panel under the Project dashboard.
    slug = 'access_and_security'


# Attach the panel to the Project dashboard so Horizon can route to it.
dashboard.Project.register(AccessAndSecurity)
|
{
"content_hash": "afeca3fd487d50278044904a61443ba5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 23.166666666666668,
"alnum_prop": 0.7769784172661871,
"repo_name": "1ukash/horizon",
"id": "485e8be3135959c0d3a55ad0caa680d0531313d0",
"size": "959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horizon/dashboards/project/access_and_security/panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "234763"
},
{
"name": "Python",
"bytes": "1189371"
},
{
"name": "Shell",
"bytes": "12326"
}
],
"symlink_target": ""
}
|
"""Tests for the event filters manager."""
import unittest
from plaso.filters import manager
from tests.filters import test_lib
class FiltersManagerTest(unittest.TestCase):
  """Tests for the event filters manager."""

  def testFilterRegistration(self):
    """Tests the RegisterFilter and DeregisterFilter functions."""
    # pylint: disable=protected-access
    # Baseline count so the test is independent of pre-registered filters.
    number_of_filters = len(manager.FiltersManager._filter_classes)

    manager.FiltersManager.RegisterFilter(test_lib.TestEventFilter)
    self.assertEqual(
        len(manager.FiltersManager._filter_classes),
        number_of_filters + 1)

    # Registering the same filter class twice must raise KeyError.
    with self.assertRaises(KeyError):
      manager.FiltersManager.RegisterFilter(test_lib.TestEventFilter)

    # Deregistering restores the original count.
    manager.FiltersManager.DeregisterFilter(test_lib.TestEventFilter)
    self.assertEqual(
        len(manager.FiltersManager._filter_classes),
        number_of_filters)


if __name__ == '__main__':
  unittest.main()
|
{
"content_hash": "9e400ef6ecbdba5f902eb463d3cc9e7a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 69,
"avg_line_length": 28.424242424242426,
"alnum_prop": 0.7334754797441365,
"repo_name": "ostree/plaso",
"id": "18f60443d768c5c5e1acc9811c854fcd774bac58",
"size": "980",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/filters/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1276"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Protocol Buffer",
"bytes": "13930"
},
{
"name": "Python",
"bytes": "3133020"
},
{
"name": "Shell",
"bytes": "47305"
}
],
"symlink_target": ""
}
|
import string
# The lexer knows two classes of symbols:
# - any number of meta-symbols, which match a single meta-character each.
# this argument is a dictionary mapping meta-characters to symbol classes.
#
# - exactly one terminal symbol, which matches a run of any characters
# but ends as soon as a meta-character or a whitespace character
# (not escaped with "\") is encountered.
# whitespace characters delimit a sequence of multiple terminal symbols.
#
# this argument is a single symbol class.
#
# - optional: char_filter (character -> bool) determines if a character is
# allowed in a terminal symbol. A character that neither matches a
# meta-symbol, nor is whitespace, nor allowed in terminals, will cause the
# lexical analysis to fail.
#
# - optional: with letters=True, each terminal character becomes a terminal
# symbol. This has the same effect as adding spaces between every character
# of the input.
class lexer:
    """Tokenizer for a meta-character / terminal-symbol language.

    Arguments:
    - meta: dict mapping each meta-character to a zero-argument symbol
      class; every unescaped occurrence yields one instance of that class.
    - terminal: one-argument symbol class applied to each run of ordinary
      characters.  Runs end at unescaped whitespace or meta-characters;
      a backslash escapes the following character.
    - char_filter: optional predicate (character -> bool); a character
      that is neither a meta-character, nor whitespace, nor accepted by
      the filter raises LexError.
    - letters: if True, every terminal character becomes its own terminal
      symbol (as if separated by spaces).
    """

    def __init__(self, meta, terminal, char_filter=None, letters=False):
        assert not (set(meta.keys()) & set(string.whitespace)), 'Whitespace characters cannot be meta characters.'
        self.meta = meta
        self.terminal = terminal
        self.filter = char_filter
        self.letters = letters

    def lex(self, s):
        """Split *s* into a list of symbol instances.

        Raises LexError for a disallowed character.  A trailing
        unmatched backslash is ignored; the characters before it still
        form a terminal symbol.
        """
        tokens = []
        term = ''   # The current terminal character run.
        bs = False  # The previous character was an unconsumed backslash.
        for i, c in enumerate(s):
            # We found an unescaped backslash.
            if not bs and c == '\\':
                bs = True
            # We found an unescaped meta or whitespace character.
            elif not bs and (c in self.meta or c in string.whitespace):
                # Append the current terminal symbol (if any):
                if term:
                    tokens.append(self.terminal(term))
                    term = ''
                # Append the meta symbol (if any):
                if c in self.meta:
                    tokens.append(self.meta[c]())
            # Found another character, or one escaped by a backslash:
            elif self.filter is None or self.filter(c):
                bs = False
                term += c
                if self.letters:
                    # If terminals are single characters, append it immediately:
                    tokens.append(self.terminal(term))
                    term = ''
            else:
                raise LexError(i, c)
        # Flush the final terminal explicitly.  The original appended a
        # sentinel space instead, which a trailing backslash could escape,
        # silently dropping the last token (bug fixed).
        if term:
            tokens.append(self.terminal(term))
        return tokens
class LexError(ValueError):
    """Signals an input character that is not allowed by the lexer."""

    def __init__(self, i, c):
        self.i = i  # position of the offending character in the input
        self.c = c  # the offending character itself

    def __str__(self):
        return 'Lexical Error at #{}: {} is not allowed.'.format(self.i, self.c)
|
{
"content_hash": "3ec5efeced8b8a3e6435e4ee3072daee",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 114,
"avg_line_length": 40.614285714285714,
"alnum_prop": 0.593387266971509,
"repo_name": "cburschka/modod",
"id": "019134fbfbd22f48a741e4ab7ba78c250e3fb895",
"size": "2843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/modod/lexer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "20228"
},
{
"name": "Python",
"bytes": "110673"
},
{
"name": "Shell",
"bytes": "330"
},
{
"name": "TeX",
"bytes": "416262"
}
],
"symlink_target": ""
}
|
# --- Logging ---
LOG_LEVEL = "DEBUG"
LOG_FILE = "/var/log/security_monkey/security_monkey-deploy.log"

# --- Database (PostgreSQL connection and pool sizing) ---
SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:securitymonkeypassword@localhost:5432/secmonkey'
SQLALCHEMY_POOL_SIZE = 50
SQLALCHEMY_MAX_OVERFLOW = 15

# --- Deployment environment / addressing ---
ENVIRONMENT = 'ec2'
USE_ROUTE53 = False
FQDN = 'ec2-XX-XXX-XXX-XXX.compute-1.amazonaws.com'
API_PORT = '5000'
WEB_PORT = '443'
WEB_PATH = '/static/ui.html'
FRONTED_BY_NGINX = True
NGINX_PORT = '443'
BASE_URL = 'https://{}/'.format(FQDN)

# --- Flask / Flask-Security ---
SECRET_KEY = '<INSERT_RANDOM_STRING_HERE>'

MAIL_DEFAULT_SENDER = 'securitymonkey@example.com'
SECURITY_REGISTERABLE = True
SECURITY_CONFIRMABLE = False
SECURITY_RECOVERABLE = False
SECURITY_PASSWORD_HASH = 'bcrypt'
SECURITY_PASSWORD_SALT = '<INSERT_RANDOM_STRING_HERE>'
# Post-action redirects all land on the app root.
SECURITY_POST_LOGIN_VIEW = BASE_URL
SECURITY_POST_REGISTER_VIEW = BASE_URL
SECURITY_POST_CONFIRM_VIEW = BASE_URL
SECURITY_POST_RESET_VIEW = BASE_URL
SECURITY_POST_CHANGE_VIEW = BASE_URL

# This address gets all change notifications (i.e. 'securityteam@example.com')
SECURITY_TEAM_EMAIL = []

# These are only required if using SMTP instead of SES
EMAILS_USE_SMTP = False     # Otherwise, Use SES
SES_REGION = 'us-east-1'
MAIL_SERVER = 'smtp.example.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = 'username'
MAIL_PASSWORD = 'password'

# --- CSRF protection ---
WTF_CSRF_ENABLED = True
WTF_CSRF_SSL_STRICT = True # Checks Referer Header. Set to False for API access.
WTF_CSRF_METHODS = ['DELETE', 'POST', 'PUT', 'PATCH']
|
{
"content_hash": "674109a9c357f59c10a4d6e58a339404",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 97,
"avg_line_length": 30.914893617021278,
"alnum_prop": 0.7405368203716449,
"repo_name": "monkeysecurity/security_monkey",
"id": "6a37587dd7c350df642fb408cf2248d472a243f6",
"size": "2179",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "env-config/config-deploy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22086"
},
{
"name": "Dart",
"bytes": "81616"
},
{
"name": "HTML",
"bytes": "76595"
},
{
"name": "JavaScript",
"bytes": "8629"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "441430"
},
{
"name": "Shell",
"bytes": "16916"
}
],
"symlink_target": ""
}
|
import xml.etree.ElementPath
import attr
@attr.s
class Element(object):
    """Mutable tree node with an ElementTree-like container interface.

    ``children`` may hold nested Element instances or plain values
    (e.g. lists); find/findall/iterfind evaluate ElementPath
    expressions against this tree via the adapter classes below.
    """
    tag = attr.ib()                                 # node name used for path matching
    attrib = attr.ib(default=())
    children = attr.ib(default=attr.Factory(list))  # child Elements / plain values

    def append(self, element):
        self.children.append(element)

    def extend(self, elements):
        self.children.extend(elements)

    def insert(self, index, element):
        self.children.insert(index, element)

    def remove(self, element):
        self.children.remove(element)

    def findall(self, path):
        # All matches for the ElementPath expression *path*.
        return list(self.iterfind(path))

    def find(self, path):
        # First match for *path*, or None when nothing matches.
        return next(self.iterfind(path), None)

    def iterfind(self, path):
        # Wrap self so xml.etree.ElementPath sees a uniform tag/iter
        # interface, then unwrap the adapter objects it yields back.
        queryable_element = QueryableElement(self)
        found = xml.etree.ElementPath.iterfind(queryable_element, path)
        for item in found:
            if isinstance(item, ListBackedElement):
                yield item.list
            elif isinstance(item, QueryableElement):
                yield item.element

    def iter(self, tag=None):
        return iterate_element(self, tag)

    def __iter__(self):
        return iter(self.children)

    def __getitem__(self, index):
        return self.children[index]

    def __setitem__(self, index, element):
        self.children[index] = element

    def __len__(self):
        return len(self.children)
@attr.s
class QueryableElement(object):
    """Adapter presenting an Element to xml.etree.ElementPath.

    Iteration wraps child Elements in QueryableElement and child lists
    in ListBackedElement so path evaluation treats both uniformly.
    """
    element = attr.ib()  # the wrapped Element

    @property
    def tag(self):
        return self.element.tag

    def iter(self, tag=None):
        return iterate_element(self, tag)

    def __iter__(self):
        for item in self.element:
            if isinstance(item, Element):
                yield QueryableElement(item)
            elif isinstance(item, list):
                yield ListBackedElement(item)
@attr.s
class ListBackedElement(object):
    """Adapter presenting a plain list to xml.etree.ElementPath.

    The list's first item acts as the element's tag; the element itself
    exposes no children.
    """
    list = attr.ib()  # the wrapped list; list[0] is the tag

    @property
    def tag(self):
        return self.list[0]

    def iter(self, tag=None):
        # Yield self only when the tag matches (or no tag was given).
        if tag is None or self.tag == tag:
            yield self

    def __iter__(self):
        return iter(())
def iterate_element(element, tag):
    """Depth-first generator over *element* and its descendants.

    Yields every node whose ``tag`` equals *tag*; with ``tag=None``
    every node is yielded, including plain (non-element) children that
    have no ``iter`` method of their own.
    """
    wildcard = tag is None
    if wildcard or element.tag == tag:
        yield element
    for child in element:
        if hasattr(child, 'iter'):
            for descendant in child.iter(tag):
                yield descendant
        elif wildcard:
            yield child
|
{
"content_hash": "2439c8a0094aa246f35f04741a33c291",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 71,
"avg_line_length": 23.571428571428573,
"alnum_prop": 0.5965367965367966,
"repo_name": "Perlence/rpp",
"id": "0c1498ec037af62a4f82ca7052bc2e69c25ee640",
"size": "2310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpp/element.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "16279"
}
],
"symlink_target": ""
}
|
"""Generic interface to all dbm clones.
Use
import dbm
d = dbm.open(file, 'w', 0o666)
The returned object is a dbm.bsd, dbm.gnu, dbm.ndbm or dbm.dumb
object, dependent on the type of database being opened (determined by
the whichdb function) in the case of an existing dbm. If the dbm does
not exist and the create or new flag ('c' or 'n') was specified, the
dbm type will be determined by the availability of the modules (tested
in the above order).
It has the following interface (key and data are strings):
d[key] = data # store data at key (may override data at
# existing key)
data = d[key] # retrieve data at key (raise KeyError if no
# such key)
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # return a list of all existing keys (slow!)
Future versions may change the order in which implementations are
tested for existence, add interfaces to other dbm-like
implementations.
The open function has an optional second argument. This can be 'r',
for read-only access, 'w', for read-write access of an existing
database, 'c' for read-write access to a new or existing database, and
'n' for read-write access to a new database. The default is 'r'.
Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
only if it doesn't exist; and 'n' always creates a new database.
"""
__all__ = ['open', 'whichdb', 'error', 'errors']
import io
import os
import struct
import sys
class error(Exception):
    """Base exception class for the generic dbm interface."""
    pass

# Candidate implementation modules, probed in preference order by open().
_names = ['dbm.bsd', 'dbm.gnu', 'dbm.ndbm', 'dbm.dumb']
# First successfully imported implementation (lazily set by open()).
_defaultmod = None
# Map of module name -> imported module, filled in by open().
_modules = {}

# Rebind 'error' to a tuple so "except dbm.error" also catches the
# IOError raised by the underlying implementations.
error = (error, IOError)
def open(file, flag='r', mode=0o666):
    """Open the database *file* and return the corresponding dbm object.

    *flag* is 'r' (read-only, default), 'w' (read-write, existing db),
    'c' (read-write, create if missing) or 'n' (read-write, always
    create a new db).  *mode* is the Unix file mode used on creation.
    The concrete submodule is chosen by whichdb() for an existing
    database, or falls back to the first available implementation for a
    new one.
    """
    global _defaultmod
    if _defaultmod is None:
        # First call: probe the candidate implementations once and
        # remember every module that imports successfully.
        for name in _names:
            try:
                candidate = __import__(name, fromlist=['open'])
            except ImportError:
                continue
            if not _defaultmod:
                _defaultmod = candidate
            _modules[name] = candidate
        if not _defaultmod:
            raise ImportError("no dbm clone found; tried %s" % _names)

    # Guess the type of an existing database.
    dbtype = whichdb(file)
    if dbtype is None:
        # The db doesn't exist yet; only the create flags may make one.
        if 'c' in flag or 'n' in flag:
            mod = _defaultmod
        else:
            raise error[0]("need 'c' or 'n' flag to open new db")
    elif dbtype == "":
        raise error[0]("db type could not be determined")
    elif dbtype not in _modules:
        raise error[0]("db type is {0}, but the module is not "
                       "available".format(dbtype))
    else:
        mod = _modules[dbtype]
    return mod.open(file, flag, mode)
def whichdb(filename):
    """Guess which db package to use to open a db file.

    Return values:

    - None if the database file can't be read;
    - empty string if the file can be read but can't be recognized
    - the name of the dbm submodule (e.g. "ndbm" or "gnu") if recognized.

    Importing the given module may still fail, and opening the
    database using that module may still fail.
    """
    # Bug fix: 'ndbm' was referenced below without ever being bound,
    # raising NameError whenever a ".pag" file existed (the later
    # "ndbm is not None" check shows the binding was intended).  Import
    # it here, falling back to None when the extension is unavailable.
    try:
        from dbm import ndbm
    except ImportError:
        ndbm = None

    # Check for ndbm first -- this has a .pag and a .dir file
    try:
        f = io.open(filename + ".pag", "rb")
        f.close()
        # dbm linked with gdbm on OS/2 doesn't have .dir file
        if not (ndbm is not None and ndbm.library == "GNU gdbm"
                and sys.platform == "os2emx"):
            f = io.open(filename + ".dir", "rb")
            f.close()
        return "dbm.ndbm"
    except IOError:
        # some dbm emulations based on Berkeley DB generate a .db file
        # some do not, but they should be caught by the bsd checks
        try:
            f = io.open(filename + ".db", "rb")
            f.close()
            # guarantee we can actually open the file using dbm
            # kind of overkill, but since we are dealing with emulations
            # it seems like a prudent step
            if ndbm is not None:
                d = ndbm.open(filename)
                d.close()
                return "dbm.ndbm"
        except IOError:
            pass

    # Check for dumbdbm next -- this has a .dir and a .dat file
    try:
        # First check for presence of files
        os.stat(filename + ".dat")
        size = os.stat(filename + ".dir").st_size
        # dumbdbm files with no keys are empty
        if size == 0:
            return "dbm.dumb"
        f = io.open(filename + ".dir", "rb")
        try:
            if f.read(1) in (b"'", b'"'):
                return "dbm.dumb"
        finally:
            f.close()
    except (OSError, IOError):
        pass

    # See if the file exists, return None if not
    try:
        f = io.open(filename, "rb")
    except IOError:
        return None

    # Read the start of the file -- the magic number
    s16 = f.read(16)
    f.close()
    s = s16[0:4]

    # Return "" if not at least 4 bytes
    if len(s) != 4:
        return ""

    # Convert to 4-byte int in native byte order -- return "" if impossible
    try:
        (magic,) = struct.unpack("=l", s)
    except struct.error:
        return ""

    # Check for GNU dbm
    if magic == 0x13579ace:
        return "dbm.gnu"

    ## Check for old Berkeley db hash file format v2
    #if magic in (0x00061561, 0x61150600):
    #    return "bsddb185" # not supported anymore

    # Later versions of Berkeley db hash file have a 12-byte pad in
    # front of the file type
    try:
        (magic,) = struct.unpack("=l", s16[-4:])
    except struct.error:
        return ""

    ## Check for BSD hash
    #if magic in (0x00061561, 0x61150600):
    #    return "dbm.bsd"

    # Unknown
    return ""
if __name__ == "__main__":
    # Command-line helper: report the detected db type for each path argument.
    for _path in sys.argv[1:]:
        detected = whichdb(_path)
        print(detected if detected else "UNKNOWN", _path)
|
{
"content_hash": "d64a5320284f2834c84cd8ac3f41e4f2",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 78,
"avg_line_length": 31.09375,
"alnum_prop": 0.5829145728643216,
"repo_name": "MalloyPower/parsing-python",
"id": "56555be78f33d1f8669242cc93dd57c5e5964c59",
"size": "5970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.1/Lib/dbm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def javapredict_smallcat():
    """Train a deeplearning model on the setosa/versicolor iris subset and
    verify the exported POJO's predictions against H2O's own predictions
    on the virginica subset (numeric response).

    Delegates the train/export/compare cycle to ``tests.javapredict``.
    """
    # optional parameters forwarded to the deeplearning model builder
    params = {'epochs': 100}
    print("Parameter list:")
    # Iterate key/value pairs directly with items() instead of zipping
    # keys() with values() (same pairs, clearer and not order-fragile).
    for k, v in params.items():
        print("{0}, {1}".format(k, v))

    train = h2o.upload_file(h2o.locate("smalldata/iris/setosa_versicolor.csv"))
    test = h2o.upload_file(h2o.locate("smalldata/iris/virginica.csv"))
    # predictor column indices and the (numeric) response column index
    x = [0, 1, 2, 4]
    y = 3

    tests.javapredict("deeplearning", "numeric", train, test, x, y, **params)
if __name__ == "__main__":
    # Standalone entry point: hand control to the pyunit test harness.
    tests.run_test(sys.argv, javapredict_smallcat)
|
{
"content_hash": "b98ecd065b2a4052772c4c84878a1aa6",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 80,
"avg_line_length": 29.45,
"alnum_prop": 0.634974533106961,
"repo_name": "junwucs/h2o-3",
"id": "b71fcd2eefebe93372cac4773780f62035c664ae",
"size": "589",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_javapredict/pyunit_NOPASS_javapredict_iris_new_categoryDL.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "163561"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8927"
},
{
"name": "Groovy",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "147257"
},
{
"name": "Java",
"bytes": "5815537"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Makefile",
"bytes": "34005"
},
{
"name": "Python",
"bytes": "2084348"
},
{
"name": "R",
"bytes": "1818321"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "16336"
},
{
"name": "Shell",
"bytes": "46944"
},
{
"name": "TeX",
"bytes": "583215"
}
],
"symlink_target": ""
}
|
import requests
class BiblesAPI(object):
    """Minimal client for the bibles.org v2 REST API, plus the
    labs.bible.org verse-of-the-day endpoint.

    bibles.org requests authenticate with HTTP basic auth using the API
    key as the username (the password value is ignored by the service).
    """

    # Class-level defaults; __init__ overrides key and version per instance.
    _BIBLES_API_KEY = ""
    _API_URL = "https://bibles.org/v2/"
    _BIBLE_VERSION = "ESV"
    _LANGUAGE = "eng"

    def __init__(self, api_key, bible_version="ESV"):
        """Store the API key and the bible version used for all requests."""
        self._BIBLES_API_KEY = api_key
        self._BIBLE_VERSION = bible_version
        print ("Bibles API Key", self._BIBLES_API_KEY)

    @property
    def bible_version(self):
        """Translation identifier (e.g. ``"ESV"``) used to build request URLs."""
        return self._BIBLE_VERSION

    @bible_version.setter
    def bible_version(self, value):
        self._BIBLE_VERSION = value

    def doRequest(self, url, payload=None):
        """Perform an authenticated GET request and return the parsed JSON.

        Args:
            url: Absolute endpoint URL.
            payload: Optional dict of query-string parameters.

        The default is ``None`` instead of the original mutable ``{}``
        default, which would be one shared dict across all calls.
        """
        if payload is None:
            payload = {}
        r = requests.get(url, params=payload, auth=(self._BIBLES_API_KEY, 'pass'))
        print (r.url, r.headers)
        # r.raise_for_status()
        return r.json()

    def verses(self, book_id, chapter_number):
        """Fetch every verse of one chapter.

        Endpoint shape:
        GET /chapters/{version_id}:{book_id}.{chapter_number}/verses.js
        e.g. https://bibles.org/v2/chapters/eng-KJVA:1Cor.2/verses.js
        """
        url = self._API_URL+"chapters/"+self._LANGUAGE+"-"+self._BIBLE_VERSION+":"+book_id+"."+str(chapter_number)+"/verses.js"
        payload = {"version": self._LANGUAGE+"-"+self._BIBLE_VERSION,
                   "include_marginalia": True,
                   }
        return self.doRequest(url, payload)

    def passages(self, book_name, chapter_number, start_verse, end_verse=None):
        """Fetch a passage (single verse or verse range) of one chapter.

        The passage specifier is "Book+chapter:verses", e.g. "John+3:12"
        or "John+3:12-15".
        """
        q = book_name+"+"+str(chapter_number)+":"+str(start_verse)
        if end_verse:
            q += "-"+str(end_verse)
        url = self._API_URL+"passages.js?q[]="+q
        payload = {"version": self._LANGUAGE+"-"+self._BIBLE_VERSION,
                   "include_marginalia": True,
                   }
        print ("Get Passage", url, payload)
        return self.doRequest(url, payload)

    def search(self, search_phrase):
        """Full-text search in the configured bible version."""
        url = self._API_URL+"search.js"
        payload = {"version": self._LANGUAGE+"-"+self._BIBLE_VERSION,
                   "include_marginalia": True,
                   "query": search_phrase
                   }
        # NOTE(review): log message says "Get Passage" -- looks copy-pasted
        # from passages(); kept byte-for-byte to preserve output.
        print ("Get Passage", url, payload)
        return self.doRequest(url, payload)

    def votd(self):
        """Fetch the labs.bible.org verse of the day as JSON."""
        # https://labs.bible.org/api/?passage=votd&type=json&version=eng-ESV
        url = "https://labs.bible.org/api/"
        payload = {"passage": "votd", "type": "json", "version": "eng-ESV"}
        return self.doRequest(url, payload)
|
{
"content_hash": "30f3aec161d491e3d3fd1ebffbf254c6",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 133,
"avg_line_length": 37.6625,
"alnum_prop": 0.574510454696316,
"repo_name": "samuelstevens9/apiai-bibles_org",
"id": "0b4b2d0df05ecada55921ea267d5fbf88a10bd76",
"size": "3014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bibles_apy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10116"
}
],
"symlink_target": ""
}
|
import re
import warnings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.db.models.expressions import RawSQL
from django.utils.translation import gettext_lazy
from .abstract_applicationkeyvalue import AbstractApplicationKeyValue
from .abstract_is_admin import AbstractIsAdmin
from devilry.devilry_account.models import User
from .period import Period
from . import period_tag
class BulkCreateFromEmailsResult(object):
    """Outcome of :meth:`.AbstractRelatedUserManager.bulk_create_from_emails`.

    Attributes:
        created_relatedusers_queryset: Queryset of the related users that
            were created by the call.
        existing_relateduser_emails_set: Emails of related users that
            already existed and therefore were NOT created.
        created_users_queryset: Queryset of the ``User`` objects that were
            created. Note: these are plain user accounts, not the created
            *related* users.
        existing_user_emails_set: Emails of ``User`` objects that already
            existed and therefore were NOT created. When every user already
            had an account, this contains all emails given to the method.
    """

    def __init__(self, modelclass, created_users_queryset, existing_user_emails_set,
                 created_relatedusers_queryset,
                 existing_relateduser_emails_set):
        self.__modelclass = modelclass
        self.created_users_queryset = created_users_queryset
        self.existing_user_emails_set = existing_user_emails_set
        self.created_relatedusers_queryset = created_relatedusers_queryset
        self.existing_relateduser_emails_set = existing_relateduser_emails_set

    def new_users_was_created(self):
        """Tell whether at least one new ``User`` object was created."""
        return self.created_users_queryset.exists()

    def new_relatedusers_was_created(self):
        """Tell whether at least one new related user was created."""
        return self.created_relatedusers_queryset.exists()
class BulkCreateFromUsernamesResult(object):
    """Outcome of :meth:`.AbstractRelatedUserManager.bulk_create_from_usernames`.

    Attributes:
        created_relatedusers_queryset: Queryset of the related users that
            were created by the call.
        existing_relateduser_usernames_set: Usernames of related users that
            already existed and therefore were NOT created.
        created_users_queryset: Queryset of the ``User`` objects that were
            created. Note: these are plain user accounts, not the created
            *related* users.
        existing_user_usernames_set: Usernames of ``User`` objects that
            already existed and therefore were NOT created. When every user
            already had an account, this contains all usernames given to
            the method.
    """

    def __init__(self, modelclass, created_users_queryset, existing_user_usernames_set,
                 created_relatedusers_queryset,
                 existing_relateduser_usernames_set):
        self.__modelclass = modelclass
        self.created_users_queryset = created_users_queryset
        self.existing_user_usernames_set = existing_user_usernames_set
        self.created_relatedusers_queryset = created_relatedusers_queryset
        self.existing_relateduser_usernames_set = existing_relateduser_usernames_set

    def new_users_was_created(self):
        """Tell whether at least one new ``User`` object was created."""
        return self.created_users_queryset.exists()

    def new_relatedusers_was_created(self):
        """Tell whether at least one new related user was created."""
        return self.created_relatedusers_queryset.exists()
class AbstractRelatedUserManager(models.Manager):
    """
    Base class for the managers for related users
    (:class:`.RelatedStudent` and :class:`.RelatedExaminer`).
    """
    def bulk_create_from_emails(self, period, emails):
        """
        Bulk create related student/examiner for all the emails in the given ``emails`` iterator.

        Uses :meth:`devilry.devilry_account.models.UserManager.bulk_create_from_emails`
        to create any non-existing users.

        Raises:
            devilry_account.exceptions.IllegalOperationError: If the
                ``CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND``-setting is ``False``.

        Returns:
            :class:`.BulkCreateFromEmailsResult` object with detailed information about
            the created users, created related users, and the users and related users
            that was not created.
        """
        # Emails that already have a related user on this period -- skipped below.
        existing_relateduser_emails_set = set(self.model.objects.filter(
            period=period,
            user__useremail__email__in=emails).values_list('user__useremail__email', flat=True))
        all_relateduser_emails_set = set(emails)
        new_relateduser_emails_set = all_relateduser_emails_set.difference(existing_relateduser_emails_set)
        # Create User accounts for any email with no existing user.
        created_users_queryset, existing_user_emails_set = get_user_model().objects.bulk_create_from_emails(
            new_relateduser_emails_set)
        # Create one related user (self.model) per user matching a new email.
        new_relateduser_objects = []
        new_relateduser_users_queryset = get_user_model().objects.filter_by_emails(new_relateduser_emails_set)
        for user in new_relateduser_users_queryset:
            new_relateduser = self.model(period=period, user=user)
            new_relateduser_objects.append(new_relateduser)
        self.model.objects.bulk_create(new_relateduser_objects)
        # Re-fetch the created related users as a queryset for the result object.
        created_relatedusers_queryset = self.model.objects.filter(
            period=period,
            user__in=new_relateduser_users_queryset)
        return BulkCreateFromEmailsResult(
            modelclass=self.model,
            created_users_queryset=created_users_queryset,
            existing_user_emails_set=existing_user_emails_set,
            created_relatedusers_queryset=created_relatedusers_queryset,
            existing_relateduser_emails_set=existing_relateduser_emails_set)

    def bulk_create_from_usernames(self, period, usernames):
        """
        Bulk create related student/examiner for all the usernames in the given ``usernames`` iterator.

        Uses :meth:`devilry.devilry_account.models.UserManager.bulk_create_from_usernames`
        to create any non-existing users.

        Raises:
            devilry_account.exceptions.IllegalOperationError: If the
                ``CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND``-setting is ``True``.

        Returns:
            :class:`.BulkCreateFromUsernamesResult` object with detailed information about
            the created users, created related users, and the users and related users
            that was not created.
        """
        # Same flow as bulk_create_from_emails, keyed on username instead of email.
        existing_relateduser_usernames_set = set(self.model.objects.filter(
            period=period,
            user__username__username__in=usernames).values_list('user__username__username', flat=True))
        all_relateduser_usernames_set = set(usernames)
        new_relateduser_usernames_set = all_relateduser_usernames_set.difference(
            existing_relateduser_usernames_set)
        created_users_queryset, existing_user_usernames_set = get_user_model().objects \
            .bulk_create_from_usernames(
                new_relateduser_usernames_set)
        new_relateduser_objects = []
        new_relateduser_users_queryset = get_user_model().objects \
            .filter_by_usernames(new_relateduser_usernames_set)
        for user in new_relateduser_users_queryset:
            new_relateduser = self.model(period=period, user=user)
            new_relateduser_objects.append(new_relateduser)
        self.model.objects.bulk_create(new_relateduser_objects)
        created_relatedusers_queryset = self.model.objects.filter(
            period=period,
            user__in=new_relateduser_users_queryset)
        return BulkCreateFromUsernamesResult(
            modelclass=self.model,
            created_users_queryset=created_users_queryset,
            existing_user_usernames_set=existing_user_usernames_set,
            created_relatedusers_queryset=created_relatedusers_queryset,
            existing_relateduser_usernames_set=existing_relateduser_usernames_set)
class RelatedUserBase(models.Model, AbstractIsAdmin):
    """
    Base class for :class:`devilry.apps.core.models.RelatedStudent` and
    :class:`devilry.apps.core.models.RelatedExaminer`.
    """
    #: The period that the user is related to.
    period = models.ForeignKey(Period,
                               verbose_name='Period',
                               help_text="The period.",
                               on_delete=models.CASCADE)

    #: A User object. Must be unique within this
    #: period (see ``Meta.unique_together``).
    user = models.ForeignKey(User, help_text="The related user.", on_delete=models.CASCADE)

    #: Comma-separated list of tags. Each tag is a word with the following
    #: letters allowed: a-z and 0-9. Each word is separated by a comma, and no
    #: whitespace.
    #:
    #: .. deprecated:: Since 3.0. Use :class:`.RelatedExaminerSyncSystemTag` and
    #:
    #:     :class:`.RelatedStudentSyncSystemTag` instead.
    #:
    tags = models.TextField(blank=True, null=True,
                            help_text="Comma-separated list of tags. Each tag is a word with the following letters "
                                      "allowed: a-z, 0-9, ``_`` and ``-``. Each word is separated by a comma, "
                                      "and no whitespace.")

    # Pre-compiled pattern used by clean() to validate the ``tags`` format.
    tags_patt = re.compile('^(?:[a-z0-9_-]+,)*[a-z0-9_-]+$')

    #: Automatic anonymous ID for a student/examiner for the entire semester.
    automatic_anonymous_id = models.CharField(max_length=255,
                                              blank=True, null=False, default='',
                                              editable=False)

    class Meta:
        abstract = True
        unique_together = ('period', 'user')
        app_label = 'core'

    def clean(self):
        # Reject any ``tags`` value that does not match the documented format.
        if self.tags and not self.tags_patt.match(self.tags):
            raise ValidationError('tags must be a comma-separated list of tags, each tag only containing '
                                  'a-z, 0-9, ``_`` and ``-``.')

    def __str__(self):
        # e.g. "RelatedStudent#someuser on duck1010.springsemester"
        return '{}#{} on {}'.format(self.__class__.__name__,
                                    self.user.shortname,
                                    self.period.get_path())
class RelatedExaminerManager(AbstractRelatedUserManager):
    """
    Manager for :class:`.RelatedExaminer`.
    """
    # Use this manager on related-object descriptors as well.
    use_for_related_fields = True
class RelatedExaminerQuerySet(models.QuerySet):
    """
    QuerySet for :class:`.RelatedExaminer`.
    """
    def annotate_with_number_of_groups_on_assignment(self, assignment):
        """
        Annotates the queryset with ``number_of_groups_on_assignment``: the number of
        :class:`devilry.apps.core.models.AssignmentGroup`
        objects where the RelatedExaminer is :class:`devilry.apps.core.models.Examiner` within the given
        assignment.

        Args:
            assignment: A :class:`devilry.apps.core.models.Assignment` object.
        """
        # Count(Case(When(...))) only counts rows matching the assignment;
        # groups on other assignments yield NULL and are ignored by Count.
        return self.annotate(
            number_of_groups_on_assignment=models.Count(
                models.Case(
                    models.When(examiner__assignmentgroup__parentnode=assignment,
                                then=1)
                )
            )
        )

    def extra_annotate_with_number_of_candidates_on_assignment(self, assignment):
        """
        Annotates the queryset with ``number_of_candidates_on_assignment``: the number of
        :class:`devilry.apps.core.models.Candidate`
        objects within all :class:`devilry.apps.core.models.AssignmentGroup` objects where
        the RelatedExaminer is :class:`devilry.apps.core.models.Examiner` within the given
        assignment.

        Args:
            assignment: A :class:`devilry.apps.core.models.Assignment` object.
        """
        # Raw subquery: sums the cached candidate counts for every group on
        # the given assignment where this RelatedExaminer is examiner.
        return self.extra(
            select={
                'number_of_candidates_on_assignment': """
                    SELECT COALESCE(SUM(candidate_count), 0)
                    FROM devilry_dbcache_assignmentgroupcacheddata
                    INNER JOIN core_assignmentgroup_examiners
                        ON (core_assignmentgroup_examiners.assignmentgroup_id = devilry_dbcache_assignmentgroupcacheddata.group_id)
                    WHERE core_assignmentgroup_examiners.relatedexaminer_id = core_relatedexaminer.id
                    AND
                        devilry_dbcache_assignmentgroupcacheddata.group_id IN (
                            SELECT core_assignmentgroup.id
                            FROM core_assignmentgroup
                            WHERE core_assignmentgroup.parentnode_id = %s
                        )
                """
            },
            select_params=[
                assignment.id
            ]
        )
class RelatedExaminer(RelatedUserBase):
    """ Related examiner.

    Adds no fields to RelatedUserBase.
    """
    objects = RelatedExaminerManager.from_queryset(RelatedExaminerQuerySet)()

    #: Setting this to ``False`` indicates that the examiner is deleted from the course
    #: for this period. All access is removed.
    active = models.BooleanField(default=True)

    def get_anonymous_name(self):
        """
        Get the anonymous name for this RelatedExaminer.

        Uses :obj:`~.RelatedUserBase.automatic_anonymous_id` when it is set,
        falling back on ``"Automatic anonymous ID missing"``.

        Returns:
            str: A unicode string with the anonymous name.
        """
        if self.automatic_anonymous_id:
            return self.automatic_anonymous_id
        else:
            return gettext_lazy('Automatic anonymous ID missing')

    @property
    def relatedusertag_set(self):
        # Alias matching :class:`.RelatedStudent` so code can treat both
        # related user types uniformly.
        return self.relatedexaminertag_set
class RelatedStudentQuerySet(models.QuerySet):
    """
    QuerySet for :class:`.RelatedStudent`.
    """
    def get_userid_to_candidateid_map(self):
        """
        Get a dict mapping user ID to candidate ID.

        Related students with an empty or ``None`` candidate ID are left out.
        """
        queryset = self.exclude(models.Q(candidate_id='') | models.Q(candidate_id=None))
        return dict(queryset.values_list('user_id', 'candidate_id'))

    def prefetch_syncsystemtag_objects(self):
        """
        Prefetch :class:`.RelatedStudentSyncSystemTag` objects in the
        ``syncsystemtag_objects`` attribute.

        The ``syncsystemtag_objects`` attribute is a ``list`` of
        :class:`.RelatedStudentSyncSystemTag` objects ordered by
        ``tag`` in ascending order.
        """
        warnings.warn('deprecated, function up to date but will be refactored', DeprecationWarning)
        return self.prefetch_related(
            models.Prefetch('periodtag_set',
                            queryset=period_tag.PeriodTag.objects.order_by('tag'),
                            to_attr='syncsystemtag_objects'))

    def annotate_with_total_grading_points(self, assignment_ids):
        """
        Annotate with ``grade_points_total``: the sum of grading points from the
        last published feedbackset of each of the student's groups on the
        given assignments.

        Args:
            assignment_ids: Iterable of Assignment ids to sum over.
        """
        # Raw SQL: joins cached "last published feedbackset" data for the
        # student's candidate groups and sums their grading points.
        return self.annotate(
            grade_points_total=RawSQL("""
                SELECT COALESCE(SUM(grading_points), 0)
                FROM devilry_group_feedbackset
                INNER JOIN devilry_dbcache_assignmentgroupcacheddata
                    ON (devilry_group_feedbackset.id = devilry_dbcache_assignmentgroupcacheddata.last_published_feedbackset_id)
                INNER JOIN core_assignmentgroup
                    ON (core_assignmentgroup.id = devilry_dbcache_assignmentgroupcacheddata.group_id)
                INNER JOIN core_candidate
                    ON (core_assignmentgroup.id = core_candidate.assignment_group_id)
                INNER JOIN core_assignment
                    ON (core_assignment.id = core_assignmentgroup.parentnode_id)
                WHERE
                    core_candidate.relatedstudent_id = core_relatedstudent.id
                AND
                    core_assignment.id IN %s
            """, [tuple(assignment_ids)], output_field=models.PositiveIntegerField()))
class RelatedStudentManager(AbstractRelatedUserManager):
    """
    Manager for :class:`.RelatedStudent`.
    """
    # Use this manager on related-object descriptors as well.
    use_for_related_fields = True
class RelatedStudent(RelatedUserBase):
    """
    Related student.
    """
    objects = RelatedStudentManager.from_queryset(RelatedStudentQuerySet)()

    #: Setting this to ``False`` indicates that the student has dropped out
    #: or been kicked out of the course for this period.
    active = models.BooleanField(default=True)

    #: A candidate ID that follows the student through the entire period.
    candidate_id = models.CharField(max_length=30, blank=True, null=True)

    def get_anonymous_name(self):
        """
        Get the anonymous name for this RelatedStudent.

        If :obj:`~.RelatedStudent.candidate_id` is set, we use that,
        falling back on :obj:`~.RelatedUserBase.automatic_anonymous_id`, and
        then falling back on ``"Automatic anonymous ID missing"``.

        Returns:
            str: A unicode string with the anonymous name.
        """
        if self.candidate_id:
            return self.candidate_id
        return self.get_automatic_anonymous_id_with_fallback()

    def get_automatic_anonymous_id_with_fallback(self):
        """
        Get :obj:`~.RelatedUserBase.automatic_anonymous_id`, falling back on a
        translatable "Automatic anonymous ID missing" string when unset.

        Returns:
            str: A unicode string with the anonymous name.
        """
        if self.automatic_anonymous_id:
            return self.automatic_anonymous_id
        else:
            return gettext_lazy('Automatic anonymous ID missing')

    @property
    def relatedusertag_set(self):
        # Alias matching :class:`.RelatedExaminer` so code can treat both
        # related user types uniformly.
        return self.relatedstudenttag_set

    @property
    def syncsystemtag_stringlist(self):
        """
        A shortcut for getting a list of tag strings from the
        ``syncsystemtag_objects`` list when the queryset uses
        :meth:`.RelatedStudentQuerySet.prefetch_syncsystemtag_objects`.

        Raises:
            AttributeError: If the prefetch was not applied to the queryset.
        """
        if not hasattr(self, 'syncsystemtag_objects'):
            raise AttributeError('The syncsystemtag_stringlist property requires '
                                 'RelatedStudentQuerySet.prefetch_syncsystemtag_objects().')
        return [syncsystemtag.tag for syncsystemtag in self.syncsystemtag_objects]
class RelatedStudentKeyValue(AbstractApplicationKeyValue, AbstractIsAdmin):
    """ Key/value pair tied to a specific RelatedStudent. """
    #: The related student this key/value pair belongs to.
    relatedstudent = models.ForeignKey(RelatedStudent, on_delete=models.CASCADE)

    #: Whether the student themselves may read the value.
    student_can_read = models.BooleanField(
        help_text='Specifies if a student can read the value or not.',
        default=False)

    class Meta:
        unique_together = ('relatedstudent', 'application', 'key')
        app_label = 'core'

    def __str__(self):
        return '{0}: {1}'.format(self.relatedstudent, super(RelatedStudentKeyValue, self).__str__())
|
{
"content_hash": "af6bf7b805b860cbd02025e4130be960",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 131,
"avg_line_length": 40.42765957446809,
"alnum_prop": 0.6420714699226356,
"repo_name": "devilry/devilry-django",
"id": "aa585a295f16afa8329f287d07db0b5ef6cdc2e9",
"size": "19027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/apps/core/models/relateduser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "513510"
},
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "421969"
},
{
"name": "JavaScript",
"bytes": "756713"
},
{
"name": "Less",
"bytes": "166670"
},
{
"name": "PLpgSQL",
"bytes": "397986"
},
{
"name": "Python",
"bytes": "6507968"
},
{
"name": "Shell",
"bytes": "10328"
}
],
"symlink_target": ""
}
|
"""
Datebook month views
"""
import datetime
from django.conf import settings
from django import http
from django.views import generic
from django.views.generic.edit import FormMixin
from django.contrib.auth.models import User
from braces.views import LoginRequiredMixin, PermissionRequiredMixin
from datebook.forms.month import DatebookForm, DatebookNotesForm
from datebook.forms.daymodel import AssignDayModelForm
from datebook.models import Datebook
from datebook.mixins import DatebookCalendarMixin, DatebookCalendarAutoCreateMixin, OwnerOrPermissionRequiredMixin
from datebook.utils import format_seconds_to_clock, get_day_weekno
class DatebookMonthFormView(PermissionRequiredMixin, generic.FormView):
    """
    Datebook create form view.

    Only users that do not already own a datebook are selectable as authors.
    """
    model = Datebook
    form_class = DatebookForm
    template_name = 'datebook/month/form.html'
    permission_required = 'datebook.add_datebook'
    raise_exception = True

    def get_form(self, form_class):
        # Exclude users that already own a datebook; with nobody left to
        # choose from, return None so the template renders no form.
        excluded_users = Datebook.objects.all().values('author_id').distinct()
        self.available_users = User.objects.all().exclude(pk__in=[v['author_id'] for v in excluded_users]).order_by('username')
        if self.available_users.count() == 0:
            return None
        return super(DatebookMonthFormView, self).get_form(form_class)

    def get_form_kwargs(self, **kwargs):
        # Hand the selectable users over to the form.
        kwargs = super(DatebookMonthFormView, self).get_form_kwargs(**kwargs)
        kwargs.update({
            'available_users': self.available_users,
        })
        return kwargs

    def form_valid(self, form):
        self.object = form.save()
        return super(DatebookMonthFormView, self).form_valid(form)

    def get_success_url(self):
        return self.object.get_absolute_url()
class DatebookMonthGetOrCreateView(DatebookCalendarMixin, OwnerOrPermissionRequiredMixin, generic.View):
    """
    Automatically create the datebook if it does not already exist for the
    "author+year+month" kwargs given, then redirect to its page.

    If the Datebook already exists for the given kwargs, raise a "Http404".
    """
    permission_required = 'datebook.add_datebook'
    raise_exception = True

    def get(self, request, *args, **kwargs):
        # exists() issues a cheap EXISTS query instead of counting all rows.
        if Datebook.objects.filter(author=self.author, period__year=self.year, period__month=self.month).exists():
            raise http.Http404
        d = self.author.datebook_set.create(period=datetime.date(year=self.year, month=self.month, day=1))
        return http.HttpResponseRedirect(d.get_absolute_url())
class DatebookMonthCurrentView(DatebookCalendarAutoCreateMixin, OwnerOrPermissionRequiredMixin, generic.View):
    """
    Redirect to the author's datebook for the current month, automatically
    creating the datebook first when it does not exist yet.
    """
    def get(self, request, *args, **kwargs):
        self.get_current_date()
        q = Datebook.objects.filter(author=self.author, period__year=self.year, period__month=self.month)
        # exists() issues a cheap EXISTS query instead of counting all rows.
        if q.exists():
            # Earliest matching datebook for the period ([0] replaces the
            # redundant [0:1][0] slicing, same row via LIMIT 1).
            d = q.order_by('period')[0]
        else:
            d = self.author.datebook_set.create(period=datetime.date(year=self.year, month=self.month, day=1))
        return http.HttpResponseRedirect(d.get_absolute_url())
class DatebookMonthView(LoginRequiredMixin, DatebookCalendarMixin, FormMixin, generic.TemplateView):
    """
    Datebook month details view.

    Get the Calendar for the given year+month then fill it with day entries.

    Accept POST request for the AssignDayModelForm form that fills days from
    a day model.
    """
    template_name = "datebook/month/calendar.html"
    form_class = AssignDayModelForm

    def get_calendar(self, day_filters=None):
        """
        Get the Datebook's calendar and crawl it to compute elapsed time,
        overtime and vacation totals per week and for the whole month.

        Args:
            day_filters: Optional dict of extra filters for the day entries.
                ``None`` replaces the previous mutable ``{}`` default so the
                dict is never shared between calls.
        """
        if day_filters is None:
            day_filters = {}
        current_day = datetime.date.today()

        # Init the calendar object
        _cal = super(DatebookMonthView, self).get_calendar()

        # Get month weeks structure, removing days that are not month's days
        # (equal to 0). A list comprehension is used instead of filter() so
        # each entry is a real, re-iterable list on both Python 2 and 3:
        # Python 3's lazy filter() object would be exhausted by the "in"
        # membership test below before get_day_weekno() could read it.
        week_days = [[day for day in item if day] for item in _cal.monthdayscalendar(self.object.period.year, self.object.period.month)]
        weeks_totals = [{'current': False, 'active': False, 'elapsed_seconds': 0, 'overtime_seconds': 0, 'vacations': 0} for i in range(0, len(week_days))]

        # Tag the current week if we are on a current month
        if current_day.year == self.object.period.year and current_day.month == self.object.period.month:
            for i, item in enumerate(weeks_totals, start=0):
                if current_day.day in week_days[i]:
                    item['current'] = True
                    break

        # Accumulate elapsed/overtime time for worked days and vacations,
        # per week. (Month totals are summed from the week totals below.)
        day_entries = self.get_dayentry_list(day_filters)
        for item in day_entries:
            # Find the day's week number
            weekno = get_day_weekno(week_days, item.activity_date.day)
            # Default value for day's mark used in calendar template for a
            # day equal or after the current day
            item.projected = False
            # Do not calculate future days (from current day and further)
            if current_day <= item.activity_date:
                item.projected = True
                continue
            # Mark the day's week as active (the week has days that are not
            # projections)
            else:
                weeks_totals[weekno]['active'] = True
            # Do not calculate vacation days
            if item.vacation:
                weeks_totals[weekno]['vacations'] += 1
                continue
            # Compute week totals
            weeks_totals[weekno]['elapsed_seconds'] += item.get_elapsed_seconds()
            weeks_totals[weekno]['overtime_seconds'] += item.get_overtime_seconds()

        # Post process week totals for some additional values
        for item in weeks_totals:
            item['elapsed_time'] = format_seconds_to_clock(item['elapsed_seconds'])
            item['overtime_time'] = format_seconds_to_clock(item['overtime_seconds'])

        calendar_datas = {
            "days": [item.day for item in _cal.itermonthdates(self.object.period.year, self.object.period.month) if item.month == self.object.period.month],
            "weekheader": _cal.formatweekheader(),
            "weeks_totals": weeks_totals,
            "month": _cal.formatmonth(self.object.period.year, self.object.period.month, dayentries=day_entries, current_day=current_day),
            "elapsed_seconds": 0,
            "elapsed_time": None,
            "overtime_seconds": 0,
            "overtime_time": None,
            "vacations": 0,
        }
        # Crawl all weeks to calculate month totals
        for item in weeks_totals:
            calendar_datas['elapsed_seconds'] += item['elapsed_seconds']
            calendar_datas['overtime_seconds'] += item['overtime_seconds']
            calendar_datas['vacations'] += item['vacations']
        calendar_datas['elapsed_time'] = format_seconds_to_clock(calendar_datas['elapsed_seconds'])
        calendar_datas['overtime_time'] = format_seconds_to_clock(calendar_datas['overtime_seconds'])

        return calendar_datas

    def get_day_models(self):
        """
        Get and return the author's day models (id and title only).
        """
        return self.author.daymodel_set.all().order_by('title').values('id', 'title')

    def get_context_data(self, **kwargs):
        context = super(DatebookMonthView, self).get_context_data(**kwargs)
        context.update({
            'datebook': self.object,
            'daymodels_form': self.form,
            'datebook_calendar': self.calendar,
            'day_models': self.get_day_models(),
            'DATEBOOK_TEXT_MARKUP_RENDER_TEMPLATE': settings.DATEBOOK_TEXT_MARKUP_RENDER_TEMPLATE,
        })
        return context

    def get_form_kwargs(self, **kwargs):
        kwargs = super(DatebookMonthView, self).get_form_kwargs(**kwargs)
        kwargs.update({
            'author': self.author,
            'datebook': self.object,
            'daychoices': self.calendar['days'],
        })
        return kwargs

    def form_valid(self, form):
        """
        If the form is valid, save the associated model.
        """
        form.save()
        return super(DatebookMonthView, self).form_valid(form)

    def get_success_url(self):
        return self.object.get_absolute_url()

    def get(self, request, *args, **kwargs):
        self.object = self.get_datebook({'period__year': self.year, 'period__month': self.month})
        self.calendar = self.get_calendar()
        form_class = self.get_form_class()
        self.form = self.get_form(form_class)
        return self.render_to_response(self.get_context_data(**kwargs))

    def post(self, request, *args, **kwargs):
        self.object = self.get_datebook({'period__year': self.year, 'period__month': self.month})
        self.calendar = self.get_calendar()
        form_class = self.get_form_class()
        self.form = self.get_form(form_class)
        if self.form.is_valid():
            return self.form_valid(self.form)
        else:
            return self.form_invalid(self.form)
class DatebookNotesFormView(DatebookCalendarMixin, OwnerOrPermissionRequiredMixin, generic.UpdateView):
    """
    Form view to edit the notes of an existing Datebook.
    """
    model = Datebook
    form_class = DatebookNotesForm
    template_name = 'datebook/month/notes_form.html'
    permission_required = 'datebook.change_datebook'
    raise_exception = True

    def get_object(self):
        # Resolve the datebook from the year/month kwargs; also expose the
        # author for the owner/permission mixin.
        self.datebook = self.get_datebook({'period__year': self.year, 'period__month': self.month})
        self.author = self.datebook.author
        return self.datebook

    def get_success_url(self):
        return self.object.get_absolute_url()

    def get_context_data(self, **kwargs):
        context = super(DatebookNotesFormView, self).get_context_data(**kwargs)
        context.update({
            'DATEBOOK_TEXT_FIELD_JS_TEMPLATE': settings.DATEBOOK_TEXT_FIELD_JS_TEMPLATE,
            'DATEBOOK_TEXT_MARKUP_RENDER_TEMPLATE': settings.DATEBOOK_TEXT_MARKUP_RENDER_TEMPLATE,
        })
        return context
|
{
"content_hash": "61c8a12a83bf9f96b6e32428d8184cdd",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 156,
"avg_line_length": 40.93984962406015,
"alnum_prop": 0.6319559228650138,
"repo_name": "sveetch/django-datebook",
"id": "2f7e7988ab3965de743acfec1a5fd1eb4b6c1cd1",
"size": "10914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datebook/views/month.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "399014"
},
{
"name": "HTML",
"bytes": "28769"
},
{
"name": "JavaScript",
"bytes": "25408"
},
{
"name": "Python",
"bytes": "120935"
},
{
"name": "Ruby",
"bytes": "990"
}
],
"symlink_target": ""
}
|
from datetime import date
from corehq.apps.userreports.specs import EvaluationContext
from custom.champ.tests.utils import TestDataSourceExpressions
from custom.champ.utils import POST_TEST_XMLNS, ACCOMPAGNEMENT_XMLNS, SUIVI_MEDICAL_XMLNS
CHAMP_CAMEROON_DATA_SOURCE = 'champ_cameroon.json'
class TestEnhancedPeerMobilization(TestDataSourceExpressions):
    """Exercise the champ_cameroon UCR data-source expressions.

    One test per supported form type (post-test, accompagnement,
    suivi-medical).  Every form type is expected to yield the same
    property values; only the xmlns and the document a few expressions
    read from (the form vs. the related case) differ, so the shared
    assertions live in :meth:`_assert_champ_expressions`.

    NOTE(review): the "cametoon" spelling in the test-method names is a
    pre-existing typo, kept so the public test identifiers stay stable.
    """

    data_source_name = CHAMP_CAMEROON_DATA_SOURCE

    def _install_mock_user(self):
        # All three tests resolve the form's userID against this doc to
        # compute the `organization` property.
        self.database.mock_docs = {
            'user_id': {
                'id': 'user_id',
                'domain': 'champ_cameroon',
                'location_id': 'test_location_id',
            }
        }

    def _assert_champ_expressions(self, form, case, expected_xmlns,
                                  uic_ctx='form', age_range_ctx='form',
                                  hiv_status_ctx='form'):
        """Assert every data-source property for *form* / *case*.

        The ``*_ctx`` keywords select whether the named expression is
        evaluated with the form document or the case document as its
        context ('form' or 'case'); the expected values themselves are
        identical for all form types.
        """
        def check(name, datatype, expected, ctx='form'):
            # A fresh EvaluationContext per expression, exactly as the
            # original per-assertion calls did.
            doc = form if ctx == 'form' else case
            expression = self.get_expression(name, datatype)
            self.assertEqual(expression(form, EvaluationContext(doc, 0)), expected)

        check('xmlns', 'string', expected_xmlns)
        check('district', 'string', 'test district', ctx='case')
        check('uic', 'string', 'test uic', ctx=uic_ctx)
        check('age_range', 'string', '10-15 yrs', ctx=age_range_ctx)
        check('date_handshake', 'date', '2017-05-03')
        check('first_art_date', 'string', '2017-02-03')
        check('date_last_vl_test', 'string', '2017-01-29')
        check('client_type', 'string', 'test client', ctx='case')
        check('posttest_date', 'date', '2017-02-20')
        check('hiv_status', 'string', 'positive', ctx=hiv_status_ctx)
        check('handshake_status', 'string', 'status')
        check('undetect_vl', 'string', 'yes')
        check('form_completion', 'string', '2017-01-31 20:00')
        check('user_id', 'string', 'user_id')
        check('htc_month', 'date', date(2017, 2, 1))
        check('care_new_month', 'date', date(2017, 5, 1))
        check('organization', 'string', 'test_location_id')
        check('hiv_test_date', 'date', '2017-03-15', ctx='case')

    def test_champ_cametoon_properties_for_post_test_xmlns(self):
        # Post-test forms carry uic/age_range/hiv_status on the form itself.
        form = {
            'id': 'form_id',
            'xmlns': POST_TEST_XMLNS,
            'domain': 'champ_cameroon',
            'form': {
                'group': {
                    'age': 12,
                },
                'district': 'test district',
                'visit_date': '2017-01-15',
                'posttest_date': '2017-02-20',
                'age_range': '10-15 yrs',
                'type_visit': 'first visit',
                'date_handshake': '2017-05-03',
                'handshake_status': 'status',
                'meta': {
                    'userID': 'user_id',
                    'timeEnd': '2017-01-31 20:00'
                },
                'seropostive_group': {
                    'first_art_date': '2017-02-03',
                },
                'load': {
                    'uic': 'test uic',
                    'first_art_date': '2017-02-03',
                    'client_type': 'test client',
                },
                'save': {
                    'hiv_status': 'positive',
                },
                'viral_load_group': {
                    'date_last_vl_test': '2017-01-29',
                    'undetect_vl': 'yes',
                }
            }
        }
        case = {
            'district': 'test district',
            'hiv_test_date': '2017-03-15',
        }
        self._install_mock_user()
        self._assert_champ_expressions(
            form, case, POST_TEST_XMLNS,
            uic_ctx='form', age_range_ctx='form', hiv_status_ctx='form')

    def test_champ_cametoon_properties_for_accompagnement_xmlns(self):
        # Accompagnement forms read uic/age_range/hiv_status from the case.
        form = {
            'id': 'form_id',
            'xmlns': ACCOMPAGNEMENT_XMLNS,
            'domain': 'champ_cameroon',
            'form': {
                'group': {
                    'age': 12,
                },
                'district': 'test district',
                'visit_date': '2017-01-15',
                'posttest_date': '2017-02-20',
                'type_visit': 'first visit',
                'date_handshake': '2017-05-03',
                'handshake_status': 'status',
                'meta': {
                    'userID': 'user_id',
                    'timeEnd': '2017-01-31 20:00'
                },
                'seropostive_group': {
                    'first_art_date': '2017-02-03',
                },
                'viral_load_group': {
                    'date_last_vl_test': '2017-01-29',
                    'undetect_vl': 'yes',
                }
            }
        }
        case = {
            'district': 'test district',
            'hiv_test_date': '2017-03-15',
            'name': 'test uic',
            'age_range': '10-15 yrs',
            'client_type': 'test client',
            'hiv_status': 'positive',
        }
        self._install_mock_user()
        self._assert_champ_expressions(
            form, case, ACCOMPAGNEMENT_XMLNS,
            uic_ctx='case', age_range_ctx='case', hiv_status_ctx='case')

    def test_champ_cametoon_properties_for_survi_medical_xmlns(self):
        # Suivi-medical forms: uic and hiv_status come from the case,
        # age_range from the form.
        form = {
            'id': 'form_id',
            'xmlns': SUIVI_MEDICAL_XMLNS,
            'domain': 'champ_cameroon',
            'form': {
                'group': {
                    'age': 12,
                },
                'district': 'test district',
                'visit_date': '2017-01-15',
                'posttest_date': '2017-02-20',
                'type_visit': 'first visit',
                'age_range': '10-15 yrs',
                'date_handshake': '2017-05-03',
                'handshake_status': 'status',
                'meta': {
                    'userID': 'user_id',
                    'timeEnd': '2017-01-31 20:00'
                },
                'seropostive_group': {
                    'first_art_date': '2017-02-03',
                },
                'load': {
                    'client_type': 'test client',
                    'hiv_status': 'positive',
                },
                'viral_load_group': {
                    'date_last_vl_test': '2017-01-29',
                    'undetect_vl': 'yes',
                }
            }
        }
        case = {
            'district': 'test district',
            'hiv_test_date': '2017-03-15',
            'name': 'test uic',
        }
        self._install_mock_user()
        self._assert_champ_expressions(
            form, case, SUIVI_MEDICAL_XMLNS,
            uic_ctx='case', age_range_ctx='form', hiv_status_ctx='case')
|
{
"content_hash": "c6d46fb8e74d5c5b310b8905637a7393",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 89,
"avg_line_length": 37.79220779220779,
"alnum_prop": 0.52,
"repo_name": "dimagi/commcare-hq",
"id": "7fadbd355c9888fc01869f17f77e2eefbdf0a9b7",
"size": "14550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/champ/tests/test_champ_cameroon.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
import os, logging
from io import StringIO
from threading import Thread
from yapsy.PluginManager import PluginManager
from yapsy.IPlugin import IPlugin
from yapsy.PluginInfo import PluginInfo
import BasePlugin
from Logger import Logger
class DpxPluginManager(PluginManager, Logger):
    """Yapsy plugin manager that collects and activates every plugin
    found under ./plugins, logging the outcome via the Logger mixin."""

    def __init__(self, **kwargs):
        # Force the dpx-specific info-file extension regardless of what
        # the caller passed in.
        kwargs['plugin_info_ext'] = 'dpx-plugin'
        PluginManager.__init__(self, **kwargs)
        Logger.__init__(self)  # Compromise: Logger is not cooperative, init it explicitly

        self.setPluginPlaces(['plugins'])
        self.setCategoriesFilter({
            'Parse': BasePlugin.ParserPlugin,
            'Draw': BasePlugin.DrawPlugin
        })

        self.debug('Collecting / firing plugins')
        self.collectPlugins()

        plugins = self.getAllPlugins()
        # Fix: dropped the unused `categories` local and the
        # non-idiomatic `len(plugins) > 0` test.
        if plugins:
            self.debug('Collected plugins: {0}'.format([p.name for p in plugins]))
        else:
            self.error('No plugins were collected!', 'ouch')

        for p in plugins:
            self.activatePluginByName(p.name, p.category)
class YetAnotherPluginManager(PluginManager):
    """This class manages plugin's availability and creates plugin thread.

    Original implementation:
    "Yapsy: a simple hack to get rid of the plugin info file"
    http://stackoverflow.com/questions/8642146/how-to-define-category-in-yapsy-plugin
    """
    def __init__(self,
                 categories_filter=None,
                 directories_list=None,
                 plugin_info_ext="plugin.py"):
        """
        Initialize the mapping of the categories and set the list of
        directories where plugins may be. This can also be set by
        direct call the methods:

        - ``setCategoriesFilter`` for ``categories_filter``
        - ``setPluginPlaces`` for ``directories_list``
        - ``setPluginInfoExtension`` for ``plugin_info_ext``

        You may look at these function's documentation for the meaning
        of each corresponding arguments.

        This hack just assumes that the plugin has an extension ".plugin.py"
        (or ".plugin" for directory, but I did not test it).
        """
        # Fix: the default used to be a mutable dict shared by every
        # instance; use None as the sentinel instead.
        if categories_filter is None:
            categories_filter = {"Default": IPlugin}
        # NOTE(review): PluginManager.__init__ is deliberately not called
        # here — this replicates its setup piecewise, per the original hack.
        self.setPluginInfoClass(PluginInfo)
        self.setCategoriesFilter(categories_filter)
        self.setPluginPlaces(directories_list)
        self.setPluginInfoExtension(plugin_info_ext)

    def _gatherCorePluginInfo(self, directory, filename):
        """
        Gather the core information (name, and module to be loaded)
        about a plugin described by it's info file (found at
        'directory/filename').

        Return an instance of ``self.plugin_info_cls`` and the
        config_parser used to gather the core data *in a tuple*, if the
        required info could be localised, else return ``(None,None)``.

        .. note:: This is supposed to be used internally by subclasses
            and decorators.
        """
        # now we can consider the file as a serious candidate
        candidate_infofile = os.path.join(directory, filename)
        # Fix: `filename.rstrip(".py")` stripped any trailing run of the
        # characters '.', 'p', 'y' (e.g. "spy.py" -> "s"); strip the
        # literal ".py" suffix only.  (Leftover debug prints removed.)
        _fname = filename[:-3] if filename.endswith(".py") else filename
        # My hack : just create a StringIO file with basic plugin info
        _file = StringIO()
        _file.write("""[Core]
Name = %s
Module = %s
""" % (_fname, _fname))
        _file.seek(0)
        # parse the information file to get info about the plugin
        name, moduleName, config_parser = self._getPluginNameAndModuleFromStream(_file, candidate_infofile)
        if (name, moduleName, config_parser) == (None, None, None):
            return (None, None)
        # start collecting essential info
        plugin_info = self._plugin_info_cls(name, os.path.join(directory, moduleName))
        return (plugin_info, config_parser)
|
{
"content_hash": "d1567e0ce9e8d14b0e5068a604716118",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 105,
"avg_line_length": 36.99038461538461,
"alnum_prop": 0.6399792045749935,
"repo_name": "puhitaku/digital-pen-experiment",
"id": "f2c616360fee1652b5abc319d5d58a60a55f6230",
"size": "3847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PluginManager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18133"
}
],
"symlink_target": ""
}
|
"""Unit tests for contextlib.py, and other context managers."""
import io
import sys
import tempfile
import unittest
from contextlib import * # Tests __all__
from test import support
try:
import threading
except ImportError:
threading = None
class ContextManagerTestCase(unittest.TestCase):
    """Tests for the @contextmanager decorator itself."""

    def test_contextmanager_plain(self):
        # The generator runs up to the yield on __enter__ and resumes
        # (appending 999) on normal __exit__.
        state = []
        @contextmanager
        def woohoo():
            state.append(1)
            yield 42
            state.append(999)
        with woohoo() as x:
            self.assertEqual(state, [1])
            self.assertEqual(x, 42)
            state.append(x)
        self.assertEqual(state, [1, 42, 999])

    def test_contextmanager_finally(self):
        # The finally clause must run even when the with-body raises.
        state = []
        @contextmanager
        def woohoo():
            state.append(1)
            try:
                yield 42
            finally:
                state.append(999)
        with self.assertRaises(ZeroDivisionError):
            with woohoo() as x:
                self.assertEqual(state, [1])
                self.assertEqual(x, 42)
                state.append(x)
                raise ZeroDivisionError()
        self.assertEqual(state, [1, 42, 999])

    def test_contextmanager_no_reraise(self):
        # __exit__ must return a falsy value (not raise) when the
        # generator simply ends after the exception is thrown in.
        @contextmanager
        def whee():
            yield
        ctx = whee()
        ctx.__enter__()
        # Calling __exit__ should not result in an exception
        self.assertFalse(ctx.__exit__(TypeError, TypeError("foo"), None))

    def test_contextmanager_trap_yield_after_throw(self):
        # A generator that yields again after the thrown-in exception is
        # a protocol violation and must raise RuntimeError.
        @contextmanager
        def whoo():
            try:
                yield
            except:  # deliberate bare except: trap anything thrown in
                yield
        ctx = whoo()
        ctx.__enter__()
        self.assertRaises(
            RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
        )

    def test_contextmanager_except(self):
        # The generator may catch the exception raised in the body; it
        # is then suppressed for the caller.
        state = []
        @contextmanager
        def woohoo():
            state.append(1)
            try:
                yield 42
            except ZeroDivisionError as e:
                state.append(e.args[0])
                self.assertEqual(state, [1, 42, 999])
        with woohoo() as x:
            self.assertEqual(state, [1])
            self.assertEqual(x, 42)
            state.append(x)
            raise ZeroDivisionError(999)
        self.assertEqual(state, [1, 42, 999])

    def _create_contextmanager_attribs(self):
        # Helper: build a @contextmanager-wrapped function carrying a
        # custom attribute and a docstring, for the metadata tests below.
        def attribs(**kw):
            def decorate(func):
                for k,v in kw.items():
                    setattr(func,k,v)
                return func
            return decorate
        @contextmanager
        @attribs(foo='bar')
        def baz(spam):
            """Whee!"""
        return baz

    def test_contextmanager_attribs(self):
        # functools.wraps must propagate __name__ and custom attributes.
        baz = self._create_contextmanager_attribs()
        self.assertEqual(baz.__name__,'baz')
        self.assertEqual(baz.foo, 'bar')

    @support.requires_docstrings
    def test_contextmanager_doc_attrib(self):
        baz = self._create_contextmanager_attribs()
        self.assertEqual(baz.__doc__, "Whee!")

    @support.requires_docstrings
    def test_instance_docstring_given_cm_docstring(self):
        # The helper instance (not just the factory) carries the docstring.
        baz = self._create_contextmanager_attribs()(None)
        self.assertEqual(baz.__doc__, "Whee!")
class ClosingTestCase(unittest.TestCase):
    """Tests for contextlib.closing()."""

    @support.requires_docstrings
    def test_instance_docs(self):
        # Issue 19330: ensure context manager instances have good docstrings
        self.assertEqual(closing(None).__doc__, closing.__doc__)

    def test_closing(self):
        events = []

        class Resource:
            def close(self):
                events.append(1)

        resource = Resource()
        self.assertEqual(events, [])
        with closing(resource) as managed:
            self.assertEqual(resource, managed)
        self.assertEqual(events, [1])

    def test_closing_error(self):
        # close() must still be called when the with-body raises.
        events = []

        class Resource:
            def close(self):
                events.append(1)

        resource = Resource()
        self.assertEqual(events, [])
        with self.assertRaises(ZeroDivisionError):
            with closing(resource) as managed:
                self.assertEqual(resource, managed)
                1 / 0
        self.assertEqual(events, [1])
class FileContextTestCase(unittest.TestCase):
    """Check that file objects behave as context managers."""

    def testWithOpen(self):
        # Fix: tempfile.mktemp() is race-prone and deprecated; create the
        # file atomically and keep only its name.
        tmp = tempfile.NamedTemporaryFile(delete=False)
        tmp.close()
        tfn = tmp.name
        try:
            f = None
            with open(tfn, "w") as f:
                self.assertFalse(f.closed)
                f.write("Booh\n")
            # The file is closed on normal exit...
            self.assertTrue(f.closed)
            f = None
            with self.assertRaises(ZeroDivisionError):
                with open(tfn, "r") as f:
                    self.assertFalse(f.closed)
                    self.assertEqual(f.read(), "Booh\n")
                    1 / 0
            # ...and also when the body raises.
            self.assertTrue(f.closed)
        finally:
            support.unlink(tfn)
@unittest.skipUnless(threading, 'Threading required for this test.')
class LockContextTestCase(unittest.TestCase):
    """Check that the threading synchronisation primitives work as
    context managers: not held before, held inside, released after —
    on both the normal and the raising exit path."""

    def boilerPlate(self, lock, locked):
        self.assertFalse(locked())
        with lock:
            self.assertTrue(locked())
        self.assertFalse(locked())
        with self.assertRaises(ZeroDivisionError):
            with lock:
                self.assertTrue(locked())
                1 / 0
        self.assertFalse(locked())

    def _nonblocking_probe(self, sem):
        # A semaphore counts as "locked" when a non-blocking acquire fails.
        def locked():
            if sem.acquire(False):
                sem.release()
                return False
            return True
        return locked

    def testWithLock(self):
        lock = threading.Lock()
        self.boilerPlate(lock, lock.locked)

    def testWithRLock(self):
        rlock = threading.RLock()
        self.boilerPlate(rlock, rlock._is_owned)

    def testWithCondition(self):
        cond = threading.Condition()
        self.boilerPlate(cond, cond._is_owned)

    def testWithSemaphore(self):
        sem = threading.Semaphore()
        self.boilerPlate(sem, self._nonblocking_probe(sem))

    def testWithBoundedSemaphore(self):
        sem = threading.BoundedSemaphore()
        self.boilerPlate(sem, self._nonblocking_probe(sem))
class mycontext(ContextDecorator):
    """Example decoration-compatible context manager for testing.

    Records whether __enter__ ran (``started``) and the exc_info tuple
    passed to __exit__ (``exc``); suppresses the exception iff ``catch``.
    """
    started = False
    exc = None
    catch = False

    def __enter__(self):
        self.started = True
        return self

    def __exit__(self, *exc_details):
        self.exc = exc_details
        return self.catch
class TestContextDecorator(unittest.TestCase):
    """Tests for contextlib.ContextDecorator (using mycontext above)."""

    @support.requires_docstrings
    def test_instance_docs(self):
        # Issue 19330: ensure context manager instances have good docstrings
        cm_docstring = mycontext.__doc__
        obj = mycontext()
        self.assertEqual(obj.__doc__, cm_docstring)

    def test_contextdecorator(self):
        # Plain with-statement use still works.
        context = mycontext()
        with context as result:
            self.assertIs(result, context)
            self.assertTrue(context.started)
        self.assertEqual(context.exc, (None, None, None))

    def test_contextdecorator_with_exception(self):
        context = mycontext()
        with self.assertRaisesRegex(NameError, 'foo'):
            with context:
                raise NameError('foo')
        self.assertIsNotNone(context.exc)
        self.assertIs(context.exc[0], NameError)

        # With catch=True the exception is recorded but suppressed.
        context = mycontext()
        context.catch = True
        with context:
            raise NameError('foo')
        self.assertIsNotNone(context.exc)
        self.assertIs(context.exc[0], NameError)

    def test_decorator(self):
        # Using the context manager as a function decorator.
        context = mycontext()
        @context
        def test():
            self.assertIsNone(context.exc)
            self.assertTrue(context.started)
        test()
        self.assertEqual(context.exc, (None, None, None))

    def test_decorator_with_exception(self):
        context = mycontext()
        @context
        def test():
            self.assertIsNone(context.exc)
            self.assertTrue(context.started)
            raise NameError('foo')
        with self.assertRaisesRegex(NameError, 'foo'):
            test()
        self.assertIsNotNone(context.exc)
        self.assertIs(context.exc[0], NameError)

    def test_decorating_method(self):
        context = mycontext()
        class Test(object):
            @context
            def method(self, a, b, c=None):
                self.a = a
                self.b = b
                self.c = c
        # these tests are for argument passing when used as a decorator
        test = Test()
        test.method(1, 2)
        self.assertEqual(test.a, 1)
        self.assertEqual(test.b, 2)
        self.assertEqual(test.c, None)

        test = Test()
        test.method('a', 'b', 'c')
        self.assertEqual(test.a, 'a')
        self.assertEqual(test.b, 'b')
        self.assertEqual(test.c, 'c')

        test = Test()
        test.method(a=1, b=2)
        self.assertEqual(test.a, 1)
        self.assertEqual(test.b, 2)

    def test_typo_enter(self):
        # A missing __enter__ must surface as AttributeError at with-time.
        class mycontext(ContextDecorator):
            def __unter__(self):
                pass
            def __exit__(self, *exc):
                pass
        with self.assertRaises(AttributeError):
            with mycontext():
                pass

    def test_typo_exit(self):
        # A missing __exit__ must surface as AttributeError at with-time.
        class mycontext(ContextDecorator):
            def __enter__(self):
                pass
            def __uxit__(self, *exc):
                pass
        with self.assertRaises(AttributeError):
            with mycontext():
                pass

    def test_contextdecorator_as_mixin(self):
        # ContextDecorator can be mixed into an existing CM class.
        class somecontext(object):
            started = False
            exc = None
            def __enter__(self):
                self.started = True
                return self
            def __exit__(self, *exc):
                self.exc = exc

        class mycontext(somecontext, ContextDecorator):
            pass

        context = mycontext()
        @context
        def test():
            self.assertIsNone(context.exc)
            self.assertTrue(context.started)
        test()
        self.assertEqual(context.exc, (None, None, None))

    def test_contextmanager_as_decorator(self):
        @contextmanager
        def woohoo(y):
            state.append(y)
            yield
            state.append(999)

        state = []
        @woohoo(1)
        def test(x):
            self.assertEqual(state, [1])
            state.append(x)
        test('something')
        self.assertEqual(state, [1, 'something', 999])

        # Issue #11647: Ensure the decorated function is 'reusable'
        state = []
        test('something else')
        self.assertEqual(state, [1, 'something else', 999])
class TestExitStack(unittest.TestCase):
    """Tests for contextlib.ExitStack, including the subtle exception
    chaining behaviour that must match nested with statements."""

    @support.requires_docstrings
    def test_instance_docs(self):
        # Issue 19330: ensure context manager instances have good docstrings
        cm_docstring = ExitStack.__doc__
        obj = ExitStack()
        self.assertEqual(obj.__doc__, cm_docstring)

    def test_no_resources(self):
        # An empty stack is a valid (no-op) context manager.
        with ExitStack():
            pass

    def test_callback(self):
        # callback() must preserve positional/keyword args and return the
        # original function unchanged (metadata propagated to the wrapper).
        expected = [
            ((), {}),
            ((1,), {}),
            ((1,2), {}),
            ((), dict(example=1)),
            ((1,), dict(example=1)),
            ((1,2), dict(example=1)),
        ]
        result = []
        def _exit(*args, **kwds):
            """Test metadata propagation"""
            result.append((args, kwds))
        with ExitStack() as stack:
            for args, kwds in reversed(expected):
                if args and kwds:
                    f = stack.callback(_exit, *args, **kwds)
                elif args:
                    f = stack.callback(_exit, *args)
                elif kwds:
                    f = stack.callback(_exit, **kwds)
                else:
                    f = stack.callback(_exit)
                self.assertIs(f, _exit)
            for wrapper in stack._exit_callbacks:
                self.assertIs(wrapper.__wrapped__, _exit)
                self.assertNotEqual(wrapper.__name__, _exit.__name__)
                # second argument is the assertion failure message
                self.assertIsNone(wrapper.__doc__, _exit.__doc__)
        self.assertEqual(result, expected)

    def test_push(self):
        # push() accepts both raw __exit__-style callables and CM objects;
        # the final 1/0 is suppressed by _suppress_exc on unwinding.
        exc_raised = ZeroDivisionError
        def _expect_exc(exc_type, exc, exc_tb):
            self.assertIs(exc_type, exc_raised)
        def _suppress_exc(*exc_details):
            return True
        def _expect_ok(exc_type, exc, exc_tb):
            self.assertIsNone(exc_type)
            self.assertIsNone(exc)
            self.assertIsNone(exc_tb)
        class ExitCM(object):
            def __init__(self, check_exc):
                self.check_exc = check_exc
            def __enter__(self):
                self.fail("Should not be called!")
            def __exit__(self, *exc_details):
                self.check_exc(*exc_details)
        with ExitStack() as stack:
            stack.push(_expect_ok)
            self.assertIs(stack._exit_callbacks[-1], _expect_ok)
            cm = ExitCM(_expect_ok)
            stack.push(cm)
            self.assertIs(stack._exit_callbacks[-1].__self__, cm)
            stack.push(_suppress_exc)
            self.assertIs(stack._exit_callbacks[-1], _suppress_exc)
            cm = ExitCM(_expect_exc)
            stack.push(cm)
            self.assertIs(stack._exit_callbacks[-1].__self__, cm)
            stack.push(_expect_exc)
            self.assertIs(stack._exit_callbacks[-1], _expect_exc)
            stack.push(_expect_exc)
            self.assertIs(stack._exit_callbacks[-1], _expect_exc)
            1/0

    def test_enter_context(self):
        # enter_context() calls __enter__ now and schedules __exit__.
        class TestCM(object):
            def __enter__(self):
                result.append(1)
            def __exit__(self, *exc_details):
                result.append(3)
        result = []
        cm = TestCM()
        with ExitStack() as stack:
            @stack.callback  # Registered first => cleaned up last
            def _exit():
                result.append(4)
            self.assertIsNotNone(_exit)
            stack.enter_context(cm)
            self.assertIs(stack._exit_callbacks[-1].__self__, cm)
            result.append(2)
        self.assertEqual(result, [1, 2, 3, 4])

    def test_close(self):
        # close() runs the callbacks immediately.
        result = []
        with ExitStack() as stack:
            @stack.callback
            def _exit():
                result.append(1)
            self.assertIsNotNone(_exit)
            stack.close()
            result.append(2)
        self.assertEqual(result, [1, 2])

    def test_pop_all(self):
        # pop_all() transfers the callbacks to a new stack.
        result = []
        with ExitStack() as stack:
            @stack.callback
            def _exit():
                result.append(3)
            self.assertIsNotNone(_exit)
            new_stack = stack.pop_all()
            result.append(1)
        result.append(2)
        new_stack.close()
        self.assertEqual(result, [1, 2, 3])

    def test_exit_raise(self):
        with self.assertRaises(ZeroDivisionError):
            with ExitStack() as stack:
                stack.push(lambda *exc: False)
                1/0

    def test_exit_suppress(self):
        with ExitStack() as stack:
            stack.push(lambda *exc: True)
            1/0

    def test_exit_exception_chaining_reference(self):
        # Sanity check to make sure that ExitStack chaining matches
        # actual nested with statements
        class RaiseExc:
            def __init__(self, exc):
                self.exc = exc
            def __enter__(self):
                return self
            def __exit__(self, *exc_details):
                raise self.exc
        class RaiseExcWithContext:
            def __init__(self, outer, inner):
                self.outer = outer
                self.inner = inner
            def __enter__(self):
                return self
            def __exit__(self, *exc_details):
                try:
                    raise self.inner
                except:
                    raise self.outer
        class SuppressExc:
            def __enter__(self):
                return self
            def __exit__(self, *exc_details):
                type(self).saved_details = exc_details
                return True
        try:
            with RaiseExc(IndexError):
                with RaiseExcWithContext(KeyError, AttributeError):
                    with SuppressExc():
                        with RaiseExc(ValueError):
                            1 / 0
        except IndexError as exc:
            self.assertIsInstance(exc.__context__, KeyError)
            self.assertIsInstance(exc.__context__.__context__, AttributeError)
            # Inner exceptions were suppressed
            self.assertIsNone(exc.__context__.__context__.__context__)
        else:
            self.fail("Expected IndexError, but no exception was raised")
        # Check the inner exceptions
        inner_exc = SuppressExc.saved_details[1]
        self.assertIsInstance(inner_exc, ValueError)
        self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)

    def test_exit_exception_chaining(self):
        # Ensure exception chaining matches the reference behaviour
        def raise_exc(exc):
            raise exc
        saved_details = None
        def suppress_exc(*exc_details):
            nonlocal saved_details
            saved_details = exc_details
            return True
        try:
            with ExitStack() as stack:
                stack.callback(raise_exc, IndexError)
                stack.callback(raise_exc, KeyError)
                stack.callback(raise_exc, AttributeError)
                stack.push(suppress_exc)
                stack.callback(raise_exc, ValueError)
                1 / 0
        except IndexError as exc:
            self.assertIsInstance(exc.__context__, KeyError)
            self.assertIsInstance(exc.__context__.__context__, AttributeError)
            # Inner exceptions were suppressed
            self.assertIsNone(exc.__context__.__context__.__context__)
        else:
            self.fail("Expected IndexError, but no exception was raised")
        # Check the inner exceptions
        inner_exc = saved_details[1]
        self.assertIsInstance(inner_exc, ValueError)
        self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)

    def test_exit_exception_non_suppressing(self):
        # http://bugs.python.org/issue19092
        def raise_exc(exc):
            raise exc
        def suppress_exc(*exc_details):
            return True
        try:
            with ExitStack() as stack:
                stack.callback(lambda: None)
                stack.callback(raise_exc, IndexError)
        except Exception as exc:
            self.assertIsInstance(exc, IndexError)
        else:
            self.fail("Expected IndexError, but no exception was raised")
        try:
            with ExitStack() as stack:
                stack.callback(raise_exc, KeyError)
                stack.push(suppress_exc)
                stack.callback(raise_exc, IndexError)
        except Exception as exc:
            self.assertIsInstance(exc, KeyError)
        else:
            self.fail("Expected KeyError, but no exception was raised")

    def test_exit_exception_with_correct_context(self):
        # http://bugs.python.org/issue20317
        @contextmanager
        def gets_the_context_right(exc):
            try:
                yield
            finally:
                raise exc
        exc1 = Exception(1)
        exc2 = Exception(2)
        exc3 = Exception(3)
        exc4 = Exception(4)
        # The contextmanager already fixes the context, so prior to the
        # fix, ExitStack would try to fix it *again* and get into an
        # infinite self-referential loop
        try:
            with ExitStack() as stack:
                stack.enter_context(gets_the_context_right(exc4))
                stack.enter_context(gets_the_context_right(exc3))
                stack.enter_context(gets_the_context_right(exc2))
                raise exc1
        except Exception as exc:
            self.assertIs(exc, exc4)
            self.assertIs(exc.__context__, exc3)
            self.assertIs(exc.__context__.__context__, exc2)
            self.assertIs(exc.__context__.__context__.__context__, exc1)
            self.assertIsNone(
                exc.__context__.__context__.__context__.__context__)

    def test_exit_exception_with_existing_context(self):
        # Addresses a lack of test coverage discovered after checking in a
        # fix for issue 20317 that still contained debugging code.
        def raise_nested(inner_exc, outer_exc):
            try:
                raise inner_exc
            finally:
                raise outer_exc
        exc1 = Exception(1)
        exc2 = Exception(2)
        exc3 = Exception(3)
        exc4 = Exception(4)
        exc5 = Exception(5)
        try:
            with ExitStack() as stack:
                stack.callback(raise_nested, exc4, exc5)
                stack.callback(raise_nested, exc2, exc3)
                raise exc1
        except Exception as exc:
            self.assertIs(exc, exc5)
            self.assertIs(exc.__context__, exc4)
            self.assertIs(exc.__context__.__context__, exc3)
            self.assertIs(exc.__context__.__context__.__context__, exc2)
            self.assertIs(
                exc.__context__.__context__.__context__.__context__, exc1)
            self.assertIsNone(
                exc.__context__.__context__.__context__.__context__.__context__)

    def test_body_exception_suppress(self):
        # An exception raised by the with-body itself can be suppressed.
        def suppress_exc(*exc_details):
            return True
        try:
            with ExitStack() as stack:
                stack.push(suppress_exc)
                1/0
        except IndexError as exc:
            self.fail("Expected no exception, got IndexError")

    def test_exit_exception_chaining_suppress(self):
        # Exceptions raised by later callbacks are also suppressed.
        with ExitStack() as stack:
            stack.push(lambda *exc: True)
            stack.push(lambda *exc: 1/0)
            stack.push(lambda *exc: {}[1])

    def test_excessive_nesting(self):
        # The original implementation would die with RecursionError here
        with ExitStack() as stack:
            for i in range(10000):
                stack.callback(int)

    def test_instance_bypass(self):
        # push() registers an object with a non-callable __exit__ as-is;
        # enter_context() still requires a real CM.
        class Example(object): pass
        cm = Example()
        cm.__exit__ = object()
        stack = ExitStack()
        self.assertRaises(AttributeError, stack.enter_context, cm)
        stack.push(cm)
        self.assertIs(stack._exit_callbacks[-1], cm)
class TestRedirectStdout(unittest.TestCase):
    """Tests for contextlib.redirect_stdout."""

    @support.requires_docstrings
    def test_instance_docs(self):
        # Issue 19330: ensure context manager instances have good docstrings
        cm_docstring = redirect_stdout.__doc__
        obj = redirect_stdout(None)
        self.assertEqual(obj.__doc__, cm_docstring)

    def test_no_redirect_in_init(self):
        # Construction alone must not touch sys.stdout; only __enter__ does.
        orig_stdout = sys.stdout
        redirect_stdout(None)
        self.assertIs(sys.stdout, orig_stdout)

    def test_redirect_to_string_io(self):
        f = io.StringIO()
        msg = "Consider an API like help(), which prints directly to stdout"
        orig_stdout = sys.stdout
        with redirect_stdout(f):
            print(msg)
        self.assertIs(sys.stdout, orig_stdout)
        s = f.getvalue().strip()
        self.assertEqual(s, msg)

    def test_enter_result_is_target(self):
        f = io.StringIO()
        with redirect_stdout(f) as enter_result:
            self.assertIs(enter_result, f)

    def test_cm_is_reusable(self):
        # The same CM instance can be used in two sequential with blocks.
        f = io.StringIO()
        write_to_f = redirect_stdout(f)
        orig_stdout = sys.stdout
        with write_to_f:
            print("Hello", end=" ")
        with write_to_f:
            print("World!")
        self.assertIs(sys.stdout, orig_stdout)
        s = f.getvalue()
        self.assertEqual(s, "Hello World!\n")

    def test_cm_is_reentrant(self):
        # The same CM instance can be nested inside itself.
        f = io.StringIO()
        write_to_f = redirect_stdout(f)
        orig_stdout = sys.stdout
        with write_to_f:
            print("Hello", end=" ")
            with write_to_f:
                print("World!")
        self.assertIs(sys.stdout, orig_stdout)
        s = f.getvalue()
        self.assertEqual(s, "Hello World!\n")
class TestSuppress(unittest.TestCase):
    """Behavioral tests for contextlib.suppress."""

    @support.requires_docstrings
    def test_instance_docs(self):
        # Issue 19330: ensure context manager instances have good docstrings
        expected_doc = suppress.__doc__
        instance = suppress()
        self.assertEqual(instance.__doc__, expected_doc)

    def test_no_result_from_enter(self):
        # suppress() yields nothing useful from __enter__.
        with suppress(ValueError) as enter_result:
            self.assertIsNone(enter_result)

    def test_no_exception(self):
        # The body runs normally when nothing is raised.
        with suppress(ValueError):
            self.assertEqual(pow(2, 5), 32)

    def test_exact_exception(self):
        with suppress(TypeError):
            len(5)

    def test_exception_hierarchy(self):
        # Subclasses of the named exception are suppressed as well
        # (IndexError is a LookupError).
        with suppress(LookupError):
            'Hello'[50]

    def test_other_exception(self):
        # Unrelated exceptions must propagate unchanged.
        with self.assertRaises(ZeroDivisionError):
            with suppress(TypeError):
                1/0

    def test_no_args(self):
        # With no arguments, suppress() suppresses nothing at all.
        with self.assertRaises(ZeroDivisionError):
            with suppress():
                1/0

    def test_multiple_exception_args(self):
        # Any of the listed exception types is suppressed.
        with suppress(ZeroDivisionError, TypeError):
            1/0
        with suppress(ZeroDivisionError, TypeError):
            len(5)

    def test_cm_is_reentrant(self):
        ignore_all = suppress(Exception)
        with ignore_all:
            pass
        with ignore_all:
            len(5)
        with ignore_all:
            1/0
            with ignore_all: # Check nested usage
                len(5)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "30681c3af95d93f4255b48bd03bfad91",
"timestamp": "",
"source": "github",
"line_count": 828,
"max_line_length": 80,
"avg_line_length": 31.397342995169083,
"alnum_prop": 0.5431780590068085,
"repo_name": "samuelhavron/heroku-buildpack-python",
"id": "39cc776dbcafa743d46080a57e9243e2c9ed8520",
"size": "25997",
"binary": false,
"copies": "78",
"ref": "refs/heads/master",
"path": "Python-3.4.3/Lib/test/test_contextlib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "594205"
},
{
"name": "Batchfile",
"bytes": "18943"
},
{
"name": "C",
"bytes": "16647302"
},
{
"name": "C++",
"bytes": "176362"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "Groff",
"bytes": "255056"
},
{
"name": "HTML",
"bytes": "130855"
},
{
"name": "JavaScript",
"bytes": "10598"
},
{
"name": "M4",
"bytes": "214312"
},
{
"name": "Makefile",
"bytes": "196708"
},
{
"name": "Objective-C",
"bytes": "33060"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24212132"
},
{
"name": "R",
"bytes": "5378"
},
{
"name": "Shell",
"bytes": "488285"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
"""
Continuous Random Variables - Prebuilt variables
Contains
========
Arcsin
Benini
Beta
BetaPrime
Cauchy
Chi
ChiNoncentral
ChiSquared
Dagum
Erlang
Exponential
FDistribution
FisherZ
Frechet
Gamma
GammaInverse
Gompertz
Kumaraswamy
Laplace
Logistic
LogNormal
Maxwell
Nakagami
Normal
Pareto
QuadraticU
RaisedCosine
Rayleigh
StudentT
Triangular
Uniform
UniformSum
VonMises
Weibull
WignerSemicircle
"""
from __future__ import print_function, division
from sympy import (log, sqrt, pi, S, Dummy, Interval, sympify, gamma,
Piecewise, And, Eq, binomial, factorial, Sum, floor, Abs,
Lambda, Basic)
from sympy import beta as beta_fn
from sympy import cos, exp, besseli
from sympy.stats.crv import (SingleContinuousPSpace, SingleContinuousDistribution,
ContinuousDistributionHandmade)
from sympy.stats.rv import _value_check
import random
# Shorthand for positive infinity, used in the distribution supports below.
oo = S.Infinity
# Explicit public API: the generic ContinuousRV factory plus one named
# constructor per predefined continuous distribution.
__all__ = ['ContinuousRV',
'Arcsin',
'Benini',
'Beta',
'BetaPrime',
'Cauchy',
'Chi',
'ChiNoncentral',
'ChiSquared',
'Dagum',
'Erlang',
'Exponential',
'FDistribution',
'FisherZ',
'Frechet',
'Gamma',
'GammaInverse',
'Gompertz',
'Kumaraswamy',
'Laplace',
'Logistic',
'LogNormal',
'Maxwell',
'Nakagami',
'Normal',
'Pareto',
'QuadraticU',
'RaisedCosine',
'Rayleigh',
'StudentT',
'Triangular',
'Uniform',
'UniformSum',
'VonMises',
'Weibull',
'WignerSemicircle'
]
def ContinuousRV(symbol, density, set=Interval(-oo, oo)):
    """
    Create a Continuous Random Variable given the following:
    -- a symbol
    -- a probability density function
    -- set on which the pdf is valid (defaults to entire real line)
    Returns a RandomSymbol.
    Many common continuous random variable types are already implemented.
    This function should be necessary only very rarely.
    Examples
    ========
    >>> from sympy import Symbol, sqrt, exp, pi
    >>> from sympy.stats import ContinuousRV, P, E
    >>> x = Symbol("x")
    >>> pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
    >>> X = ContinuousRV(x, pdf)
    >>> E(X)
    0
    >>> P(X>0)
    1/2
    """
    # Wrap the raw density in a Lambda and hand it to the generic
    # hand-made-distribution machinery; the pspace's value is the
    # RandomSymbol users interact with.
    dist = ContinuousDistributionHandmade(Lambda(symbol, density), set)
    return SingleContinuousPSpace(symbol, dist).value
def rv(symbol, cls, args):
    """Sympify *args*, build a *cls* distribution, validate it, and return
    the RandomSymbol of the resulting single-variable continuous pspace."""
    sympified = [sympify(arg) for arg in args]
    dist = cls(*sympified)
    dist.check(*sympified)
    return SingleContinuousPSpace(symbol, dist).value
########################################
# Continuous Probability Distributions #
########################################
#-------------------------------------------------------------------------------
# Arcsin distribution ----------------------------------------------------------
class ArcsinDistribution(SingleContinuousDistribution):
    # Arcsine distribution on the interval [a, b].
    _argnames = ('a', 'b')

    @property
    def set(self):
        # Without this the inherited support is the whole real line, but the
        # density below is only real and normalized on [a, b]; restricting
        # the support keeps integrations (CDF, moments) over the right domain
        # (same pattern as BeniniDistribution.set).
        return Interval(self.a, self.b)

    def pdf(self, x):
        return 1/(pi*sqrt((x - self.a)*(self.b - x)))
def Arcsin(name, a=0, b=1):
    r"""
    Create a Continuous Random Variable with an arcsin distribution.
    The density of the arcsin distribution is given by
    .. math::
        f(x) := \frac{1}{\pi\sqrt{(x-a)(b-x)}}
    with :math:`x \in [a,b]`. It must hold that :math:`-\infty < a < b < \infty`.
    Parameters
    ==========
    a : Real number, the left interval boundary
    b : Real number, the right interval boundary
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Arcsin, density
    >>> from sympy import Symbol, simplify
    >>> a = Symbol("a", real=True)
    >>> b = Symbol("b", real=True)
    >>> z = Symbol("z")
    >>> X = Arcsin("x", a, b)
    >>> density(X)(z)
    1/(pi*sqrt((-a + z)*(b - z)))
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Arcsine_distribution
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, ArcsinDistribution, (a, b))
#-------------------------------------------------------------------------------
# Benini distribution ----------------------------------------------------------
class BeniniDistribution(SingleContinuousDistribution):
    """Benini (log-Rayleigh) distribution, supported on [sigma, oo)."""
    _argnames = ('alpha', 'beta', 'sigma')

    @property
    def set(self):
        # The density is only defined from the scale parameter upwards.
        return Interval(self.sigma, oo)

    def pdf(self, x):
        alpha, beta, sigma = self.alpha, self.beta, self.sigma
        scaled_log = log(x/sigma)
        return (exp(-alpha*scaled_log - beta*scaled_log**2)
                *(alpha/x + 2*beta*scaled_log/x))
def Benini(name, alpha, beta, sigma):
    r"""
    Create a Continuous Random Variable with a Benini distribution.
    The density of the Benini distribution is given by
    .. math::
        f(x) := e^{-\alpha\log{\frac{x}{\sigma}}
                -\beta\log^2\left[{\frac{x}{\sigma}}\right]}
                \left(\frac{\alpha}{x}+\frac{2\beta\log{\frac{x}{\sigma}}}{x}\right)
    This is a heavy-tailed distribution and is also known as the log-Rayleigh
    distribution.
    Parameters
    ==========
    alpha : Real number, `\alpha > 0`, a shape
    beta : Real number, `\beta > 0`, a shape
    sigma : Real number, `\sigma > 0`, a scale
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Benini, density
    >>> from sympy import Symbol, simplify, pprint
    >>> alpha = Symbol("alpha", positive=True)
    >>> beta = Symbol("beta", positive=True)
    >>> sigma = Symbol("sigma", positive=True)
    >>> z = Symbol("z")
    >>> X = Benini("x", alpha, beta, sigma)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
    /                  /  z  \\             /  z  \          2/  z  \
    |        2*beta*log|-----||  - alpha*log|-----| - beta*log |-----|
    |alpha             \sigma/|             \sigma/            \sigma/
    |----- + -----------------|*e
    \  z             z        /
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Benini_distribution
    .. [2] http://reference.wolfram.com/legacy/v8/ref/BeniniDistribution.html
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, BeniniDistribution, (alpha, beta, sigma))
#-------------------------------------------------------------------------------
# Beta distribution ------------------------------------------------------------
class BetaDistribution(SingleContinuousDistribution):
    """Beta distribution on the unit interval with two shape parameters."""
    _argnames = ('alpha', 'beta')
    set = Interval(0, 1)

    @staticmethod
    def check(alpha, beta):
        # Both shape parameters must be strictly positive.
        _value_check(alpha > 0, "Alpha must be positive")
        _value_check(beta > 0, "Beta must be positive")

    def pdf(self, x):
        a, b = self.alpha, self.beta
        return x**(a - 1) * (1 - x)**(b - 1) / beta_fn(a, b)

    def sample(self):
        # Draw a concrete realisation using the stdlib generator.
        return random.betavariate(self.alpha, self.beta)
def Beta(name, alpha, beta):
    r"""
    Create a Continuous Random Variable with a Beta distribution.
    The density of the Beta distribution is given by
    .. math::
        f(x) := \frac{x^{\alpha-1}(1-x)^{\beta-1}} {\mathrm{B}(\alpha,\beta)}
    with :math:`x \in [0,1]`.
    Parameters
    ==========
    alpha : Real number, `\alpha > 0`, a shape
    beta : Real number, `\beta > 0`, a shape
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Beta, density, E, variance
    >>> from sympy import Symbol, simplify, pprint, expand_func
    >>> alpha = Symbol("alpha", positive=True)
    >>> beta = Symbol("beta", positive=True)
    >>> z = Symbol("z")
    >>> X = Beta("x", alpha, beta)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
     alpha - 1         beta - 1
    z         *(-z + 1)
    ---------------------------
         beta(alpha, beta)
    >>> expand_func(simplify(E(X, meijerg=True)))
    alpha/(alpha + beta)
    >>> simplify(variance(X, meijerg=True))  #doctest: +SKIP
    alpha*beta/((alpha + beta)**2*(alpha + beta + 1))
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Beta_distribution
    .. [2] http://mathworld.wolfram.com/BetaDistribution.html
    """
    # Positivity of both shapes is enforced by BetaDistribution.check via rv().
    return rv(name, BetaDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Beta prime distribution ------------------------------------------------------
class BetaPrimeDistribution(SingleContinuousDistribution):
    """Beta prime distribution, supported on the positive half line."""
    _argnames = ('alpha', 'beta')
    set = Interval(0, oo)

    def pdf(self, x):
        a, b = self.alpha, self.beta
        return x**(a - 1)*(1 + x)**(-a - b)/beta_fn(a, b)
def BetaPrime(name, alpha, beta):
    r"""
    Create a continuous random variable with a Beta prime distribution.
    The density of the Beta prime distribution is given by
    .. math::
        f(x) := \frac{x^{\alpha-1} (1+x)^{-\alpha -\beta}}{B(\alpha,\beta)}
    with :math:`x > 0`.
    Parameters
    ==========
    alpha : Real number, `\alpha > 0`, a shape
    beta : Real number, `\beta > 0`, a shape
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import BetaPrime, density
    >>> from sympy import Symbol, pprint
    >>> alpha = Symbol("alpha", positive=True)
    >>> beta = Symbol("beta", positive=True)
    >>> z = Symbol("z")
    >>> X = BetaPrime("x", alpha, beta)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
     alpha - 1        -alpha - beta
    z         *(z + 1)
    -------------------------------
           beta(alpha, beta)
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Beta_prime_distribution
    .. [2] http://mathworld.wolfram.com/BetaPrimeDistribution.html
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, BetaPrimeDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Cauchy distribution ----------------------------------------------------------
class CauchyDistribution(SingleContinuousDistribution):
    """Cauchy distribution with location ``x0`` and scale ``gamma``."""
    _argnames = ('x0', 'gamma')

    def pdf(self, x):
        loc, scale = self.x0, self.gamma
        return 1/(pi*scale*(1 + ((x - loc)/scale)**2))
def Cauchy(name, x0, gamma):
    r"""
    Create a continuous random variable with a Cauchy distribution.
    The density of the Cauchy distribution is given by
    .. math::
        f(x) := \frac{1}{\pi \gamma \left[1 +
                \left(\frac{x-x_0}{\gamma}\right)^2\right]}
    (The formula previously documented here was the arctan-based CDF, not the
    density; the pdf above matches CauchyDistribution.pdf and the doctest.)
    Parameters
    ==========
    x0 : Real number, the location
    gamma : Real number, `\gamma > 0`, the scale
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Cauchy, density
    >>> from sympy import Symbol
    >>> x0 = Symbol("x0")
    >>> gamma = Symbol("gamma", positive=True)
    >>> z = Symbol("z")
    >>> X = Cauchy("x", x0, gamma)
    >>> density(X)(z)
    1/(pi*gamma*(1 + (-x0 + z)**2/gamma**2))
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Cauchy_distribution
    .. [2] http://mathworld.wolfram.com/CauchyDistribution.html
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, CauchyDistribution, (x0, gamma))
#-------------------------------------------------------------------------------
# Chi distribution -------------------------------------------------------------
class ChiDistribution(SingleContinuousDistribution):
    """Chi distribution with ``k`` degrees of freedom, supported on [0, oo)."""
    _argnames = ('k',)
    set = Interval(0, oo)

    def pdf(self, x):
        k = self.k
        return 2**(1 - k/2)*x**(k - 1)*exp(-x**2/2)/gamma(k/2)
def Chi(name, k):
    r"""
    Create a continuous random variable with a Chi distribution.
    The density of the Chi distribution is given by
    .. math::
        f(x) := \frac{2^{1-k/2}x^{k-1}e^{-x^2/2}}{\Gamma(k/2)}
    with :math:`x \geq 0`.
    Parameters
    ==========
    k : A positive Integer, `k > 0`, the number of degrees of freedom
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Chi, density, E, std
    >>> from sympy import Symbol, simplify
    >>> k = Symbol("k", integer=True)
    >>> z = Symbol("z")
    >>> X = Chi("x", k)
    >>> density(X)(z)
    2**(-k/2 + 1)*z**(k - 1)*exp(-z**2/2)/gamma(k/2)
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Chi_distribution
    .. [2] http://mathworld.wolfram.com/ChiDistribution.html
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, ChiDistribution, (k,))
#-------------------------------------------------------------------------------
# Non-central Chi distribution -------------------------------------------------
class ChiNoncentralDistribution(SingleContinuousDistribution):
    """Noncentral chi distribution with ``k`` dof and shift parameter ``l``."""
    _argnames = ('k', 'l')
    set = Interval(0, oo)

    def pdf(self, x):
        k, l = self.k, self.l
        # The density involves the modified Bessel function of the first kind.
        return exp(-(x**2 + l**2)/2)*x**k*l / (l*x)**(k/2) * besseli(k/2 - 1, l*x)
def ChiNoncentral(name, k, l):
    r"""
    Create a continuous random variable with a non-central Chi distribution.
    The density of the non-central Chi distribution is given by
    .. math::
        f(x) := \frac{e^{-(x^2+\lambda^2)/2} x^k\lambda}
                {(\lambda x)^{k/2}} I_{k/2-1}(\lambda x)
    with `x \geq 0`. Here, `I_\nu (x)` is the
    :ref:`modified Bessel function of the first kind <besseli>`.
    Parameters
    ==========
    k : A positive Integer, `k > 0`, the number of degrees of freedom
    l : Shift parameter
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import ChiNoncentral, density, E, std
    >>> from sympy import Symbol, simplify
    >>> k = Symbol("k", integer=True)
    >>> l = Symbol("l")
    >>> z = Symbol("z")
    >>> X = ChiNoncentral("x", k, l)
    >>> density(X)(z)
    l*z**k*(l*z)**(-k/2)*exp(-l**2/2 - z**2/2)*besseli(k/2 - 1, l*z)
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Noncentral_chi_distribution
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, ChiNoncentralDistribution, (k, l))
#-------------------------------------------------------------------------------
# Chi squared distribution -----------------------------------------------------
class ChiSquaredDistribution(SingleContinuousDistribution):
    """Chi-squared distribution with ``k`` degrees of freedom."""
    _argnames = ('k',)
    set = Interval(0, oo)

    def pdf(self, x):
        dof = self.k
        return 1/(2**(dof/2)*gamma(dof/2))*x**(dof/2 - 1)*exp(-x/2)
def ChiSquared(name, k):
    r"""
    Create a continuous random variable with a Chi-squared distribution.
    The density of the Chi-squared distribution is given by
    .. math::
        f(x) := \frac{1}{2^{\frac{k}{2}}\Gamma\left(\frac{k}{2}\right)}
                x^{\frac{k}{2}-1} e^{-\frac{x}{2}}
    with :math:`x \geq 0`.
    Parameters
    ==========
    k : A positive Integer, `k > 0`, the number of degrees of freedom
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import ChiSquared, density, E, variance
    >>> from sympy import Symbol, simplify, combsimp, expand_func
    >>> k = Symbol("k", integer=True, positive=True)
    >>> z = Symbol("z")
    >>> X = ChiSquared("x", k)
    >>> density(X)(z)
    2**(-k/2)*z**(k/2 - 1)*exp(-z/2)/gamma(k/2)
    >>> combsimp(E(X))
    k
    >>> simplify(expand_func(variance(X)))
    2*k
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Chi_squared_distribution
    .. [2] http://mathworld.wolfram.com/Chi-SquaredDistribution.html
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, ChiSquaredDistribution, (k, ))
#-------------------------------------------------------------------------------
# Dagum distribution -----------------------------------------------------------
class DagumDistribution(SingleContinuousDistribution):
    """Dagum distribution with shape parameters ``p``, ``a`` and scale ``b``."""
    _argnames = ('p', 'a', 'b')

    def pdf(self, x):
        p, a, b = self.p, self.a, self.b
        scaled = x/b
        return a*p/x*(scaled**(a*p)/((scaled**a + 1)**(p + 1)))
def Dagum(name, p, a, b):
    r"""
    Create a continuous random variable with a Dagum distribution.
    The density of the Dagum distribution is given by
    .. math::
        f(x) := \frac{a p}{x} \left( \frac{\left(\tfrac{x}{b}\right)^{a p}}
                {\left(\left(\tfrac{x}{b}\right)^a + 1 \right)^{p+1}} \right)
    with :math:`x > 0`.
    Parameters
    ==========
    p : Real number, `p > 0`, a shape
    a : Real number, `a > 0`, a shape
    b : Real number, `b > 0`, a scale
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Dagum, density
    >>> from sympy import Symbol, simplify
    >>> p = Symbol("p", positive=True)
    >>> b = Symbol("b", positive=True)
    >>> a = Symbol("a", positive=True)
    >>> z = Symbol("z")
    >>> X = Dagum("x", p, a, b)
    >>> density(X)(z)
    a*p*(z/b)**(a*p)*((z/b)**a + 1)**(-p - 1)/z
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Dagum_distribution
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, DagumDistribution, (p, a, b))
#-------------------------------------------------------------------------------
# Erlang distribution ----------------------------------------------------------
def Erlang(name, k, l):
    r"""
    Create a continuous random variable with an Erlang distribution.
    The density of the Erlang distribution is given by
    .. math::
        f(x) := \frac{\lambda^k x^{k-1} e^{-\lambda x}}{(k-1)!}
    with :math:`x \in [0,\infty]`.
    Parameters
    ==========
    k : Integer
    l : Real number, `\lambda > 0`, the rate
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Erlang, density, cdf, E, variance
    >>> from sympy import Symbol, simplify, pprint
    >>> k = Symbol("k", integer=True, positive=True)
    >>> l = Symbol("l", positive=True)
    >>> z = Symbol("z")
    >>> X = Erlang("x", k, l)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
     k  k - 1  -l*z
    l *z     *e
    ---------------
        gamma(k)
    >>> C = cdf(X, meijerg=True)(z)
    >>> pprint(C, use_unicode=False)
    /   -2*I*pi*k                     -2*I*pi*k
    |  k*e         *lowergamma(k, 0)   k*e         *lowergamma(k, l*z)
    |- ----------------------------- + -------------------------------  for z >= 0
    <         gamma(k + 1)                       gamma(k + 1)
    |
    |                                0                                  otherwise
    \
    >>> simplify(E(X))
    k/l
    >>> simplify(variance(X))
    k/l**2
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Erlang_distribution
    .. [2] http://mathworld.wolfram.com/ErlangDistribution.html
    """
    # The Erlang distribution is the Gamma distribution with integer shape k
    # and scale 1/lambda, so reuse GammaDistribution (defined below; resolved
    # at call time).
    return rv(name, GammaDistribution, (k, 1/l))
#-------------------------------------------------------------------------------
# Exponential distribution -----------------------------------------------------
class ExponentialDistribution(SingleContinuousDistribution):
    """Exponential distribution parametrised by its ``rate`` (inverse mean)."""
    _argnames = ('rate',)
    set = Interval(0, oo)

    @staticmethod
    def check(rate):
        _value_check(rate > 0, "Rate must be positive.")

    def pdf(self, x):
        return self.rate * exp(-self.rate*x)

    def sample(self):
        # Concrete draw via the stdlib exponential generator.
        return random.expovariate(self.rate)
def Exponential(name, rate):
    r"""
    Create a continuous random variable with an Exponential distribution.
    The density of the exponential distribution is given by
    .. math::
        f(x) := \lambda \exp(-\lambda x)
    with `x > 0`. Note that the expected value is `1/\lambda`.
    Parameters
    ==========
    rate : A positive Real number, `\lambda > 0`, the rate (or inverse scale/inverse mean)
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Exponential, density, cdf, E
    >>> from sympy.stats import variance, std, skewness
    >>> from sympy import Symbol
    >>> l = Symbol("lambda", positive=True)
    >>> z = Symbol("z")
    >>> X = Exponential("x", l)
    >>> density(X)(z)
    lambda*exp(-lambda*z)
    >>> cdf(X)(z)
    Piecewise((1 - exp(-lambda*z), z >= 0), (0, True))
    >>> E(X)
    1/lambda
    >>> variance(X)
    lambda**(-2)
    >>> skewness(X)
    2
    >>> X = Exponential('x', 10)
    >>> density(X)(z)
    10*exp(-10*z)
    >>> E(X)
    1/10
    >>> std(X)
    1/10
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Exponential_distribution
    .. [2] http://mathworld.wolfram.com/ExponentialDistribution.html
    """
    # Positivity of the rate is enforced by ExponentialDistribution.check.
    return rv(name, ExponentialDistribution, (rate, ))
#-------------------------------------------------------------------------------
# F distribution ---------------------------------------------------------------
class FDistributionDistribution(SingleContinuousDistribution):
    """Fisher's F distribution with degrees of freedom ``d1`` and ``d2``."""
    _argnames = ('d1', 'd2')
    set = Interval(0, oo)

    def pdf(self, x):
        d1, d2 = self.d1, self.d2
        numerator = sqrt((d1*x)**d1*d2**d2 / (d1*x + d2)**(d1 + d2))
        return numerator / (x * beta_fn(d1/2, d2/2))
def FDistribution(name, d1, d2):
    r"""
    Create a continuous random variable with a F distribution.
    The density of the F distribution is given by
    .. math::
        f(x) := \frac{\sqrt{\frac{(d_1 x)^{d_1} d_2^{d_2}}
                {(d_1 x + d_2)^{d_1 + d_2}}}}
                {x \mathrm{B} \left(\frac{d_1}{2}, \frac{d_2}{2}\right)}
    with :math:`x > 0`.
    Parameters
    ==========
    d1 : `d_1 > 0`, degrees of freedom of the numerator
    d2 : `d_2 > 0`, degrees of freedom of the denominator
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import FDistribution, density
    >>> from sympy import Symbol, simplify, pprint
    >>> d1 = Symbol("d1", positive=True)
    >>> d2 = Symbol("d2", positive=True)
    >>> z = Symbol("z")
    >>> X = FDistribution("x", d1, d2)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
      d2
      --    ______________________________
      2    /       d1            -d1 - d2
    d2  *\/  (d1*z)  *(d1*z + d2)
    --------------------------------------
                     /d1  d2\
              z*beta|--, --|
                     \2   2 /
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/F-distribution
    .. [2] http://mathworld.wolfram.com/F-Distribution.html
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, FDistributionDistribution, (d1, d2))
#-------------------------------------------------------------------------------
# Fisher Z distribution --------------------------------------------------------
class FisherZDistribution(SingleContinuousDistribution):
    """Fisher's z distribution with degrees of freedom ``d1`` and ``d2``."""
    _argnames = ('d1', 'd2')

    def pdf(self, x):
        d1, d2 = self.d1, self.d2
        normaliser = 2*d1**(d1/2)*d2**(d2/2) / beta_fn(d1/2, d2/2)
        return normaliser * exp(d1*x) / (d1*exp(2*x) + d2)**((d1 + d2)/2)
def FisherZ(name, d1, d2):
    r"""
    Create a Continuous Random Variable with an Fisher's Z distribution.
    The density of the Fisher's Z distribution is given by
    .. math::
        f(x) := \frac{2d_1^{d_1/2} d_2^{d_2/2}} {\mathrm{B}(d_1/2, d_2/2)}
                \frac{e^{d_1z}}{\left(d_1e^{2z}+d_2\right)^{\left(d_1+d_2\right)/2}}
    Parameters
    ==========
    d1 : `d_1 > 0`, degree of freedom (numerator of the underlying ratio)
    d2 : `d_2 > 0`, degree of freedom (denominator of the underlying ratio)
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import FisherZ, density
    >>> from sympy import Symbol, simplify, pprint
    >>> d1 = Symbol("d1", positive=True)
    >>> d2 = Symbol("d2", positive=True)
    >>> z = Symbol("z")
    >>> X = FisherZ("x", d1, d2)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                                d1   d2
        d1   d2               - -- - --
        --   --                 2    2
        2    2  /    2*z     \           d1*z
    2*d1  *d2  *\d1*e    + d2/         *e
    -----------------------------------------
                     /d1  d2\
                 beta|--, --|
                     \2   2 /
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Fisher%27s_z-distribution
    .. [2] http://mathworld.wolfram.com/Fishersz-Distribution.html
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, FisherZDistribution, (d1, d2))
#-------------------------------------------------------------------------------
# Frechet distribution ---------------------------------------------------------
class FrechetDistribution(SingleContinuousDistribution):
    # Frechet (inverse Weibull) distribution: shape a, scale s, minimum m.
    _argnames = ('a', 's', 'm')
    # NOTE(review): support is declared as [0, oo); for m != 0 the density is
    # arguably only meaningful on [m, oo) -- verify against upstream usage.
    set = Interval(0, oo)

    def __new__(cls, a, s=1, m=0):
        # Custom constructor so scale and minimum are optional and all three
        # arguments are sympified before being stored on the Basic instance.
        a, s, m = list(map(sympify, (a, s, m)))
        return Basic.__new__(cls, a, s, m)

    def pdf(self, x):
        a, s, m = self.a, self.s, self.m
        return a/s * ((x-m)/s)**(-1-a) * exp(-((x-m)/s)**(-a))
def Frechet(name, a, s=1, m=0):
    r"""
    Create a continuous random variable with a Frechet distribution.
    The density of the Frechet distribution is given by
    .. math::
        f(x) := \frac{\alpha}{s} \left(\frac{x-m}{s}\right)^{-1-\alpha}
                e^{-(\frac{x-m}{s})^{-\alpha}}
    with :math:`x \geq m`.
    Parameters
    ==========
    a : Real number, :math:`a \in \left(0, \infty\right)` the shape
    s : Real number, :math:`s \in \left(0, \infty\right)` the scale
    m : Real number, :math:`m \in \left(-\infty, \infty\right)` the minimum
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Frechet, density, E, std
    >>> from sympy import Symbol, simplify
    >>> a = Symbol("a", positive=True)
    >>> s = Symbol("s", positive=True)
    >>> m = Symbol("m", real=True)
    >>> z = Symbol("z")
    >>> X = Frechet("x", a, s, m)
    >>> density(X)(z)
    a*((-m + z)/s)**(-a - 1)*exp(-((-m + z)/s)**(-a))/s
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Fr%C3%A9chet_distribution
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, FrechetDistribution, (a, s, m))
#-------------------------------------------------------------------------------
# Gamma distribution -----------------------------------------------------------
class GammaDistribution(SingleContinuousDistribution):
    """Gamma distribution with shape ``k`` and scale ``theta``."""
    _argnames = ('k', 'theta')
    set = Interval(0, oo)

    @staticmethod
    def check(k, theta):
        _value_check(k > 0, "k must be positive")
        _value_check(theta > 0, "Theta must be positive")

    def pdf(self, x):
        shape, scale = self.k, self.theta
        return x**(shape - 1) * exp(-x/scale) / (gamma(shape)*scale**shape)

    def sample(self):
        # Concrete draw via the stdlib gamma generator.
        return random.gammavariate(self.k, self.theta)
def Gamma(name, k, theta):
    r"""
    Create a continuous random variable with a Gamma distribution.
    The density of the Gamma distribution is given by
    .. math::
        f(x) := \frac{1}{\Gamma(k) \theta^k} x^{k - 1} e^{-\frac{x}{\theta}}
    with :math:`x \in [0,\infty)` (GammaDistribution declares its support as
    Interval(0, oo); the previous "[0,1]" here was incorrect).
    Parameters
    ==========
    k : Real number, `k > 0`, a shape
    theta : Real number, `\theta > 0`, a scale
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Gamma, density, cdf, E, variance
    >>> from sympy import Symbol, pprint, simplify
    >>> k = Symbol("k", positive=True)
    >>> theta = Symbol("theta", positive=True)
    >>> z = Symbol("z")
    >>> X = Gamma("x", k, theta)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                      -z
                    -----
         -k  k - 1  theta
    theta  *z     *e
    ---------------------
           gamma(k)
    >>> C = cdf(X, meijerg=True)(z)
    >>> pprint(C, use_unicode=False)
    /                                   /     z  \
    |                       k*lowergamma|k, -----|
    |  k*lowergamma(k, 0)               \   theta/
    <- ------------------ + ----------------------  for z >= 0
    |     gamma(k + 1)           gamma(k + 1)
    |
    \                      0                        otherwise
    >>> E(X)
    theta*gamma(k + 1)/gamma(k)
    >>> V = simplify(variance(X))
    >>> pprint(V, use_unicode=False)
           2
    k*theta
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Gamma_distribution
    .. [2] http://mathworld.wolfram.com/GammaDistribution.html
    """
    # Positivity of k and theta is enforced by GammaDistribution.check.
    return rv(name, GammaDistribution, (k, theta))
#-------------------------------------------------------------------------------
# Inverse Gamma distribution ---------------------------------------------------
class GammaInverseDistribution(SingleContinuousDistribution):
    """Inverse gamma distribution with shape ``a`` and scale ``b``."""
    _argnames = ('a', 'b')
    set = Interval(0, oo)

    @staticmethod
    def check(a, b):
        _value_check(a > 0, "alpha must be positive")
        _value_check(b > 0, "beta must be positive")

    def pdf(self, x):
        shape, scale = self.a, self.b
        return scale**shape/gamma(shape) * x**(-shape - 1) * exp(-scale/x)
def GammaInverse(name, a, b):
    r"""
    Create a continuous random variable with an inverse Gamma distribution.
    The density of the inverse Gamma distribution is given by
    .. math::
        f(x) := \frac{\beta^\alpha}{\Gamma(\alpha)} x^{-\alpha - 1}
                \exp\left(\frac{-\beta}{x}\right)
    with :math:`x > 0`.
    Parameters
    ==========
    a : Real number, `a > 0` a shape
    b : Real number, `b > 0` a scale
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import GammaInverse, density, cdf, E, variance
    >>> from sympy import Symbol, pprint
    >>> a = Symbol("a", positive=True)
    >>> b = Symbol("b", positive=True)
    >>> z = Symbol("z")
    >>> X = GammaInverse("x", a, b)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                -b
                ---
     a  -a - 1   z
    b *z      *e
    ---------------
       gamma(a)
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Inverse-gamma_distribution
    """
    # Positivity of a and b is enforced by GammaInverseDistribution.check.
    return rv(name, GammaInverseDistribution, (a, b))
#-------------------------------------------------------------------------------
# Gompertz distribution --------------------------------------------------------
class GompertzDistribution(SingleContinuousDistribution):
    """Gompertz distribution with scale ``b`` and shape ``eta``."""
    _argnames = ('b', 'eta')
    set = Interval(0, oo)

    @staticmethod
    def check(b, eta):
        _value_check(b > 0, "b must be positive")
        _value_check(eta > 0, "eta must be positive")

    def pdf(self, x):
        scale, shape = self.b, self.eta
        return scale*shape*exp(scale*x)*exp(shape)*exp(-shape*exp(scale*x))
def Gompertz(name, b, eta):
    r"""
    Create a Continuous Random Variable with Gompertz distribution.
    The density of the Gompertz distribution is given by
    .. math::
        f(x) := b \eta e^{b x} e^{\eta} \exp \left(-\eta e^{bx} \right)
    with :math:`x \in [0, \infty)`.
    Parameters
    ==========
    b : Real number, `b > 0`, a scale
    eta : Real number, `\eta > 0`, a shape
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Gompertz, density, E, variance
    >>> from sympy import Symbol, simplify, pprint
    >>> b = Symbol("b", positive=True)
    >>> eta = Symbol("eta", positive=True)
    >>> z = Symbol("z")
    >>> X = Gompertz("x", b, eta)
    >>> density(X)(z)
    b*eta*exp(eta)*exp(b*z)*exp(-eta*exp(b*z))
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Gompertz_distribution
    """
    # Positivity of b and eta is enforced by GompertzDistribution.check.
    return rv(name, GompertzDistribution, (b, eta))
#-------------------------------------------------------------------------------
# Kumaraswamy distribution -----------------------------------------------------
class KumaraswamyDistribution(SingleContinuousDistribution):
    """Kumaraswamy distribution with shape parameters ``a`` and ``b``."""
    _argnames = ('a', 'b')
    # The density a*b*x**(a-1)*(1-x**a)**(b-1) is a normalized pdf on the
    # unit interval only (the Kumaraswamy(...) docstring also states
    # x in [0, 1]); the previous Interval(0, oo) made CDFs and moments
    # integrate over the wrong domain.
    set = Interval(0, 1)

    @staticmethod
    def check(a, b):
        _value_check(a > 0, "a must be positive")
        _value_check(b > 0, "b must be positive")

    def pdf(self, x):
        a, b = self.a, self.b
        return a * b * x**(a-1) * (1-x**a)**(b-1)
def Kumaraswamy(name, a, b):
    r"""
    Create a Continuous Random Variable with a Kumaraswamy distribution.
    The density of the Kumaraswamy distribution is given by
    .. math::
        f(x) := a b x^{a-1} (1-x^a)^{b-1}
    with :math:`x \in [0,1]`.
    Parameters
    ==========
    a : Real number, `a > 0` a shape
    b : Real number, `b > 0` a shape
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Kumaraswamy, density, E, variance
    >>> from sympy import Symbol, simplify, pprint
    >>> a = Symbol("a", positive=True)
    >>> b = Symbol("b", positive=True)
    >>> z = Symbol("z")
    >>> X = Kumaraswamy("x", a, b)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                       b - 1
         a - 1 /   a    \
    a*b*z     *\- z  + 1/
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Kumaraswamy_distribution
    """
    # Positivity of a and b is enforced by KumaraswamyDistribution.check.
    return rv(name, KumaraswamyDistribution, (a, b))
#-------------------------------------------------------------------------------
# Laplace distribution ---------------------------------------------------------
class LaplaceDistribution(SingleContinuousDistribution):
    """Laplace (double-exponential) distribution: location ``mu``, scale ``b``."""
    _argnames = ('mu', 'b')

    def pdf(self, x):
        loc, scale = self.mu, self.b
        return 1/(2*scale)*exp(-Abs(x - loc)/scale)
def Laplace(name, mu, b):
    r"""
    Create a continuous random variable with a Laplace distribution.
    The density of the Laplace distribution is given by
    .. math::
        f(x) := \frac{1}{2 b} \exp \left(-\frac{|x-\mu|}b \right)
    Parameters
    ==========
    mu : Real number, the location (mean)
    b : Real number, `b > 0`, a scale
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Laplace, density
    >>> from sympy import Symbol
    >>> mu = Symbol("mu")
    >>> b = Symbol("b", positive=True)
    >>> z = Symbol("z")
    >>> X = Laplace("x", mu, b)
    >>> density(X)(z)
    exp(-Abs(mu - z)/b)/(2*b)
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Laplace_distribution
    .. [2] http://mathworld.wolfram.com/LaplaceDistribution.html
    """
    # Sympification, validation and pspace construction are delegated to rv().
    return rv(name, LaplaceDistribution, (mu, b))
#-------------------------------------------------------------------------------
# Logistic distribution --------------------------------------------------------
class LogisticDistribution(SingleContinuousDistribution):
    """Logistic distribution with location ``mu`` and scale ``s``."""

    _argnames = ('mu', 's')

    def pdf(self, x):
        """Return the logistic density evaluated at ``x``."""
        location, scale = self.mu, self.s
        core = exp(-(x - location)/scale)
        return core/(scale*(1 + core)**2)
def Logistic(name, mu, s):
    r"""
    Create a continuous random variable with a logistic distribution.

    The density of the logistic distribution is given by

    .. math::
        f(x) := \frac{e^{-(x-\mu)/s}} {s\left(1+e^{-(x-\mu)/s}\right)^2}

    Parameters
    ==========

    mu : Real number, the location (mean)
    s : Real number, `s > 0` a scale

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Logistic, density
    >>> from sympy import Symbol
    >>> mu = Symbol("mu", real=True)
    >>> s = Symbol("s", positive=True)
    >>> z = Symbol("z")
    >>> X = Logistic("x", mu, s)
    >>> density(X)(z)
    exp((mu - z)/s)/(s*(exp((mu - z)/s) + 1)**2)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Logistic_distribution
    .. [2] http://mathworld.wolfram.com/LogisticDistribution.html
    """
    return rv(name, LogisticDistribution, (mu, s))
#-------------------------------------------------------------------------------
# Log Normal distribution ------------------------------------------------------
class LogNormalDistribution(SingleContinuousDistribution):
    """Log-normal distribution; ``mean``/``std`` describe the underlying normal."""

    _argnames = ('mean', 'std')

    # Support is the non-negative half line.
    set = Interval(0, oo)

    def pdf(self, x):
        """Return the log-normal density evaluated at ``x``."""
        m, s = self.mean, self.std
        return exp(-(log(x) - m)**2 / (2*s**2)) / (x*sqrt(2*pi)*s)

    def sample(self):
        """Draw one pseudo-random variate via the stdlib generator."""
        return random.lognormvariate(self.mean, self.std)
def LogNormal(name, mean, std):
    r"""
    Create a continuous random variable with a log-normal distribution.

    The density of the log-normal distribution is given by

    .. math::
        f(x) := \frac{1}{x\sqrt{2\pi\sigma^2}}
                e^{-\frac{\left(\ln x-\mu\right)^2}{2\sigma^2}}

    with :math:`x \geq 0`.

    Parameters
    ==========

    mu : Real number, the log-scale
    sigma : Real number, :math:`\sigma^2 > 0` a shape

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import LogNormal, density
    >>> from sympy import Symbol, simplify, pprint
    >>> mu = Symbol("mu", real=True)
    >>> sigma = Symbol("sigma", positive=True)
    >>> z = Symbol("z")
    >>> X = LogNormal("x", mu, sigma)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                          2
           -(-mu + log(z))
           -----------------
                     2
          ___    2*sigma
        \/ 2 *e
        ------------------------
                ____
            2*\/ pi *sigma*z

    >>> X = LogNormal('x', 0, 1) # Mean 0, standard deviation 1
    >>> density(X)(z)
    sqrt(2)*exp(-log(z)**2/2)/(2*sqrt(pi)*z)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Lognormal
    .. [2] http://mathworld.wolfram.com/LogNormalDistribution.html
    """
    return rv(name, LogNormalDistribution, (mean, std))
#-------------------------------------------------------------------------------
# Maxwell distribution ---------------------------------------------------------
class MaxwellDistribution(SingleContinuousDistribution):
    """Maxwell speed distribution with scale parameter ``a``."""

    _argnames = ('a',)

    # Support is the non-negative half line.
    set = Interval(0, oo)

    def pdf(self, x):
        """Return the Maxwell density evaluated at ``x``."""
        scale = self.a
        return sqrt(2/pi)*x**2*exp(-x**2/(2*scale**2))/scale**3
def Maxwell(name, a):
    r"""
    Create a continuous random variable with a Maxwell distribution.

    The density of the Maxwell distribution is given by

    .. math::
        f(x) := \sqrt{\frac{2}{\pi}} \frac{x^2 e^{-x^2/(2a^2)}}{a^3}

    with :math:`x \geq 0`.

    Parameters
    ==========

    a : Real number, `a > 0`, a scale parameter

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Maxwell, density, E, variance
    >>> from sympy import Symbol, simplify
    >>> a = Symbol("a", positive=True)
    >>> z = Symbol("z")
    >>> X = Maxwell("x", a)
    >>> density(X)(z)
    sqrt(2)*z**2*exp(-z**2/(2*a**2))/(sqrt(pi)*a**3)
    >>> E(X)
    2*sqrt(2)*a/sqrt(pi)
    >>> simplify(variance(X))
    a**2*(-8 + 3*pi)/pi

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Maxwell_distribution
    .. [2] http://mathworld.wolfram.com/MaxwellDistribution.html
    """
    return rv(name, MaxwellDistribution, (a, ))
#-------------------------------------------------------------------------------
# Nakagami distribution --------------------------------------------------------
class NakagamiDistribution(SingleContinuousDistribution):
    """Nakagami distribution with shape ``mu`` and spread ``omega``."""

    _argnames = ('mu', 'omega')

    # Support is the non-negative half line.
    set = Interval(0, oo)

    def pdf(self, x):
        """Return the Nakagami density evaluated at ``x``."""
        shape, spread = self.mu, self.omega
        norm = 2*shape**shape/(gamma(shape)*spread**shape)
        return norm*x**(2*shape - 1)*exp(-shape/spread*x**2)
def Nakagami(name, mu, omega):
    r"""
    Create a continuous random variable with a Nakagami distribution.

    The density of the Nakagami distribution is given by

    .. math::
        f(x) := \frac{2\mu^\mu}{\Gamma(\mu)\omega^\mu} x^{2\mu-1}
                \exp\left(-\frac{\mu}{\omega}x^2 \right)

    with :math:`x > 0`.

    Parameters
    ==========

    mu : Real number, `\mu \geq \frac{1}{2}` a shape
    omega : Real number, `\omega > 0`, the spread

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Nakagami, density, E, variance
    >>> from sympy import Symbol, simplify, pprint
    >>> mu = Symbol("mu", positive=True)
    >>> omega = Symbol("omega", positive=True)
    >>> z = Symbol("z")
    >>> X = Nakagami("x", mu, omega)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                                      2
                                 -mu*z
                                 -------
          mu      -mu  2*mu - 1  omega
      2*mu  *omega   *z        *e
      ----------------------------------
                   gamma(mu)

    >>> simplify(E(X, meijerg=True))
    sqrt(mu)*sqrt(omega)*gamma(mu + 1/2)/gamma(mu + 1)
    >>> V = simplify(variance(X, meijerg=True))
    >>> pprint(V, use_unicode=False)
                            2
             omega*gamma (mu + 1/2)
    omega - -----------------------
            gamma(mu)*gamma(mu + 1)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Nakagami_distribution
    """
    return rv(name, NakagamiDistribution, (mu, omega))
#-------------------------------------------------------------------------------
# Normal distribution ----------------------------------------------------------
class NormalDistribution(SingleContinuousDistribution):
    """Gaussian distribution parameterized by ``mean`` and ``std``."""

    _argnames = ('mean', 'std')

    @staticmethod
    def check(mean, std):
        """Validate the parameters at construction time."""
        _value_check(std > 0, "Standard deviation must be positive")

    def pdf(self, x):
        """Return the Gaussian density evaluated at ``x``."""
        mu, sigma = self.mean, self.std
        return exp(-(x - mu)**2 / (2*sigma**2)) / (sqrt(2*pi)*sigma)

    def sample(self):
        """Draw one pseudo-random variate via the stdlib generator."""
        return random.normalvariate(self.mean, self.std)
def Normal(name, mean, std):
    r"""
    Create a continuous random variable with a Normal distribution.

    The density of the Normal distribution is given by

    .. math::
        f(x) := \frac{1}{\sigma\sqrt{2\pi}} e^{ -\frac{(x-\mu)^2}{2\sigma^2} }

    Parameters
    ==========

    mu : Real number, the mean
    sigma : Real number, :math:`\sigma^2 > 0` the variance

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Normal, density, E, std, cdf, skewness
    >>> from sympy import Symbol, simplify, pprint, factor, together, factor_terms
    >>> mu = Symbol("mu")
    >>> sigma = Symbol("sigma", positive=True)
    >>> z = Symbol("z")
    >>> X = Normal("x", mu, sigma)
    >>> density(X)(z)
    sqrt(2)*exp(-(-mu + z)**2/(2*sigma**2))/(2*sqrt(pi)*sigma)
    >>> C = simplify(cdf(X))(z) # it needs a little more help...
    >>> pprint(C, use_unicode=False)
       /  ___          \
       |\/ 2 *(-mu + z)|
    erf|---------------|
       \    2*sigma    /   1
    -------------------- + -
             2             2
    >>> simplify(skewness(X))
    0

    >>> X = Normal("x", 0, 1) # Mean 0, standard deviation 1
    >>> density(X)(z)
    sqrt(2)*exp(-z**2/2)/(2*sqrt(pi))
    >>> E(2*X + 1)
    1
    >>> simplify(std(2*X + 1))
    2

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Normal_distribution
    .. [2] http://mathworld.wolfram.com/NormalDistributionFunction.html
    """
    return rv(name, NormalDistribution, (mean, std))
#-------------------------------------------------------------------------------
# Pareto distribution ----------------------------------------------------------
class ParetoDistribution(SingleContinuousDistribution):
    """Pareto distribution with minimum value ``xm`` and tail index ``alpha``."""

    _argnames = ('xm', 'alpha')

    @property
    def set(self):
        # Support starts at the scale parameter xm.
        return Interval(self.xm, oo)

    @staticmethod
    def check(xm, alpha):
        """Validate the parameters at construction time."""
        _value_check(xm > 0, "Xm must be positive")
        _value_check(alpha > 0, "Alpha must be positive")

    def pdf(self, x):
        """Return the Pareto density evaluated at ``x``."""
        scale, shape = self.xm, self.alpha
        return shape * scale**shape / x**(shape + 1)

    def sample(self):
        """Draw one pseudo-random variate via the stdlib generator."""
        return random.paretovariate(self.alpha)
def Pareto(name, xm, alpha):
    r"""
    Create a continuous random variable with the Pareto distribution.

    The density of the Pareto distribution is given by

    .. math::
        f(x) := \frac{\alpha\,x_m^\alpha}{x^{\alpha+1}}

    with :math:`x \in [x_m,\infty]`.

    Parameters
    ==========

    xm : Real number, `x_m > 0`, a scale
    alpha : Real number, `\alpha > 0`, a shape

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Pareto, density
    >>> from sympy import Symbol
    >>> xm = Symbol("xm", positive=True)
    >>> beta = Symbol("beta", positive=True)
    >>> z = Symbol("z")
    >>> X = Pareto("x", xm, beta)
    >>> density(X)(z)
    beta*xm**beta*z**(-beta - 1)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Pareto_distribution
    .. [2] http://mathworld.wolfram.com/ParetoDistribution.html
    """
    return rv(name, ParetoDistribution, (xm, alpha))
#-------------------------------------------------------------------------------
# QuadraticU distribution ------------------------------------------------------
class QuadraticUDistribution(SingleContinuousDistribution):
    """U-quadratic distribution on the interval ``[a, b]``."""

    _argnames = ('a', 'b')

    @property
    def set(self):
        # Support is the closed interval [a, b].
        return Interval(self.a, self.b)

    def pdf(self, x):
        """Return the U-quadratic density evaluated at ``x``."""
        lo, hi = self.a, self.b
        coeff = 12 / (hi - lo)**3
        center = (lo + hi) / 2
        return Piecewise(
            (coeff * (x - center)**2, And(lo <= x, x <= hi)),
            (S.Zero, True))
def QuadraticU(name, a, b):
    r"""
    Create a Continuous Random Variable with a U-quadratic distribution.

    The density of the U-quadratic distribution is given by

    .. math::
        f(x) := \alpha (x-\beta)^2

    with :math:`x \in [a,b]`.

    Parameters
    ==========

    a : Real number
    b : Real number, :math:`a < b`

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import QuadraticU, density, E, variance
    >>> from sympy import Symbol, simplify, factor, pprint
    >>> a = Symbol("a", real=True)
    >>> b = Symbol("b", real=True)
    >>> z = Symbol("z")
    >>> X = QuadraticU("x", a, b)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
    /                2
    |   /  a   b    \
    |12*|- - - - + z|
    |   \  2   2    /
    <-----------------  for And(a <= z, z <= b)
    |            3
    |    (-a + b)
    |
    \        0          otherwise

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/U-quadratic_distribution
    """
    return rv(name, QuadraticUDistribution, (a, b))
#-------------------------------------------------------------------------------
# RaisedCosine distribution ----------------------------------------------------
class RaisedCosineDistribution(SingleContinuousDistribution):
    """Raised cosine distribution with location ``mu`` and half-width ``s``."""

    _argnames = ('mu', 's')

    @property
    def set(self):
        # Support is the symmetric interval [mu - s, mu + s].
        return Interval(self.mu - self.s, self.mu + self.s)

    @staticmethod
    def check(mu, s):
        """Validate the parameters at construction time."""
        _value_check(s > 0, "s must be positive")

    def pdf(self, x):
        """Return the raised-cosine density evaluated at ``x``."""
        center, halfwidth = self.mu, self.s
        inside = And(center - halfwidth <= x, x <= center + halfwidth)
        return Piecewise(
            ((1 + cos(pi*(x - center)/halfwidth)) / (2*halfwidth), inside),
            (S.Zero, True))
def RaisedCosine(name, mu, s):
    r"""
    Create a Continuous Random Variable with a raised cosine distribution.

    The density of the raised cosine distribution is given by

    .. math::
        f(x) := \frac{1}{2s}\left(1+\cos\left(\frac{x-\mu}{s}\pi\right)\right)

    with :math:`x \in [\mu-s,\mu+s]`.

    Parameters
    ==========

    mu : Real number
    s : Real number, `s > 0`

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import RaisedCosine, density, E, variance
    >>> from sympy import Symbol, simplify, pprint
    >>> mu = Symbol("mu", real=True)
    >>> s = Symbol("s", positive=True)
    >>> z = Symbol("z")
    >>> X = RaisedCosine("x", mu, s)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
    /   /pi*(-mu + z)\
    |cos|------------| + 1
    |   \     s      /
    <---------------------  for And(z <= mu + s, mu - s <= z)
    |          2*s
    |
    \          0                        otherwise

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Raised_cosine_distribution
    """
    return rv(name, RaisedCosineDistribution, (mu, s))
#-------------------------------------------------------------------------------
# Rayleigh distribution --------------------------------------------------------
class RayleighDistribution(SingleContinuousDistribution):
    """Rayleigh distribution with scale parameter ``sigma``."""

    _argnames = ('sigma',)

    # Support is the non-negative half line.
    set = Interval(0, oo)

    def pdf(self, x):
        """Return the Rayleigh density evaluated at ``x``."""
        scale = self.sigma
        return x/scale**2*exp(-x**2/(2*scale**2))
def Rayleigh(name, sigma):
    r"""
    Create a continuous random variable with a Rayleigh distribution.

    The density of the Rayleigh distribution is given by

    .. math ::
        f(x) := \frac{x}{\sigma^2} e^{-x^2/2\sigma^2}

    with :math:`x > 0`.

    Parameters
    ==========

    sigma : Real number, `\sigma > 0`

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Rayleigh, density, E, variance
    >>> from sympy import Symbol, simplify
    >>> sigma = Symbol("sigma", positive=True)
    >>> z = Symbol("z")
    >>> X = Rayleigh("x", sigma)
    >>> density(X)(z)
    z*exp(-z**2/(2*sigma**2))/sigma**2
    >>> E(X)
    sqrt(2)*sqrt(pi)*sigma/2
    >>> variance(X)
    -pi*sigma**2/2 + 2*sigma**2

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Rayleigh_distribution
    .. [2] http://mathworld.wolfram.com/RayleighDistribution.html
    """
    return rv(name, RayleighDistribution, (sigma, ))
#-------------------------------------------------------------------------------
# StudentT distribution --------------------------------------------------------
class StudentTDistribution(SingleContinuousDistribution):
    """Student's t distribution with ``nu`` degrees of freedom."""

    _argnames = ('nu',)

    def pdf(self, x):
        """Return the Student's t density evaluated at ``x``."""
        dof = self.nu
        normalizer = sqrt(dof)*beta_fn(S(1)/2, dof/2)
        return (1 + x**2/dof)**(-(dof + 1)/2)/normalizer
def StudentT(name, nu):
    r"""
    Create a continuous random variable with a student's t distribution.

    The density of the student's t distribution is given by

    .. math::
        f(x) := \frac{\Gamma \left(\frac{\nu+1}{2} \right)}
                {\sqrt{\nu\pi}\Gamma \left(\frac{\nu}{2} \right)}
                \left(1+\frac{x^2}{\nu} \right)^{-\frac{\nu+1}{2}}

    Parameters
    ==========

    nu : Real number, `\nu > 0`, the degrees of freedom

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import StudentT, density, E, variance
    >>> from sympy import Symbol, simplify, pprint
    >>> nu = Symbol("nu", positive=True)
    >>> z = Symbol("z")
    >>> X = StudentT("x", nu)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                nu   1
              - -- - -
                2    2
      /     2\
      |    z |
      |1 + --|
      \    nu/
    --------------------
      ____     /     nu\
    \/ nu *beta|1/2, --|
               \     2 /

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Student_t-distribution
    .. [2] http://mathworld.wolfram.com/Studentst-Distribution.html
    """
    return rv(name, StudentTDistribution, (nu, ))
#-------------------------------------------------------------------------------
# Triangular distribution ------------------------------------------------------
class TriangularDistribution(SingleContinuousDistribution):
    """Triangular distribution on ``[a, b]`` with mode ``c``."""

    _argnames = ('a', 'b', 'c')

    def pdf(self, x):
        """Return the triangular density evaluated at ``x``."""
        left, right, mode = self.a, self.b, self.c
        return Piecewise(
            (2*(x - left)/((right - left)*(mode - left)),
             And(left <= x, x < mode)),
            (2/(right - left), Eq(x, mode)),
            (2*(right - x)/((right - left)*(right - mode)),
             And(mode < x, x <= right)),
            (S.Zero, True))
def Triangular(name, a, b, c):
    r"""
    Create a continuous random variable with a triangular distribution.

    The density of the triangular distribution is given by

    .. math::
        f(x) := \begin{cases}
                  0 & \mathrm{for\ } x < a, \\
                  \frac{2(x-a)}{(b-a)(c-a)} & \mathrm{for\ } a \le x < c, \\
                  \frac{2}{b-a} & \mathrm{for\ } x = c, \\
                  \frac{2(b-x)}{(b-a)(b-c)} & \mathrm{for\ } c < x \le b, \\
                  0 & \mathrm{for\ } b < x.
                \end{cases}

    Parameters
    ==========

    a : Real number, :math:`a \in \left(-\infty, \infty\right)`
    b : Real number, :math:`a < b`
    c : Real number, :math:`a \leq c \leq b`

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Triangular, density, E
    >>> from sympy import Symbol, pprint
    >>> a = Symbol("a")
    >>> b = Symbol("b")
    >>> c = Symbol("c")
    >>> z = Symbol("z")
    >>> X = Triangular("x", a,b,c)
    >>> pprint(density(X)(z), use_unicode=False)
    /    -2*a + 2*z
    |-----------------  for And(a <= z, z < c)
    |(-a + b)*(-a + c)
    |
    |       2
    |     ------              for z = c
    <     -a + b
    |
    |   2*b - 2*z
    |----------------   for And(z <= b, c < z)
    |(-a + b)*(b - c)
    |
    \        0                otherwise

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Triangular_distribution
    .. [2] http://mathworld.wolfram.com/TriangularDistribution.html
    """
    return rv(name, TriangularDistribution, (a, b, c))
#-------------------------------------------------------------------------------
# Uniform distribution ---------------------------------------------------------
class UniformDistribution(SingleContinuousDistribution):
    # Continuous uniform distribution on the interval [left, right].
    _argnames = ('left', 'right')

    def pdf(self, x):
        # Constant density 1/(right - left) inside the support, zero outside.
        left, right = self.left, self.right
        return Piecewise(
            (S.One/(right - left), And(left <= x, x <= right)),
            (S.Zero, True))

    def compute_cdf(self, **kwargs):
        from sympy import Lambda, Min
        z = Dummy('z', real=True, finite=True)
        # Compute the generic CDF integral, then resolve the Min()
        # expressions the integration leaves behind, using the implicit
        # ordering left <= z <= right of the endpoint cases.
        result = SingleContinuousDistribution.compute_cdf(self, **kwargs)(z)
        reps = {
            Min(z, self.right): z,
            Min(z, self.left, self.right): self.left,
            Min(z, self.left): self.left}
        result = result.subs(reps)
        return Lambda(z, result)

    def expectation(self, expr, var, **kwargs):
        from sympy import Max, Min
        # Force evaluation so the generic integral produces Max/Min of the
        # (symbolic) endpoints, which are then resolved via left <= right.
        kwargs['evaluate'] = True
        result = SingleContinuousDistribution.expectation(self, expr, var, **kwargs)
        result = result.subs({Max(self.left, self.right): self.right,
                              Min(self.left, self.right): self.left})
        return result

    def sample(self):
        # Draw one pseudo-random variate via the stdlib generator.
        return random.uniform(self.left, self.right)
def Uniform(name, left, right):
    r"""
    Create a continuous random variable with a uniform distribution.

    The density of the uniform distribution is given by

    .. math::
        f(x) := \begin{cases}
                  \frac{1}{b - a} & \text{for } x \in [a,b]  \\
                  0               & \text{otherwise}
                \end{cases}

    with :math:`x \in [a,b]`.

    Parameters
    ==========

    a : Real number, :math:`-\infty < a` the left boundary
    b : Real number, :math:`a < b < \infty` the right boundary

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Uniform, density, cdf, E, variance, skewness
    >>> from sympy import Symbol, simplify
    >>> a = Symbol("a", negative=True)
    >>> b = Symbol("b", positive=True)
    >>> z = Symbol("z")
    >>> X = Uniform("x", a, b)
    >>> density(X)(z)
    Piecewise((1/(-a + b), And(a <= z, z <= b)), (0, True))
    >>> cdf(X)(z)  # doctest: +SKIP
    -a/(-a + b) + z/(-a + b)
    >>> simplify(E(X))
    a/2 + b/2
    >>> simplify(variance(X))
    a**2/12 - a*b/6 + b**2/12

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29
    .. [2] http://mathworld.wolfram.com/UniformDistribution.html
    """
    return rv(name, UniformDistribution, (left, right))
#-------------------------------------------------------------------------------
# UniformSum distribution ------------------------------------------------------
class UniformSumDistribution(SingleContinuousDistribution):
    """Irwin-Hall distribution: the sum of ``n`` independent U(0, 1) variates."""

    _argnames = ('n',)

    @property
    def set(self):
        # The sum of n unit-uniform variates lies in [0, n].
        return Interval(0, self.n)

    def pdf(self, x):
        """Return the Irwin-Hall density evaluated at ``x``."""
        num = self.n
        k = Dummy("k")
        term = (-1)**k*binomial(num, k)*(x - k)**(num - 1)
        return Sum(term, (k, 0, floor(x)))/factorial(num - 1)
def UniformSum(name, n):
    r"""
    Create a continuous random variable with an Irwin-Hall distribution.

    The probability distribution function depends on a single parameter
    `n` which is an integer.

    The density of the Irwin-Hall distribution is given by

    .. math ::
        f(x) := \frac{1}{(n-1)!}\sum_{k=0}^{\lfloor x\rfloor}(-1)^k
                \binom{n}{k}(x-k)^{n-1}

    Parameters
    ==========

    n : A positive Integer, `n > 0`

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import UniformSum, density
    >>> from sympy import Symbol, pprint
    >>> n = Symbol("n", integer=True)
    >>> z = Symbol("z")
    >>> X = UniformSum("x", n)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
    floor(z)
      ___
      \  `
       \         k         n - 1 /n\
        )    (-1) *(-k + z)     *| |
       /                         \k/
      /__,
     k = 0
    --------------------------------
                (n - 1)!

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Uniform_sum_distribution
    .. [2] http://mathworld.wolfram.com/UniformSumDistribution.html
    """
    return rv(name, UniformSumDistribution, (n, ))
#-------------------------------------------------------------------------------
# VonMises distribution --------------------------------------------------------
class VonMisesDistribution(SingleContinuousDistribution):
    """Circular von Mises distribution with location ``mu`` and concentration ``k``."""

    _argnames = ('mu', 'k')

    # Support is one full turn of the circle.
    set = Interval(0, 2*pi)

    @staticmethod
    def check(mu, k):
        """Validate the parameters at construction time."""
        _value_check(k > 0, "k must be positive")

    def pdf(self, x):
        """Return the von Mises density evaluated at ``x``."""
        location, concentration = self.mu, self.k
        numerator = exp(concentration*cos(x - location))
        return numerator / (2*pi*besseli(0, concentration))
def VonMises(name, mu, k):
    r"""
    Create a Continuous Random Variable with a von Mises distribution.

    The density of the von Mises distribution is given by

    .. math::
        f(x) := \frac{e^{\kappa\cos(x-\mu)}}{2\pi I_0(\kappa)}

    with :math:`x \in [0,2\pi]`.

    Parameters
    ==========

    mu : Real number, measure of location
    k : Real number, measure of concentration

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import VonMises, density, E, variance
    >>> from sympy import Symbol, simplify, pprint
    >>> mu = Symbol("mu")
    >>> k = Symbol("k", positive=True)
    >>> z = Symbol("z")
    >>> X = VonMises("x", mu, k)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
         k*cos(mu - z)
        e
    ------------------
    2*pi*besseli(0, k)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Von_Mises_distribution
    .. [2] http://mathworld.wolfram.com/vonMisesDistribution.html
    """
    return rv(name, VonMisesDistribution, (mu, k))
#-------------------------------------------------------------------------------
# Weibull distribution ---------------------------------------------------------
class WeibullDistribution(SingleContinuousDistribution):
    """Weibull distribution with scale ``alpha`` and shape ``beta``."""

    _argnames = ('alpha', 'beta')

    # Support is the non-negative half line.
    set = Interval(0, oo)

    @staticmethod
    def check(alpha, beta):
        """Validate the parameters at construction time."""
        _value_check(alpha > 0, "Alpha must be positive")
        _value_check(beta > 0, "Beta must be positive")

    def pdf(self, x):
        """Return the Weibull density evaluated at ``x``."""
        scale, shape = self.alpha, self.beta
        return shape * (x/scale)**(shape - 1) * exp(-(x/scale)**shape) / scale

    def sample(self):
        """Draw one pseudo-random variate via the stdlib generator."""
        return random.weibullvariate(self.alpha, self.beta)
def Weibull(name, alpha, beta):
    r"""
    Create a continuous random variable with a Weibull distribution.

    The density of the Weibull distribution is given by

    .. math::
        f(x) := \begin{cases}
                  \frac{k}{\lambda}\left(\frac{x}{\lambda}\right)^{k-1}
                  e^{-(x/\lambda)^{k}} & x\geq0\\
                  0 & x<0
                \end{cases}

    Parameters
    ==========

    lambda : Real number, :math:`\lambda > 0` a scale
    k : Real number, `k > 0` a shape

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Weibull, density, E, variance
    >>> from sympy import Symbol, simplify
    >>> l = Symbol("lambda", positive=True)
    >>> k = Symbol("k", positive=True)
    >>> z = Symbol("z")
    >>> X = Weibull("x", l, k)
    >>> density(X)(z)
    k*(z/lambda)**(k - 1)*exp(-(z/lambda)**k)/lambda
    >>> simplify(E(X))
    lambda*gamma(1 + 1/k)
    >>> simplify(variance(X))
    lambda**2*(-gamma(1 + 1/k)**2 + gamma(1 + 2/k))

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Weibull_distribution
    .. [2] http://mathworld.wolfram.com/WeibullDistribution.html
    """
    return rv(name, WeibullDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Wigner semicircle distribution -----------------------------------------------
class WignerSemicircleDistribution(SingleContinuousDistribution):
    """Wigner semicircle distribution on ``[-R, R]``."""

    _argnames = ('R',)

    @property
    def set(self):
        # Support is the symmetric interval [-R, R].
        return Interval(-self.R, self.R)

    def pdf(self, x):
        """Return the semicircle density evaluated at ``x``."""
        radius = self.R
        return 2/(pi*radius**2)*sqrt(radius**2 - x**2)
def WignerSemicircle(name, R):
    r"""
    Create a continuous random variable with a Wigner semicircle distribution.

    The density of the Wigner semicircle distribution is given by

    .. math::
        f(x) := \frac2{\pi R^2}\,\sqrt{R^2-x^2}

    with :math:`x \in [-R,R]`.

    Parameters
    ==========

    R : Real number, `R > 0`, the radius

    Returns
    =======

    A `RandomSymbol`.

    Examples
    ========

    >>> from sympy.stats import WignerSemicircle, density, E
    >>> from sympy import Symbol, simplify
    >>> R = Symbol("R", positive=True)
    >>> z = Symbol("z")
    >>> X = WignerSemicircle("x", R)
    >>> density(X)(z)
    2*sqrt(R**2 - z**2)/(pi*R**2)
    >>> E(X)
    0

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Wigner_semicircle_distribution
    .. [2] http://mathworld.wolfram.com/WignersSemicircleLaw.html
    """
    return rv(name, WignerSemicircleDistribution, (R,))
|
{
"content_hash": "eca696508014d0eefacc8458beea8c70",
"timestamp": "",
"source": "github",
"line_count": 2577,
"max_line_length": 90,
"avg_line_length": 24.063639891346526,
"alnum_prop": 0.48753467070889506,
"repo_name": "Davidjohnwilson/sympy",
"id": "cf237990e267150c18f9048d078fbef0fd110c8e",
"size": "62012",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sympy/stats/crv_types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13990612"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
"""
RACKSPACE API KEY EXTENSION
This WSGI component
- detects calls with extensions in them.
- processes the necessary components
"""
import json
import logging
import os

from lxml import etree
# Request and Response belong to webob's public API; importing them from
# webob.exc (the HTTP-exception module) relied on that module's internal
# re-exports.
from webob import Request, Response

from keystone import utils
EXTENSION_ALIAS = "RAX-KSKEY-admin"
logger = logging.getLogger(__name__) # pylint: disable=C0103
class FrontEndFilter(object):
    """WSGI middleware that advertises this extension to API clients.

    Responses to ``/extensions`` (v2.0 API only) are intercepted on the
    way back from the downstream app, and this extension's descriptor --
    loaded from the ``extension.json`` / ``extension.xml`` files shipped
    next to this module -- is appended to the returned list. Every other
    request is passed through unchanged.
    """

    def __init__(self, app, conf):
        """Store the downstream WSGI app and the paste.deploy config."""
        logger.info(_("Starting the %s extension" %
                    EXTENSION_ALIAS))
        self.conf = conf
        self.app = app

    def _extension_file(self, name):
        """Return the absolute path of a data file shipped with this module."""
        return os.path.join(os.path.dirname(__file__), name)

    def _add_to_json(self, response, env, start_response):
        """Return a new JSON response with this extension appended."""
        # Context manager closes the descriptor file promptly; the
        # original open(...).read() leaked the file handle.
        with open(self._extension_file("extension.json")) as extension_file:
            thisextensionjson = json.load(extension_file)
        # The v2.0 extensions document keeps its list under
        # extensions.values; append our descriptor there.
        body = json.loads(response.body)
        body["extensions"]["values"].append(thisextensionjson)
        newresp = Response(
            content_type='application/json',
            body=json.dumps(body))
        return newresp(env, start_response)

    def _add_to_xml(self, response, env, start_response):
        """Return a new XML response with this extension appended."""
        thisextensionxml = etree.parse(
            self._extension_file("extension.xml")).getroot()
        body = etree.fromstring(response.body)
        body.append(thisextensionxml)
        newresp = Response(
            content_type='application/xml',
            body=etree.tostring(body))
        return newresp(env, start_response)

    def __call__(self, env, start_response):
        """Handle an incoming request: inject the extension or pass through."""
        request = Request(env)
        # env.get() (instead of env[...]) keeps a request without the
        # KEYSTONE_API_VERSION key from raising KeyError; such requests
        # simply bypass the filter.
        if (request.path == "/extensions"
                and env.get('KEYSTONE_API_VERSION') == '2.0'):
            response = request.get_response(self.app)
            if response.status_int == 200:
                if response.content_type == 'application/json':
                    return self._add_to_json(response, env, start_response)
                elif response.content_type == 'application/xml':
                    return self._add_to_xml(response, env, start_response)
            # Non-200 or other content types: forward unmodified.
            return response(env, start_response)
        # Default action: bypass this filter entirely.
        return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Return a WSGI filter app for use with paste.deploy."""
    # Merge the paste.deploy global and local options, locals winning.
    settings = global_conf.copy()
    settings.update(local_conf)

    def ext_filter(app):
        """Wrap ``app`` with the API key front-end filter."""
        return FrontEndFilter(app, settings)

    return ext_filter
|
{
"content_hash": "6a16f99305ac24d11a6f8cd4ce5a6b0d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 38.10344827586207,
"alnum_prop": 0.5182503770739065,
"repo_name": "admiyo/keystone",
"id": "4f3f0796d6418c41cf795a130c6ae4072d99dbd5",
"size": "3973",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/contrib/extensions/admin/raxkey/frontend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32945"
},
{
"name": "JavaScript",
"bytes": "67937"
},
{
"name": "Python",
"bytes": "1339048"
},
{
"name": "Shell",
"bytes": "7400"
},
{
"name": "XSLT",
"bytes": "52086"
}
],
"symlink_target": ""
}
|
"""
Landlab component for overland flow using the kinematic-wave approximation.
Created on Fri May 27 14:26:13 2016
@author: gtucker
"""
from landlab import Component
import numpy as np
class KinwaveOverlandFlowModel(Component):
    """
    Calculate water flow over topography.

    Landlab component that implements a two-dimensional
    kinematic wave model. This is an extremely simple, unsophisticated
    model, originally built simply to demonstrate the component creation
    process. Limitations to the present version include: infiltration is
    handled very crudely, the caller is responsible for picking a stable
    time step size (no adaptive time stepping is used in the `run_one_step`
    method), precipitation rate is constant for a given duration (then zero),
    and all parameters are uniform in space. Also, the terrain is assumed
    to be stable over time. Caveat emptor!

    Construction:

        KinwaveOverlandFlowModel(grid, precip_rate=1.0,
                                 precip_duration=1.0,
                                 infilt_rate=0.0,
                                 roughness=0.01, **kwds)

    Parameters
    ----------
    grid : ModelGrid
        A Landlab grid object.
    precip_rate : float, optional (defaults to 1 mm/hr)
        Precipitation rate, mm/hr
    precip_duration : float, optional (defaults to 1 hour)
        Duration of precipitation, hours
    infilt_rate : float, optional (defaults to 0)
        Maximum rate of infiltration, mm/hr
    roughness : float, defaults to 0.01
        Manning roughness coefficient, s/m^1/3

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> rg = RasterModelGrid((4, 5), 10.0)
    >>> kw = KinwaveOverlandFlowModel(rg)
    >>> kw.vel_coef
    100.0
    >>> rg.at_node['water__depth']
    array([ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.])
    """

    _name = 'KinwaveOverlandFlowModel'

    _input_var_names = (
        'topographic__elevation',
        'topographic__gradient',
    )

    _output_var_names = (
        'water__depth',
        'water__velocity',
        'water__specific_discharge',
    )

    # Fixed: the units key for the link-gradient field was previously
    # 'topographic__slope', which matched no name declared in
    # _input_var_names/_var_mapping; it now agrees with
    # 'topographic__gradient'.
    _var_units = {
        'topographic__elevation' : 'm',
        'topographic__gradient' : 'm/m',
        'water__depth' : 'm',
        'water__velocity' : 'm/s',
        'water__specific_discharge' : 'm2/s',
    }

    _var_mapping = {
        'topographic__elevation' : 'node',
        'topographic__gradient' : 'link',
        'water__depth' : 'node',
        'water__velocity' : 'link',
        'water__specific_discharge' : 'link',
    }

    _var_doc = {
        'topographic__elevation':
            'elevation of the ground surface relative to some datum',
        'topographic__gradient':
            'gradient of the ground surface',
        'water__depth':
            'depth of water',
        'water__velocity':
            'flow velocity component in the direction of the link',
        'water__specific_discharge':
            'flow discharge component in the direction of the link',
    }

    def __init__(self, grid, precip_rate=1.0, precip_duration=1.0,
                 infilt_rate=0.0, roughness=0.01, **kwds):
        """Initialize the KinwaveOverlandFlowModel.

        Parameters
        ----------
        grid : ModelGrid
            Landlab ModelGrid object
        precip_rate : float, optional (defaults to 1 mm/hr)
            Precipitation rate, mm/hr
        precip_duration : float, optional (defaults to 1 hour)
            Duration of precipitation, hours
        infilt_rate : float, optional (defaults to 0)
            Maximum rate of infiltration, mm/hr
        roughness : float, defaults to 0.01
            Manning roughness coefficient, s/m^1/3
        """

        # Store grid and parameters and do unit conversion
        self._grid = grid
        self.precip = precip_rate / 3600000.0  # mm/hr -> m/s
        self.precip_duration = precip_duration * 3600.0  # h -> s
        self.infilt = infilt_rate / 3600000.0  # mm/hr -> m/s
        self.vel_coef = 1.0 / roughness  # do division now to save time

        # Create fields, reusing any that already exist on the grid...
        # Elevation
        if 'topographic__elevation' in grid.at_node:
            self.elev = grid.at_node['topographic__elevation']
        else:
            self.elev = grid.add_zeros('node', 'topographic__elevation')

        # Water depth
        if 'water__depth' in grid.at_node:
            self.depth = grid.at_node['water__depth']
        else:
            self.depth = grid.add_zeros('node', 'water__depth')

        # Slope
        if 'topographic__gradient' in grid.at_link:
            self.slope = grid.at_link['topographic__gradient']
        else:
            self.slope = grid.add_zeros('link', 'topographic__gradient')

        # Velocity
        if 'water__velocity' in grid.at_link:
            self.vel = grid.at_link['water__velocity']
        else:
            self.vel = grid.add_zeros('link', 'water__velocity')

        # Discharge
        if 'water__specific_discharge' in grid.at_link:
            self.disch = grid.at_link['water__specific_discharge']
        else:
            self.disch = grid.add_zeros('link', 'water__specific_discharge')

        # Calculate the ground-surface slope (assume it won't change),
        # and cache the slope-dependent pieces of the Manning equation.
        self.slope[self._grid.active_links] = \
            self._grid.calc_grad_at_link(self.elev)[self._grid.active_links]
        self.sqrt_slope = np.sqrt(self.slope)
        self.sign_slope = np.sign(self.slope)

    def run_one_step(self, dt, current_time=0.0, **kwds):
        """Calculate water flow for a time period `dt`.

        Parameters
        ----------
        dt : float
            Duration of the step, s; the caller must pick a stable value.
        current_time : float, optional
            Model time at the start of the step, s; used only to decide
            whether precipitation is still falling.
        """
        # Calculate water depth at links. This implements an "upwind" scheme
        # in which water depth at the links is the depth at the higher of the
        # two nodes.
        H_link = self._grid.map_value_at_max_node_to_link(
                'topographic__elevation', 'water__depth')

        # Calculate velocity using the Manning equation (0.66667 ~ 2/3).
        # Fixed: write into the existing arrays in place ([:]) instead of
        # rebinding self.vel / self.disch to new arrays, so that the
        # 'water__velocity' and 'water__specific_discharge' grid fields
        # declared in _output_var_names actually receive the values.
        self.vel[:] = -self.sign_slope * self.vel_coef * H_link**0.66667 \
            * self.sqrt_slope

        # Calculate discharge
        self.disch[:] = H_link * self.vel

        # Flux divergence
        dqda = self._grid.calc_flux_div_at_node(self.disch)

        # Rate of change of water depth: rain (while it lasts) minus
        # infiltration minus the divergence of discharge.
        if current_time < self.precip_duration:
            ppt = self.precip
        else:
            ppt = 0.0
        dHdt = ppt - self.infilt - dqda

        # Update water depth: simple forward Euler scheme
        self.depth[self._grid.core_nodes] += dHdt[self._grid.core_nodes] * dt

        # Very crude numerical hack: prevent negative water depth
        self.depth[self.depth < 0.0] = 0.0
if __name__ == '__main__':
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
|
{
"content_hash": "64315f8aa2115d1ea4457cace122c40e",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 77,
"avg_line_length": 34.964646464646464,
"alnum_prop": 0.5750397226635852,
"repo_name": "SiccarPoint/landlab",
"id": "eb3c1e341c0eb28ecde4f205c302f4c6bd96193a",
"size": "6947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "landlab/components/overland_flow/generate_overland_flow_kinwave.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1452"
},
{
"name": "PowerShell",
"bytes": "7128"
},
{
"name": "Python",
"bytes": "2619353"
},
{
"name": "Shell",
"bytes": "3132"
}
],
"symlink_target": ""
}
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from django.contrib.auth.models import User
from models import Candidate, CandidateList
class CreationTest(TestCase):
    """Tests creation of ``CandidateList``/``Candidate`` and their basic methods."""

    def setUp(self):
        # Track every created user so tearDown can remove them all.
        self.users = [
            User.objects.create(username='Linus'),
            User.objects.create(username='Guido'),
            User.objects.create(username='Jacob'),
        ]
        # Keep the named attributes for tests that reference users directly.
        self.linus, self.guido, self.jacob = self.users

    def test_candidatelist(self):
        """
        Tests the creation of CandidateList and its basic methods
        """
        cl1 = CandidateList.objects.create(name="Imagine", ballot="I")
        c = Candidate.objects.create(candidate_list=cl1, user=self.jacob, ordinal=1)
        # A freshly created candidate is not yet verified, so it is excluded.
        self.assertFalse(cl1.get_candidates())
        c.status = 'V'
        c.save()
        # Verified candidates show up in the list.
        self.assertEqual(cl1.get_candidates().count(), 1)
        c.status = 'X'
        c.save()
        # Rejected candidates are excluded again.
        self.assertFalse(cl1.get_candidates())
        cl1.delete()

    def tearDown(self):
        # NOTE: was previously named ``teardown`` (lowercase), which unittest
        # never calls, and it iterated a non-existent ``self.users``.
        for u in self.users:
            u.delete()
|
{
"content_hash": "4c4e63f1cf0769f00b63887ca8a55d93",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 84,
"avg_line_length": 30.756756756756758,
"alnum_prop": 0.6599297012302284,
"repo_name": "hasadna/open-shot",
"id": "21a29b994112fe5dfe09bdfbf403effc0c769816",
"size": "1138",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "polyorg/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "40636"
},
{
"name": "JavaScript",
"bytes": "10528"
},
{
"name": "Python",
"bytes": "586576"
},
{
"name": "Shell",
"bytes": "3784"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
    """Validator for the ``scattergl.marker.colorscale`` property."""

    def __init__(
        self, plotly_name="colorscale", parent_name="scattergl.marker", **kwargs
    ):
        # Pop the defaults out of kwargs first so explicit caller values win.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {"autocolorscale": False})
        role = kwargs.pop("role", "style")
        super(ColorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            role=role,
            **kwargs
        )
|
{
"content_hash": "cd8f6ac9360d30a8059d06f4be48ab50",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.6107142857142858,
"repo_name": "plotly/python-api",
"id": "6d262eec0bd53e2ddc5331c2aca9e383e1fc80d9",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergl/marker/_colorscale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from django.db.transaction import non_atomic_requests
from django.utils.translation import ugettext
import caching.base as caching
from olympia import amo
from olympia.amo.helpers import url, absolutify
from olympia.amo.feeds import NonAtomicFeed
from olympia.amo.utils import render
from .models import AppVersion
def get_versions(order=('application', 'version_int')):
    """Return ``(apps, versions)`` where versions maps app id -> version list.

    The result is cached per sort order via ``caching.cached``.
    """
    def fetch():
        apps = amo.APP_USAGE
        versions = {app.id: [] for app in apps}
        rows = list(AppVersion.objects.order_by(*order)
                    .filter(application__in=versions)
                    .values_list('application', 'version'))
        for app_id, version in rows:
            versions[app_id].append(version)
        return apps, versions
    # Cache key incorporates the ordering so each sort order caches separately.
    return caching.cached(fetch, 'getv' + ''.join(order))
@non_atomic_requests
def appversions(request):
    """Render the page listing acceptable versions for every application."""
    apps, versions = get_versions()
    context = {'apps': apps, 'versions': versions}
    return render(request, 'applications/appversions.html', context)
class AppversionsFeed(NonAtomicFeed):
    """RSS feed of acceptable application versions.

    Appversions don't carry a created date, so ordering falls back on
    ``version_int`` (hence the ``-version_int`` sort in :meth:`items`).
    """

    def title(self):
        return ugettext(u'Application Versions')

    def link(self):
        return absolutify(url('apps.appversions'))

    def description(self):
        return ugettext(u'Acceptable versions for all applications on AMO.')

    def items(self):
        # Emit the three most recent versions for each application.
        apps, versions = get_versions(order=('application', '-version_int'))
        return [(app, version) for app in apps
                for version in versions[app.id][:3]]
        # (removed: an unreachable second return statement left over here)

    def item_title(self, item):
        app, version = item
        return u'%s %s' % (app.pretty, version)

    item_description = ''

    def item_link(self):
        return self.link()

    def item_guid(self, item):
        # Feed items are (app, version) tuples; format both into the guid.
        return self.item_link() + '%s:%s' % item
|
{
"content_hash": "cbd747abb4de929a059cd011d8b4a037",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 76,
"avg_line_length": 30.41269841269841,
"alnum_prop": 0.6466597077244259,
"repo_name": "harikishen/addons-server",
"id": "749293252c283428efeefe2c5a4518631e5f5c71",
"size": "1916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/applications/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "822508"
},
{
"name": "HTML",
"bytes": "698554"
},
{
"name": "JavaScript",
"bytes": "1087360"
},
{
"name": "Makefile",
"bytes": "811"
},
{
"name": "PLSQL",
"bytes": "990"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "4560536"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "7564"
},
{
"name": "Smarty",
"bytes": "1859"
}
],
"symlink_target": ""
}
|
"""Tests for tf.data service ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import data_service_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
TMP_WORK_DIR = data_service_test_base.TMP_WORK_DIR
NO_WORK_DIR = data_service_test_base.NO_WORK_DIR
class DataServiceOpsTest(data_service_test_base.TestBase,
parameterized.TestCase):
  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         data_service_test_base.all_cluster_configurations()))
  def testDistributeBasic(self, work_dir, fault_tolerant_mode):
    # A one-worker cluster serves range(10) unchanged under every
    # work-dir / fault-tolerance configuration.
    cluster = data_service_test_base.TestCluster(
        num_workers=1,
        work_dir=work_dir,
        fault_tolerant_mode=fault_tolerant_mode)
    num_elements = 10
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    self.assertDatasetProduces(ds, list(range(num_elements)))
  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(compression=[None, "AUTO"])))
  def testDistributeCompression(self, compression):
    # Element compression (off or automatic) must not alter the data.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 10
    ds = self.make_distributed_range_dataset(
        num_elements, cluster, compression=compression)
    self.assertDatasetProduces(ds, list(range(num_elements)))
  @combinations.generate(test_base.default_test_combinations())
  def testDistributeInvalidCompression(self):
    # An unknown compression string is rejected eagerly with ValueError.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    with self.assertRaisesRegex(ValueError, "Invalid compression argument"):
      self.make_distributed_range_dataset(10, cluster, compression="foo")
  @combinations.generate(test_base.eager_only_combinations())
  def testDistributeSparse(self):
    # SparseTensor elements round-trip through the data service.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    element = sparse_tensor.SparseTensor(
        indices=[[0]],
        values=constant_op.constant([0], dtype=dtypes.int32),
        dense_shape=[1])
    ds = dataset_ops.Dataset.from_tensors(element)
    ds = self.make_distributed_dataset(ds, cluster)
    results = [sparse_ops.sparse_tensor_to_dense(elem) for elem in ds]
    self.assertAllEqual(results, [[0]])
  @combinations.generate(test_base.eager_only_combinations())
  def testDistributeRagged(self):
    # Ragged batches round-trip through the data service.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    ds = dataset_ops.Dataset.from_tensor_slices([1, 5, 3, 2, 8])
    ds = ds.map(math_ops.range)
    ds = ds.apply(batching.dense_to_ragged_batch(2))
    ds = self.make_distributed_dataset(ds, cluster)
    results = [elem.to_tensor() for elem in ds]
    self.assertAllEqual(results[0], [[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]])
    self.assertAllEqual(results[1], [[0, 1, 2], [0, 1, 0]])
    self.assertAllEqual(results[2], [[0, 1, 2, 3, 4, 5, 6, 7]])
  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(init_from_file=[True, False])))
  def testDistributeLookupTable(self, init_from_file):
    # A dataset that captures a lookup-table resource still works when
    # distributed, whether the table is initialized from a file or tensors.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    if init_from_file:
      file = os.path.join(self.get_temp_dir(), "distribute_lookup_table")
      with open(file, "w") as f:
        f.write("10\n11\n")
      initializer = lookup_ops.TextFileInitializer(
          file, dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER,
          dtypes.int64, lookup_ops.TextFileIndex.WHOLE_LINE)
    else:
      keys_tensor = constant_op.constant([0, 1], dtype=dtypes.int64)
      vals_tensor = constant_op.constant([10, 11])
      initializer = lookup_ops.KeyValueTensorInitializer(
          keys_tensor, vals_tensor)
    # Default value -1 is returned for key 2, which is absent from the table.
    table = lookup_ops.StaticHashTable(initializer, -1)
    ds = dataset_ops.Dataset.range(3)
    ds = ds.map(table.lookup)
    ds = self.make_distributed_dataset(ds, cluster)
    self.evaluate(lookup_ops.tables_initializer())
    self.assertDatasetProduces(ds, [10, 11, -1], requires_initialization=True)
  @combinations.generate(test_base.default_test_combinations())
  def testDifferentShuffleOrders(self):
    # With two workers shuffling independently (no fixed seed), the two
    # interleaved copies of range(100) should come back in different orders.
    random_seed.set_random_seed(None)
    num_elements = 100
    cluster = data_service_test_base.TestCluster(num_workers=2)
    ds = dataset_ops.Dataset.range(num_elements)
    ds = ds.shuffle(num_elements)
    ds = self.make_distributed_dataset(ds, cluster)
    output = self.getDatasetOutput(ds)
    # The output will be two sequences of range(num_elements)
    # non-deterministically interleaved together. If the orders of the elements
    # were the same, first_order and second_order computed below will be equal.
    first_order = {}
    second_order = {}
    for element in output:
      if element in first_order:
        second_order[element] = len(second_order)
      else:
        first_order[element] = len(first_order)
    self.assertNotEqual(first_order, second_order)
  @combinations.generate(test_base.default_test_combinations())
  def testMultipleEpochs(self):
    # Re-iterating the same distributed dataset yields the full data each time.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 3
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    for _ in range(10):
      self.assertDatasetProduces(ds, list(range(num_elements)))
  @combinations.generate(test_base.default_test_combinations())
  def testRepeatedDataset(self):
    # `.repeat()` applied after distribution behaves like a normal repeat.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 10
    num_repetitions = 5
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    ds = ds.repeat(num_repetitions)
    self.assertDatasetProduces(
        ds, expected_output=num_repetitions * list(range(num_elements)))
  @combinations.generate(test_base.default_test_combinations())
  def testConcurrentEpoch(self):
    # Several independent jobs can read from one cluster concurrently, each
    # receiving a complete, ordered copy of the data.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 10
    num_datasets = 3
    get_nexts = []
    results = []
    for _ in range(num_datasets):
      ds = self.make_distributed_range_dataset(num_elements, cluster)
      get_nexts.append(self.getNext(ds))
      results.append([])
    # Round-robin across the iterators so reads genuinely interleave.
    for _ in range(num_elements):
      for dataset_ind in range(num_datasets):
        result = self.evaluate(get_nexts[dataset_ind]())
        results[dataset_ind].append(result)
    for result in results:
      self.assertEqual(list(range(num_elements)), result)
  @combinations.generate(test_base.default_test_combinations())
  def testMultiWorker(self):
    # In parallel_epochs mode every one of the 3 workers produces the range.
    num_workers = 3
    cluster = data_service_test_base.TestCluster(num_workers=num_workers)
    num_elements = 10
    ds = self.make_distributed_range_dataset(num_elements, cluster)
    self.assertDatasetProduces(
        ds, num_workers * list(range(num_elements)), assert_items_equal=True)
  @combinations.generate(test_base.default_test_combinations())
  def testMaxOutstandingRequests(self):
    # Throttling the client to one outstanding request still reads everything.
    num_workers = 3
    cluster = data_service_test_base.TestCluster(num_workers=num_workers)
    num_elements = 10
    ds = self.make_distributed_range_dataset(
        num_elements, cluster, max_outstanding_requests=1)
    self.assertDatasetProduces(
        ds, num_workers * list(range(num_elements)), assert_items_equal=True)
  @combinations.generate(test_base.eager_only_combinations())
  def testInsideFunction(self):
    # Distributed datasets can be iterated from inside a tf.function.
    num_workers = 3
    cluster = data_service_test_base.TestCluster(num_workers=num_workers)
    num_elements = 10
    @def_function.function
    def f():
      ds = self.make_distributed_range_dataset(num_elements, cluster)
      result = tensor_array_ops.TensorArray(
          dtypes.int64, size=num_workers * num_elements, dynamic_size=True)
      i = 0
      for elem in ds:
        result = result.write(i, elem)
        i += 1
      return result.stack()
    result = list(f().numpy())
    self.assertCountEqual(num_workers * list(range(num_elements)), result)
  @combinations.generate(test_base.default_test_combinations())
  def testSharedJobName(self):
    # Two datasets sharing one job_name share a job: between them they see
    # each element exactly once.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 1000
    def make_ds():
      return dataset_ops.Dataset.range(num_elements).shuffle(num_elements)
    ds1 = self.make_distributed_dataset(make_ds(), cluster, job_name="job_name")
    ds2 = self.make_distributed_dataset(make_ds(), cluster, job_name="job_name")
    get_next_1 = self.getNext(ds1)
    get_next_2 = self.getNext(ds2)
    results = []
    # Alternate reads between the two consumers, then drain both.
    for _ in range(num_elements // 5):
      results.append(self.evaluate(get_next_1()))
      results.append(self.evaluate(get_next_2()))
    results += self.getIteratorOutput(get_next_1)
    results += self.getIteratorOutput(get_next_2)
    self.assertCountEqual(list(range(num_elements)), results)
  @combinations.generate(test_base.default_test_combinations())
  def testDifferentJobNames(self):
    # Distinct job names create distinct jobs: each dataset sees all elements.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 10
    ds1 = self.make_distributed_range_dataset(
        num_elements, cluster, job_name="job_name1")
    ds2 = self.make_distributed_range_dataset(
        num_elements, cluster, job_name="job_name2")
    self.assertDatasetProduces(ds1, list(range(num_elements)))
    self.assertDatasetProduces(ds2, list(range(num_elements)))
  @combinations.generate(test_base.eager_only_combinations())
  def testSharedJobNameMultiIteration(self):
    # Within an iteration the shared job is drained by whichever dataset
    # reads first; a fresh iteration restarts the job.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 10
    ds1 = self.make_distributed_range_dataset(
        num_elements, cluster, job_name="job_name")
    ds2 = self.make_distributed_range_dataset(
        num_elements, cluster, job_name="job_name")
    # iteration 1
    self.assertDatasetProduces(ds1, list(range(num_elements)))
    self.assertDatasetProduces(ds2, [])
    # iteration 2
    self.assertDatasetProduces(ds2, list(range(num_elements)))
    self.assertDatasetProduces(ds1, [])
  @combinations.generate(test_base.default_test_combinations())
  def testSharedJobNameRepeat(self):
    # Repetitions of a shared job are split between the two readers; the
    # union of what they read is num_repetitions copies of the range.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    num_repetitions = 3
    ds1 = self.make_distributed_range_dataset(
        num_elements, cluster, job_name="job_name")
    ds1 = ds1.repeat(num_repetitions)
    ds2 = self.make_distributed_range_dataset(
        num_elements, cluster, job_name="job_name")
    ds2 = ds2.repeat(num_repetitions)
    results = []
    get_next_1 = self.getNext(ds1)
    get_next_2 = self.getNext(ds2)
    for _ in range((num_elements * num_repetitions) // 5):
      results.append(self.evaluate(get_next_1()))
    for _ in range((num_elements * num_repetitions) // 5):
      results.append(self.evaluate(get_next_2()))
    results += self.getIteratorOutput(get_next_1)
    results += self.getIteratorOutput(get_next_2)
    self.assertCountEqual(num_repetitions * list(range(num_elements)), results)
  @combinations.generate(
      combinations.times(test_base.default_test_combinations()))
  def testRoundRobinConsumerRestart(self):
    # Starting a fresh consumer group mid-round for the same job is rejected.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    # Round robin reads can cause slow cluster shutdown.
    data_service_test_base.GLOBAL_CLUSTERS.add(cluster)
    num_consumers = 3
    ds = self.make_round_robin_dataset(cluster, num_consumers)
    ds = ds.take(20)
    self.getDatasetOutput(ds)
    ds2 = self.make_round_robin_dataset(cluster, num_consumers)
    ds2 = ds2.take(20)
    with self.assertRaisesRegex(errors.FailedPreconditionError,
                                "current round has already reached"):
      self.getDatasetOutput(ds2)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(num_workers=[1, 3], num_consumers=[1, 2, 5])))
  def testRoundRobin(self, num_workers, num_consumers):
    # Round-robin reads hand elements to consumers in strict rotation.
    cluster = data_service_test_base.TestCluster(num_workers=num_workers)
    # Round robin reads can cause slow cluster shutdown.
    data_service_test_base.GLOBAL_CLUSTERS.add(cluster)
    ds = self.make_round_robin_dataset(cluster, num_consumers)
    ds = ds.take(100)
    results = self.getDatasetOutput(ds)
    self.checkRoundRobinGroups(results, num_consumers)
@combinations.generate(test_base.default_test_combinations())
def testRoundRobinBucketizing(self):
# Tests a common use case for round robin reads. At each step, all
# consumers should get batches with the same bucket size.
cluster = data_service_test_base.TestCluster(num_workers=4)
# Round robin reads can cause slow cluster shutdown.
data_service_test_base.GLOBAL_CLUSTERS.add(cluster)
num_elements = 100
low_bucket_max = 30
mid_bucket_max = 60
bucket_boundaries = [low_bucket_max, mid_bucket_max]
batch_size = 10
num_consumer_hosts = 3
replicas_per_consumer_host = 5
num_consumers = num_consumer_hosts * replicas_per_consumer_host
bucket_batch_sizes = [batch_size] * (len(bucket_boundaries) + 1)
# Set up the dataset that will run on the tf.data workers.
ds = dataset_ops.Dataset.range(num_elements, output_type=dtypes.int32)
ds = ds.shuffle(num_elements)
ds = ds.repeat()
ds = ds.apply(
grouping.bucket_by_sequence_length(
lambda x: x,
bucket_boundaries,
bucket_batch_sizes,
drop_remainder=True))
ds = ds.apply(
grouping.group_by_window(
lambda x: math_ops.cast(x[1], dtypes.int64),
lambda _, x: dataset_ops.Dataset.from_tensors(x),
window_size=num_consumers))
ds = ds.flat_map(lambda x: x)
# Set up the per-consumer-host datasets. During each global step, we pull
# `replicas_per_consumer_host` batches from each of these datasets.
host_datasets = []
for host_index in range(num_consumer_hosts):
per_replica_datasets = []
for i in range(replicas_per_consumer_host):
consumer_index = host_index * replicas_per_consumer_host + i
per_replica_datasets.append(
self.make_distributed_dataset(
ds,
cluster,
job_name="test",
consumer_index=consumer_index,
num_consumers=num_consumers))
host_dataset = dataset_ops.Dataset.from_tensor_slices(
per_replica_datasets)
host_dataset = host_dataset.interleave(
lambda x: x,
cycle_length=len(per_replica_datasets),
num_parallel_calls=len(per_replica_datasets),
deterministic=True)
host_datasets.append(host_dataset)
# Use parallel interleave to read from host datasets in parallel.
ds = dataset_ops.Dataset.from_tensor_slices(host_datasets)
ds = ds.interleave(
lambda x: x,
block_length=replicas_per_consumer_host,
cycle_length=len(host_datasets),
num_parallel_calls=len(host_datasets),
deterministic=True)
num_rounds = 4
get_next = self.getNext(ds)
results = []
for _ in range(num_rounds * num_consumers):
results.append(self.evaluate(get_next()))
def get_bucket(elem):
bucket_ind = 0
while bucket_ind < len(
bucket_boundaries) and elem >= bucket_boundaries[bucket_ind]:
bucket_ind += 1
return bucket_ind
# Check that the batches for each step contain elements from the same
# bucket.
for i in range(0, len(results), num_consumers):
batches = results[num_consumers * i:num_consumers * (i + 1)]
bucket_inds = [get_bucket(batch[0]) for batch in batches]
for bucket_ind in bucket_inds[1:]:
self.assertEqual(
bucket_inds[0], bucket_ind,
"Batches: {}, Buckets: {}".format(batches, bucket_inds))
  @combinations.generate(test_base.v1_only_combinations())
  def testRoundRobinFiniteV1(self):
    # In TF1 graph mode a finite dataset fails at end-of-sequence during a
    # round-robin read.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    ds = dataset_ops.Dataset.range(num_elements)
    ds = self.make_distributed_dataset(
        ds, cluster, job_name="test", consumer_index=0, num_consumers=1)
    with self.assertRaisesRegex(
        errors.FailedPreconditionError, "Encountered end of sequence on a "
        "round-robin read iterator"):
      self.getDatasetOutput(ds)
  @combinations.generate(test_base.v2_only_combinations())
  def testRoundRobinFiniteV2(self):
    # In TF2 the finite cardinality is detected and rejected up front.
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    ds = dataset_ops.Dataset.range(num_elements)
    ds = self.make_distributed_dataset(
        ds, cluster, job_name="test", consumer_index=0, num_consumers=1)
    with self.assertRaisesRegex(
        errors.FailedPreconditionError, "Round robin reads "
        "require that the input dataset has infinite "
        "cardinality, but the dataset has cardinality " + str(num_elements)):
      self.getDatasetOutput(ds)
  @combinations.generate(
      combinations.times(test_base.eager_only_combinations(),
                         combinations.combine(job_name=[None, "test"])))
  def testGcUnusedJob(self, job_name):
    # After the last iterator is dropped, the job's worker tasks are garbage
    # collected once the (deliberately short) GC timeout elapses.
    cluster = data_service_test_base.TestCluster(
        num_workers=1, job_gc_check_interval_ms=50, job_gc_timeout_ms=20)
    num_elements = 100
    ds = self.make_distributed_range_dataset(
        num_elements, cluster, job_name=job_name)
    it = iter(ds)
    self.assertEqual(next(it).numpy(), 0)
    self.assertEqual(cluster.workers[0].num_tasks(), 1)
    del it
    # Poll until GC removes the task (test times out if it never happens).
    while cluster.workers[0].num_tasks() > 0:
      time.sleep(0.1)
  @combinations.generate(test_base.eager_only_combinations())
  def testDontGcUsedJob(self):
    # A job that still has a live iterator must survive garbage collection.
    cluster = data_service_test_base.TestCluster(
        num_workers=1, job_gc_check_interval_ms=50, job_gc_timeout_ms=20)
    num_elements = 10
    it1 = iter(
        self.make_distributed_range_dataset(
            num_elements, cluster, job_name="test1"))
    it2 = iter(
        self.make_distributed_range_dataset(
            num_elements, cluster, job_name="test2"))
    it3 = iter(  # this iterator keeps the task alive. pylint: disable=unused-variable
        self.make_distributed_range_dataset(
            num_elements, cluster, job_name="test2"))
    self.assertEqual(cluster.workers[0].num_tasks(), 2)
    del it1
    del it2
    # Check that only the first job is gced. The second job will not be gced
    # because there is still an outstanding iterator for it.
    while cluster.workers[0].num_tasks() > 1:
      time.sleep(0.1)
    self.assertEqual(cluster.workers[0].num_tasks(), 1)
  @combinations.generate(test_base.default_test_combinations())
  def testApplyDeterminismOption(self):
    # experimental_deterministic=False is honored through distribution: a
    # slow first element may be overtaken by later ones.
    elements = list(range(10))
    cluster = data_service_test_base.TestCluster(num_workers=1)
    def dataset_fn(delay_ms):
      def interleave_fn(x):
        # Delay only element 0 so out-of-order delivery is observable.
        ds = dataset_ops.Dataset.from_tensors(x)
        if math_ops.equal(x, 0):
          ds = ds.apply(testing.sleep(delay_ms * 1000))
        else:
          ds = ds.apply(testing.sleep(0))
        return ds
      ds = dataset_ops.Dataset.from_tensor_slices(elements)
      ds = ds.interleave(interleave_fn, cycle_length=10, num_parallel_calls=10)
      opts = dataset_ops.Options()
      opts.experimental_deterministic = False
      ds = ds.with_options(opts)
      ds = self.make_distributed_dataset(ds, cluster)
      return ds
    self.checkDeterminism(
        dataset_fn=dataset_fn,
        expect_determinism=False,
        expected_elements=elements)
  def run_stateful(self, external_state_policy):
    # Helper: distribute a dataset containing a stateful (random) op under
    # the given external-state policy and drain it.
    num_elements = 10
    ds = dataset_ops.Dataset.range(num_elements).map(
        lambda _: random_ops.random_uniform(()))
    options = dataset_ops.Options()
    options.experimental_external_state_policy = external_state_policy
    ds = ds.with_options(options)
    cluster = data_service_test_base.TestCluster(num_workers=3)
    ds = self.make_distributed_dataset(ds, cluster)
    self.getDatasetOutput(ds)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(external_state_policy=[
              distribute_options.ExternalStatePolicy.IGNORE,
              distribute_options.ExternalStatePolicy.WARN
          ])))
  def testStatefulNoError(self, external_state_policy):
    # IGNORE and WARN policies allow stateful ops to be distributed.
    self.run_stateful(external_state_policy)
  @combinations.generate(test_base.default_test_combinations())
  def testStatefulError(self):
    # The FAIL policy rejects stateful ops.
    with self.assertRaises(errors.FailedPreconditionError):
      self.run_stateful(distribute_options.ExternalStatePolicy.FAIL)
  @combinations.generate(test_base.default_test_combinations())
  def testDistributeDistributedEpochTensorSlices(self):
    # distributed_epoch mode splits a tensor-slices source across workers.
    cluster = data_service_test_base.TestCluster(num_workers=2)
    vals = [5, 1, 2, 4]
    ds = dataset_ops.Dataset.from_tensor_slices(vals)
    ds = self.make_distributed_dataset(
        ds, cluster, processing_mode="distributed_epoch")
    self.assertDatasetProduces(ds, vals, assert_items_equal=True)
  @combinations.generate(test_base.default_test_combinations())
  def testDistributeDistributedEpochInterleave(self):
    # distributed_epoch splitting works through a (serial) interleave.
    cluster = data_service_test_base.TestCluster(num_workers=2)
    elements = [1, 5, 0]
    ds = dataset_ops.Dataset.from_tensor_slices(elements)
    ds = ds.interleave(lambda x: dataset_ops.Dataset.from_tensor_slices([x]))
    ds = self.make_distributed_dataset(
        ds, cluster, processing_mode="distributed_epoch")
    self.assertDatasetProduces(ds, elements, assert_items_equal=True)
  @combinations.generate(test_base.default_test_combinations())
  def testDistributeDistributedEpochParallelInterleave(self):
    # distributed_epoch splitting works through a parallel interleave.
    cluster = data_service_test_base.TestCluster(num_workers=2)
    elements = [1, 5, 0]
    ds = dataset_ops.Dataset.from_tensor_slices(elements)
    ds = ds.interleave(
        lambda x: dataset_ops.Dataset.from_tensor_slices([x]),
        num_parallel_calls=dataset_ops.AUTOTUNE)
    ds = self.make_distributed_dataset(
        ds, cluster, processing_mode="distributed_epoch")
    self.assertDatasetProduces(ds, elements, assert_items_equal=True)
  @combinations.generate(test_base.default_test_combinations())
  def testDistributeDistributedEpochFlatMap(self):
    # distributed_epoch splitting works through flat_map.
    cluster = data_service_test_base.TestCluster(num_workers=2)
    elements = [1, 5, 0]
    ds = dataset_ops.Dataset.from_tensor_slices(elements)
    ds = ds.flat_map(lambda x: dataset_ops.Dataset.from_tensor_slices([x]))
    ds = self.make_distributed_dataset(
        ds, cluster, processing_mode="distributed_epoch")
    self.assertDatasetProduces(ds, elements, assert_items_equal=True)
  @combinations.generate(test_base.default_test_combinations())
  def testDistributeDistributedEpochRepeat(self):
    # distributed_epoch splitting works through a finite repeat.
    cluster = data_service_test_base.TestCluster(num_workers=2)
    num_repeats = 5
    num_elements = 20
    ds = dataset_ops.Dataset.range(num_elements).repeat(num_repeats)
    ds = self.make_distributed_dataset(
        ds, cluster, processing_mode="distributed_epoch")
    self.assertDatasetProduces(
        ds, num_repeats * list(range(num_elements)), assert_items_equal=True)
  @combinations.generate(test_base.default_test_combinations())
  def testDistributeDistributedEpochForeverRepeat(self):
    # With an infinite repeat, every element keeps appearing with roughly
    # uniform frequency (at least half the expected count).
    cluster = data_service_test_base.TestCluster(num_workers=2)
    num_elements = 20
    elements_to_read = 1000
    ds = dataset_ops.Dataset.range(num_elements).repeat()
    ds = self.make_distributed_dataset(
        ds, cluster, processing_mode="distributed_epoch")
    get_next = self.getNext(ds)
    results = {}
    for _ in range(elements_to_read):
      val = self.evaluate(get_next())
      if val not in results:
        results[val] = 0
      results[val] += 1
    for i in range(num_elements):
      self.assertGreater(results[i], elements_to_read / num_elements / 2)
  @combinations.generate(test_base.default_test_combinations())
  def testDistributeDistributedEpochForeverRepeatFewElements(self):
    num_workers = 5
    cluster = data_service_test_base.TestCluster(num_workers=num_workers)
    # Less than the number of workers, so that some workers get zero elements on
    # the first repetition.
    num_elements = 1
    ds = dataset_ops.Dataset.range(num_elements).repeat()
    ds = self.make_distributed_dataset(
        ds, cluster, processing_mode="distributed_epoch")
    get_next = self.getNext(ds)
    for _ in range(20):
      self.assertEqual(self.evaluate(get_next()), 0)
    # Stop all but one worker and check that we can still read.
    for i in range(num_workers - 1):
      cluster.workers[i].stop()
    for _ in range(20):
      self.assertEqual(self.evaluate(get_next()), 0)
  @combinations.generate(test_base.default_test_combinations())
  def testDistributeDistributedEpochShuffleAndRepeat(self):
    # distributed_epoch splitting works through shuffle + repeat.
    cluster = data_service_test_base.TestCluster(num_workers=2)
    num_repeats = 5
    num_elements = 20
    ds = dataset_ops.Dataset.range(num_elements).shuffle(num_elements).repeat(
        num_repeats)
    ds = self.make_distributed_dataset(
        ds, cluster, processing_mode="distributed_epoch")
    self.assertDatasetProduces(
        ds, num_repeats * list(range(num_elements)), assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testDistributeFromInterleave(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
ds = dataset_ops.Dataset.range(2)
def interleave_fn(_):
dataset = dataset_ops.Dataset.range(2)
self.make_distributed_dataset(dataset, cluster)
return dataset
ds = ds.interleave(interleave_fn, cycle_length=2)
self.assertDatasetProduces(ds, [0, 0, 1, 1])
@combinations.generate(test_base.default_test_combinations())
def testDistributeDistributedEpoch(self):
cluster = data_service_test_base.TestCluster(num_workers=2)
num_elements = 100
ds = dataset_ops.Dataset.range(num_elements)
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertDatasetProduces(
ds, list(range(num_elements)), assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testDistributeNonStringAddresses(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaisesRegex(ValueError, "service must be a string"):
ds = ds.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs", service=1))
@combinations.generate(test_base.default_test_combinations())
def testDistributeEmptyAddress(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaisesWithLiteralMatch(ValueError,
"service must not be empty"):
ds = ds.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs", service=""))
@combinations.generate(test_base.default_test_combinations())
def testDistributeExplicitProtocol(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
ds = dataset_ops.Dataset.range(10)
ds = ds.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs",
service="grpc://" + cluster.dispatcher_address()))
self.assertDatasetProduces(ds, list(range(10)))
  @combinations.generate(test_base.default_test_combinations())
  def testDistributeInvalidProtocol(self):
    """An unregistered protocol prefix ("grp://") fails at iteration time."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    ds = dataset_ops.Dataset.range(10)
    # The error surfaces only when the dataset is iterated, not when it is
    # defined, so both the apply() and the read happen inside the assertion.
    with self.assertRaisesRegex(
        errors.NotFoundError,
        "No credentials factory has been registered for protocol grp"):
      ds = ds.apply(
          data_service_ops.distribute(
              processing_mode="parallel_epochs",
              service="grp://" + cluster.dispatcher_address()))
      self.getDatasetOutput(ds)
@combinations.generate(test_base.eager_only_combinations())
def testDistributeInvalidProcessingMode(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaisesRegex(ValueError,
"invalid is not a valid processing mode"):
ds = ds.apply(
data_service_ops.distribute(
processing_mode="invalid", service="grpc://localhost:5000"))
  @combinations.generate(test_base.default_test_combinations())
  def testZipDifferentProcessingModesDatasets(self):
    """Zipping a distributed_epoch and a parallel_epochs dataset is allowed."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    ds1 = dataset_ops.Dataset.range(num_elements)
    ds1 = self.make_distributed_dataset(
        ds1, cluster, processing_mode="distributed_epoch")
    ds2 = dataset_ops.Dataset.range(num_elements)
    ds2 = self.make_distributed_dataset(
        ds2, cluster, processing_mode="parallel_epochs")
    ds = dataset_ops.Dataset.zip((ds1, ds2))
    # Pairing across the two modes is unordered, so only the multiset of
    # pairs is checked.
    self.assertDatasetProduces(
        ds,
        list(zip(range(num_elements), range(num_elements))),
        assert_items_equal=True)
  @combinations.generate(test_base.default_test_combinations())
  def testZipDifferentProcessingModesDatasetsSharedJobName(self):
    """Reusing one job name across different processing modes is an error."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 100
    ds1 = dataset_ops.Dataset.range(num_elements)
    ds1 = self.make_distributed_dataset(
        ds1, cluster, processing_mode="distributed_epoch", job_name="job_name")
    ds2 = dataset_ops.Dataset.range(num_elements)
    ds2 = self.make_distributed_dataset(
        ds2, cluster, processing_mode="parallel_epochs", job_name="job_name")
    ds = dataset_ops.Dataset.zip((ds1, ds2))
    # The conflict is only detected when the second job is created during
    # iteration, hence reading the output inside the assertion context.
    with self.assertRaisesRegex(errors.FailedPreconditionError,
                                "but there is already an existing job"):
      self.getDatasetOutput(ds)
@combinations.generate(test_base.default_test_combinations())
def testFromDatasetId(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
dataset_id = data_service_ops.register_dataset(cluster.dispatcher_address(),
ds)
from_dataset_id_ds = data_service_ops.from_dataset_id(
"parallel_epochs", cluster.dispatcher_address(), dataset_id,
ds.element_spec)
self.assertDatasetProduces(from_dataset_id_ds, list(range(num_elements)))
@combinations.generate(test_base.default_test_combinations())
def testFromDatasetIdMultipleComponents(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
ds = dataset_ops.Dataset.zip({"a": (ds, ds), "b": ds})
dataset_id = data_service_ops.register_dataset(cluster.dispatcher_address(),
ds)
from_dataset_id_ds = data_service_ops.from_dataset_id(
"parallel_epochs", cluster.dispatcher_address(), dataset_id,
ds.element_spec)
output = self.getDatasetOutput(from_dataset_id_ds)
for i in range(num_elements):
self.assertEqual(i, output[i]["a"][0])
self.assertEqual(i, output[i]["a"][1])
self.assertEqual(i, output[i]["b"])
  @combinations.generate(test_base.default_test_combinations())
  def testFromDatasetIdWrongElementSpec(self):
    """Supplying a mismatched element_spec fails when the data is read."""
    cluster = data_service_test_base.TestCluster(num_workers=1)
    num_elements = 10
    ds = dataset_ops.Dataset.range(num_elements)
    dataset_id = data_service_ops.register_dataset(cluster.dispatcher_address(),
                                                   ds)
    # The registered dataset actually yields int64 scalars; claim variant.
    wrong_spec = tensor_spec.TensorSpec(shape=(), dtype=dtypes.variant)
    from_dataset_id_ds = data_service_ops.from_dataset_id(
        "parallel_epochs", cluster.dispatcher_address(), dataset_id, wrong_spec)
    # The spec mismatch is only detected when an element is fetched.
    with self.assertRaisesRegex(errors.FailedPreconditionError,
                                "Expected a tensor of type variant"):
      self.evaluate(self.getNext(from_dataset_id_ds)())
@combinations.generate(test_base.default_test_combinations())
def testFromDatasetIdNotRegistered(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
dataset_id = 0
element_spec = tensor_spec.TensorSpec(shape=(), dtype=dtypes.variant)
from_dataset_id_ds = data_service_ops.from_dataset_id(
"parallel_epochs", cluster.dispatcher_address(), dataset_id,
element_spec)
with self.assertRaisesRegex(errors.NotFoundError, "Dataset id"):
self.evaluate(self.getNext(from_dataset_id_ds)())
  @combinations.generate(test_base.default_test_combinations())
  def testCancellation(self):
    """Blocked prefetch requests can be cancelled during iterator teardown."""
    self.skipTest("b/162521601")
    # 1000 seconds: effectively blocks forever unless cancellation works.
    sleep_microseconds = int(1e6) * 1000
    cluster = data_service_test_base.TestCluster(num_workers=1)
    # Create a dataset which produces the first element quickly, and the second
    # element slowly. Fetching the first element triggers prefetching of the
    # second element, which we should be able to cancel.
    slow = dataset_ops.Dataset.range(1)
    slow = slow.apply(testing.sleep(sleep_microseconds))
    ds = dataset_ops.Dataset.range(1).concatenate(slow)
    ds = self.make_distributed_dataset(ds, cluster)
    ds = ds.prefetch(1)
    get_next = self.getNext(ds)
    self.assertEqual(0, self.evaluate(get_next()))
    # Without properly implemented cancellation, we will hang here while trying
    # to garbage collect the dataset iterator.
@combinations.generate(test_base.default_test_combinations())
def testRegisterEquivalentDatasets(self):
ds_1 = dataset_ops.Dataset.range(10)
ds_2 = dataset_ops.Dataset.range(10)
cluster = data_service_test_base.TestCluster(num_workers=1)
id_1 = data_service_ops.register_dataset(cluster.dispatcher_address(), ds_1)
id_2 = data_service_ops.register_dataset(cluster.dispatcher_address(), ds_2)
self.assertEqual(self.evaluate(id_1), self.evaluate(id_2))
@combinations.generate(test_base.default_test_combinations())
def testRegisterDifferentDatasets(self):
ds_1 = dataset_ops.Dataset.range(10)
ds_2 = dataset_ops.Dataset.range(20)
cluster = data_service_test_base.TestCluster(num_workers=1)
id_1 = data_service_ops.register_dataset(cluster.dispatcher_address(), ds_1)
id_2 = data_service_ops.register_dataset(cluster.dispatcher_address(), ds_2)
self.assertNotEqual(self.evaluate(id_1), self.evaluate(id_2))
@combinations.generate(test_base.default_test_combinations())
def testDistributedEpochOnZippedDataset(self):
ds_1 = dataset_ops.Dataset.range(10)
ds_2 = dataset_ops.Dataset.range(10)
cluster = data_service_test_base.TestCluster(num_workers=1)
ds_3 = dataset_ops.Dataset.zip((ds_1, ds_2))
ds_3 = self.make_distributed_dataset(
ds_3, cluster, processing_mode="distributed_epoch")
error_regex = "Cannot create a split provider for dataset " + \
"of type ZipDataset"
with self.assertRaisesRegex(errors.UnimplementedError, error_regex):
self.getDatasetOutput(ds_3)
@combinations.generate(test_base.default_test_combinations())
def testDistributedEpochOnDistributedDataset(self):
cluster_1 = data_service_test_base.TestCluster(num_workers=1)
cluster_2 = data_service_test_base.TestCluster(num_workers=1)
num_sizes = 10
size_repeats = 5
numbers = [1 * i for i in range(num_sizes)] * size_repeats
ds = dataset_ops.Dataset.from_tensor_slices(numbers)
ds = self.make_distributed_dataset(
ds, cluster_1, processing_mode="parallel_epochs")
ds = ds.map(lambda x: x + 1)
ds = self.make_distributed_dataset(
ds, cluster_2, processing_mode="distributed_epoch")
error_regex = "Cannot create a split provider for dataset " + \
"of type DataServiceDataset"
with self.assertRaisesRegex(errors.UnimplementedError, error_regex):
self.getDatasetOutput(ds)
  @combinations.generate(test_base.default_test_combinations())
  def testTwoLevelDistribute(self):
    """Chains two service clusters: distribute, group_by_window, distribute."""
    cluster_1_size = 3
    cluster_1 = data_service_test_base.TestCluster(num_workers=cluster_1_size)
    cluster_2 = data_service_test_base.TestCluster(num_workers=1)
    num_sizes = 10
    size_repeats = 5
    # Strings of lengths 0..9, each repeated 5 times, then shuffled so
    # grouping by length is non-trivial.
    strings = ["a" * i for i in range(num_sizes)] * size_repeats
    ds = dataset_ops.Dataset.from_tensor_slices(strings)
    ds = ds.shuffle(len(strings))
    ds = self.make_distributed_dataset(ds, cluster_1)
    # Large enough so that all strings of the same size are windowed together.
    window_size = cluster_1_size * size_repeats
    batch_size = size_repeats
    def key_func(x):
      return math_ops.cast(string_ops.string_length_v2(x), dtypes.int64)
    ds = ds.apply(
        grouping.group_by_window(
            key_func=key_func,
            reduce_func=lambda _, x: x.batch(batch_size),
            window_size=window_size))
    ds = self.make_distributed_dataset(ds, cluster_2)
    # Each of cluster_1's workers produces one identical batch per string
    # length, so every batch should appear cluster_1_size times in a row.
    get_next = self.getNext(ds)
    for _ in range(num_sizes):
      element = self.evaluate(get_next())
      for _ in range(1, cluster_1_size):
        self.assertAllEqual(self.evaluate(get_next()), element)
    self.assertEmpty(self.getIteratorOutput(get_next))
  @combinations.generate(
      combinations.times(test_base.default_test_combinations()))
  def testDistributeLargeGraph(self):
    """Dataset graphs above the default gRPC message size limit still work."""
    cluster = data_service_test_base.TestCluster(
        num_workers=1, work_dir=NO_WORK_DIR, fault_tolerant_mode=False)
    # Larger than default OSS grpc message size limit of 4MB.
    tensor = array_ops.ones((2, 1000, 1000), dtype=dtypes.float32)
    ds = dataset_ops.Dataset.from_tensors(tensor)
    ds = self.make_distributed_dataset(ds, cluster)
    self.assertDatasetProduces(ds, [tensor])
# Run this module's tests when executed directly.
if __name__ == "__main__":
  test.main()
|
{
"content_hash": "b1722ebe8e94ab7842920703a62f97ce",
"timestamp": "",
"source": "github",
"line_count": 903,
"max_line_length": 86,
"avg_line_length": 42.80841638981174,
"alnum_prop": 0.6943553394039735,
"repo_name": "petewarden/tensorflow",
"id": "bae14987eafa70f0a83ad4dd00db8675e35699a3",
"size": "39345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/kernel_tests/data_service_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
import os
import io
import Exalt.view as vu
import Exalt.messages as messages
import Exalt.encodings as encodings
import Exalt.constants as constants
import Exalt.namespaces as namespaces
import Exalt.utils as utils
import Exalt.exalt as exalt
from functools import partial
from lxml import etree
from lxml import isoschematron
##########
# PUBLIC #
##########
def get_xslt_relaxng_path(version):
    """Return the file URI of the bundled XSLT RelaxNG schema for `version`.

    A version string such as "2.0" maps to rng/xslt20.rng; a missing
    (None) version falls back to the 1.0 schema.
    """
    suffix = version.replace(".", "") if version is not None else "10"
    schema = os.path.join(exalt.get_plugin_path(), "rng", "xslt%s.rng" % suffix)
    return exalt.file_to_uri(schema)
def validate_against_schema(parser, error, view, document, schema_path):
    """Validate document against schema using parser and throw error if
    validation fails.

    parser      -- lxml validator class (e.g. etree.RelaxNG, etree.XMLSchema)
    error       -- that parser's parse-error exception type
    view        -- the editor view the result is reported in
    document    -- parsed lxml document to validate
    schema_path -- absolute or relative path/URL of the schema file

    Returns False when validation could not be run or failed to set up.
    """
    current_file = view.file_name()
    # If the schema file URL is a relative URL and the file doesn't have
    # a name (as in, it hasn't been saved), bail out.
    if utils.is_relative_path(schema_path) and not current_file:
        return False
    file = utils.resolve_file_path(schema_path, current_file)
    try:
        # Cached by resolved path, so repeated validations reuse the parser.
        validator = _get_validator(file, parser, file=file)
        return validate(view, document, validator)
    except (error, etree.XSLTApplyError) as e:
        vu.show_error(view, e)
        return False
def get_validator_for_namespace(namespace):
    """Return a validation callable for the given schema-language namespace.

    The result is ``validate_against_schema`` with the matching lxml
    validator class and parse-error type pre-bound. For example,
    'http://relaxng.org/ns/structure/1.0' yields a RelaxNG validator.
    Unknown namespaces yield ``None``.
    """
    dispatch = {
        isoschematron.RELAXNG_NS: (etree.RelaxNG, etree.RelaxNGParseError),
        isoschematron.XML_SCHEMA_NS: (etree.XMLSchema,
                                      etree.XMLSchemaParseError),
        isoschematron.SCHEMATRON_NS: (isoschematron.Schematron,
                                      etree.SchematronParseError),
        namespaces.PRE_ISO_SCHEMATRON: (etree.Schematron,
                                        etree.SchematronParseError),
    }
    if namespace not in dispatch:
        return None
    parser, error = dispatch[namespace]
    return partial(validate_against_schema, parser, error)
def validate_against_xml_schema(view, document, mode="namespace"):
    """Validate `document` against the XML Schema referenced by its
    xsi:schemaLocation / xsi:noNamespaceSchemaLocation attribute.

    Returns False when the document carries no such reference.
    """
    schema = _get_xml_schema_instance(document, mode)
    if schema is None:
        return False
    run_validation = get_validator_for_namespace(isoschematron.XML_SCHEMA_NS)
    return run_validation(view, document, schema)
def validate_against_dtd(view, document):
    """Validate a document against the DTD in its DOCTYPE.

    Handles three DOCTYPE shapes: a SYSTEM identifier pointing at a DTD
    file, a PUBLIC external identifier, and a purely internal subset.
    Returns False when there is no DTD or it cannot be parsed.

    TODO: Add support for external subsets and system identifiers.
    """
    docinfo = document.docinfo
    internal_subset = docinfo.internalDTD
    system_url = docinfo.system_url
    if internal_subset is None and system_url is None:
        return False
    if internal_subset.external_id is None and system_url is not None:
        # SYSTEM identifier only: resolve the DTD file relative to the
        # current buffer's file name.
        try:
            file = utils.resolve_file_path(system_url, view.file_name())
            validator = _get_validator(system_url, etree.DTD, file=file)
            return validate(view, document, validator)
        except etree.DTDParseError as e:
            vu.show_error(view, e)
            return False
    elif internal_subset.external_id is not None:
        # <!DOCTYPE map PUBLIC "-//OASIS//DTD DITA Map//EN" "map.dtd">
        # lxml resolves PUBLIC identifiers via its catalog; the id must be
        # bytes.
        id = bytes(internal_subset.external_id, encodings.UTF8)
        try:
            validator = _get_validator(id, etree.DTD, external_id=id)
            return validate(view, document, validator)
        except etree.DTDParseError as e:
            vu.show_error(view, e)
            return False
    else:
        # <!DOCTYPE people_list [ <!ELEMENT people_list (person)*> ]>
        # The internal subset object is itself usable as a validator.
        try:
            return validate(view, document, internal_subset)
        except etree.DTDParseError as e:
            vu.show_error(view, e)
            return False
def try_validate(view, document):
    """Try each validation strategy in turn until one handles the document.

    Order: DTD, then xsi schema reference (namespace mode, then URI mode),
    and finally any <?xml-model?> processing instructions.
    """
    if validate_against_dtd(view, document):
        return None
    if validate_against_xml_schema(view, document):
        return None
    if validate_against_xml_schema(view, document, mode="URI"):
        return None
    return _validate_against_xml_models(view, document)
def validate(view, document, validator):
    """Validate `document` with `validator` and report the outcome in `view`.

    Returns True when validation ran (whether or not the document turned
    out to be valid -- an invalid document is shown as an error but still
    counts as handled) and False when the schema could not be read.
    """
    try:
        validator.assertValid(document)
        return declare_valid(view)
    except etree.DocumentInvalid as e:
        # Schematron failures arrive wrapped in SVRL report XML; unwrap
        # them so the status bar shows plain text.
        if type(validator) == isoschematron.Schematron:
            message = _get_schematron_error_message(e)
        else:
            message = e
        vu.show_error(view, message, validator.error_log[0])
        return True
    except OSError as e:
        # BUG FIX: the previous code did `messages.SCHEMA_RESOLVE_ERROR % id`,
        # but no local `id` exists in this scope, so it interpolated the
        # *builtin* `id` function into the message. Report the actual OS
        # error instead.
        vu.set_status(view, messages.SCHEMA_RESOLVE_ERROR % e)
        return False
def declare_valid(view):
    """Declare the document valid.

    Remove any highlight regions and indicate validity in the status bar,
    then reset the status so the message does not linger. Always returns
    True so callers can use it as the success result of a validation.
    """
    view.erase_regions(constants.PLUGIN_NAME)
    vu.set_status(view, messages.VALID_MARKUP)
    vu.reset_status(view)
    return True
###########
# PRIVATE #
###########
def _get_validator(id, parser, **kwargs):
    """Return a cached validator for `id`, constructing one on a cache miss.

    `id` may be a DTD public identifier or a schema URI; `parser` is the
    lxml validator class instantiated with `kwargs` when no cached entry
    exists yet.

    This is probably a pretty stupid way of caching parsers. Suggestions
    appreciated.
    """
    if id in exalt.parser_cache:
        return exalt.parser_cache[id]
    validator = parser(**kwargs)
    exalt.parser_cache[id] = validator
    return validator
def _get_xml_schema_instance(document, mode):
    """Extract the schema reference from the root's xsi attributes.

    With mode="namespace", return the first whitespace-separated token of
    the attribute value; otherwise return the last token (the schema URI
    of an xsi:schemaLocation namespace/URI pair). Returns None when the
    document has neither xsi attribute.
    """
    root = document.getroot()
    hits = root.xpath("@xsi:schemaLocation | @xsi:noNamespaceSchemaLocation",
                      namespaces={"xsi": namespaces.XSI})
    if not hits:
        return None
    tokens = hits[0].split()
    return tokens[0] if mode == "namespace" else tokens[-1]
def _get_xml_models(document):
    """Return all top-level <?xml-model ...?> processing instructions."""
    return document.xpath("/processing-instruction('xml-model')")
def _get_schematron_error_message(error):
    """Get a Schematron error message string from Schematron report.

    Schematron errors look like this:

    <svrl:failed-assert xmlns:svrl="http://purl.oclc.org/dsdl/svrl"
                        xmlns:xs="http://www.w3.org/2001/XMLSchema"
                        xmlns:schold="http://www.ascc.net/xml/schematron"
                        xmlns:sch="http://www.ascc.net/xml/schematron"
                        xmlns:iso="http://purl.oclc.org/dsdl/schematron"
                        test="para" location="/section">
      <svrl:text>This section has no paragraphs</svrl:text>
    </svrl:failed-assert>

    That obviously won't fit in the status bar, so we'll extract the
    text from the <svrl:text> element. Not ideal, but will have to do for
    now."""
    # The DocumentInvalid instance stringifies to the SVRL XML above;
    # re-parse it and pull out the first <svrl:text> node's content.
    xml = etree.parse(io.StringIO(str(error)))
    return xml.xpath("//svrl:text[1]//node()",
                     namespaces={"svrl": isoschematron.SVRL_NS})[0]
def _get_validator_for_extension(extension):
    """Map a schema file extension (".xsd", ".rng", ".sch") to a
    validation callable, or None for unrecognized extensions."""
    if extension == ".xsd":
        return get_validator_for_namespace(isoschematron.XML_SCHEMA_NS)
    elif extension == ".rng":
        return get_validator_for_namespace(isoschematron.RELAXNG_NS)
    elif extension == ".sch":
        # BUG FIX: this previously looked up SVRL_NS -- the namespace of
        # Schematron *validation reports*, which get_validator_for_namespace
        # does not handle -- so .sch schemas silently returned None and were
        # never validated. A .sch file is a Schematron schema, so use
        # SCHEMATRON_NS.
        return get_validator_for_namespace(isoschematron.SCHEMATRON_NS)
    else:
        return None
def _validate_against_xml_models(view, document):
    """Validate a document against all xml-model PIs in the document.

    If the document is invalid, stop. If it's valid, move onto the next
    PI.

    NOTE(review): despite the docstring, every branch of the loop body
    below `return`s on the first usable PI, so PIs after the first are
    never consulted -- confirm whether iterating all PIs was intended.
    """
    models = _get_xml_models(document)
    if not models:
        # No xml-model PIs at all: nothing to check, treat as valid.
        declare_valid(view)
        return False
    else:
        for xml_model in models:
            # <?xml-model href="..." schematypens="..."?>
            href = xml_model.get("href")
            namespace = xml_model.get("schematypens")
            if href is None and namespace is None:
                break
            elif href is not None and namespace is None:
                # No schema-type namespace given: infer the schema language
                # from the href's file extension.
                _, extension = os.path.splitext(href)
                if extension == ".dtd":
                    return validate_against_schema(
                        etree.DTD,
                        etree.DTDParseError,
                        view,
                        document,
                        href
                    )
                else:
                    validator = _get_validator_for_extension(extension)
                    if validator is not None:
                        return validator(view, document, href)
                    else:
                        return False
            else:
                # Namespace present: it authoritatively selects the schema
                # language regardless of the href's extension.
                validator = get_validator_for_namespace(namespace)
                return validator(view, document, href)
|
{
"content_hash": "a3f53b2331a7961648025020a4c6e3e8",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 77,
"avg_line_length": 32.46067415730337,
"alnum_prop": 0.6288219683858313,
"repo_name": "eerohele/exalt",
"id": "63abd559ecc1e6d97f2fd2d7a3a3684b30fd5c37",
"size": "8667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "impl/validator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37583"
}
],
"symlink_target": ""
}
|
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class GlanceImage(resource.Resource):
    """A resource managing images in Glance."""

    support_status = support.SupportStatus(version='2014.2')

    PROPERTIES = (
        NAME, IMAGE_ID, IS_PUBLIC, MIN_DISK, MIN_RAM, PROTECTED,
        DISK_FORMAT, CONTAINER_FORMAT, LOCATION
    ) = (
        'name', 'id', 'is_public', 'min_disk', 'min_ram', 'protected',
        'disk_format', 'container_format', 'location'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name for the image. The name of an image is not '
              'unique to a Image Service node.')
        ),
        IMAGE_ID: properties.Schema(
            properties.Schema.STRING,
            _('The image ID. Glance will generate a UUID if not specified.')
        ),
        IS_PUBLIC: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Scope of image accessibility. Public or private. '
              'Default value is False means private.'),
            default=False,
        ),
        MIN_DISK: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of disk space (in GB) required to boot image. '
              'Default value is 0 if not specified '
              'and means no limit on the disk size.'),
            constraints=[
                constraints.Range(min=0),
            ]
        ),
        MIN_RAM: properties.Schema(
            properties.Schema.INTEGER,
            _('Amount of ram (in MB) required to boot image. Default value '
              'is 0 if not specified and means no limit on the ram size.'),
            constraints=[
                constraints.Range(min=0),
            ]
        ),
        PROTECTED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the image can be deleted. If the value is True, '
              'the image is protected and cannot be deleted.')
        ),
        DISK_FORMAT: properties.Schema(
            properties.Schema.STRING,
            _('Disk format of image.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ami', 'ari', 'aki',
                                           'vhd', 'vmdk', 'raw',
                                           'qcow2', 'vdi', 'iso'])
            ]
        ),
        CONTAINER_FORMAT: properties.Schema(
            properties.Schema.STRING,
            _('Container format of image.'),
            required=True,
            constraints=[
                constraints.AllowedValues(['ami', 'ari', 'aki',
                                           'bare', 'ova', 'ovf'])
            ]
        ),
        LOCATION: properties.Schema(
            properties.Schema.STRING,
            _('URL where the data for this image already resides. For '
              'example, if the image data is stored in swift, you could '
              'specify "swift://example.com/container/obj".'),
            required=True,
        ),
    }

    default_client_name = 'glance'

    entity = 'images'

    def handle_create(self):
        """Create the image and return its ID for status polling."""
        # Drop unset (None) properties so glanceclient applies its own
        # defaults instead of receiving explicit nulls.
        args = dict((k, v) for k, v in self.properties.items()
                    if v is not None)
        image_id = self.client().images.create(**args).id
        self.resource_id_set(image_id)
        return image_id

    def check_create_complete(self, image_id):
        """Creation is complete once the image reaches 'active' status."""
        image = self.client().images.get(image_id)
        return image.status == 'active'

    def _show_resource(self):
        """Return the image attributes as a dict for resource show."""
        # Consistency fix: use self.client() (resolved via
        # default_client_name) like handle_create/check_create_complete,
        # instead of the self.glance() alias used previously.
        if self.client().version == 1.0:
            # v1 images are not dict-convertible; fall back to the base
            # implementation.
            return super(GlanceImage, self)._show_resource()
        else:
            image = self.client().images.get(self.resource_id)
            return dict(image)
def resource_mapping():
    """Map Heat resource type names to their implementing classes."""
    return {'OS::Glance::Image': GlanceImage}
|
{
"content_hash": "5ef2ea0de72f9709c30765c0fe5b4485",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 76,
"avg_line_length": 34.85840707964602,
"alnum_prop": 0.5374460522975374,
"repo_name": "maestro-hybrid-cloud/heat",
"id": "6c2b30bb516f3b6dcaa22180d43cca98acb2492c",
"size": "4514",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/glance/glance_image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6954236"
},
{
"name": "Shell",
"bytes": "33503"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.