hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf6ddfa979456bcab43c2ea5fdc401c1cf2c14d | 1,533 | py | Python | jumeaux/commands/init/main.py | ihatov08/jumeaux | 7d983474df4b6dcfa57ea1a66901fbc99ebababa | [
"MIT"
] | 11 | 2017-10-02T01:29:12.000Z | 2022-03-31T08:37:22.000Z | jumeaux/commands/init/main.py | ihatov08/jumeaux | 7d983474df4b6dcfa57ea1a66901fbc99ebababa | [
"MIT"
] | 79 | 2017-07-16T14:47:17.000Z | 2022-03-31T08:49:14.000Z | jumeaux/commands/init/main.py | ihatov08/jumeaux | 7d983474df4b6dcfa57ea1a66901fbc99ebababa | [
"MIT"
] | 2 | 2019-01-28T06:11:58.000Z | 2021-01-25T07:21:21.000Z | """Initialize a project template for Jumeaux
Usage:
{cli} (-h | --help)
{cli} [<name>]
Options:
<name> Initialize template name
-h --help Show this screen.
"""
import os
import shutil
from owlmixin import OwlMixin
from owlmixin import TOption
from jumeaux.logger import Logger, init_logger
logger: Logger = Logger(__name__)
SAMPLE_DIR = os.path.abspath(f"{os.path.dirname(__file__)}/../../sample")
class Args(OwlMixin):
    """Parsed command-line arguments for the ``init`` command."""
    # Optional template name passed on the command line (<name> in the usage).
    name: TOption[str]
def run(args: Args):
    """Copy a Jumeaux project template into the current directory.

    With name ``addon`` every entry under ``sample/addon`` is copied; for any
    other name, ``config.yml``, ``requests`` and the ``api`` directory of
    ``sample/template/<name>`` are copied. An unknown name terminates the
    process with a message listing the valid template names.
    """
    init_logger(0)

    # TODO: refactoring
    if args.name.get() == "addon":
        addon_dir = f"{SAMPLE_DIR}/addon"
        for f in os.listdir(addon_dir):
            # Directories need copytree; plain files need copy.
            if os.path.isdir(f"{addon_dir}/{f}"):
                shutil.copytree(f"{addon_dir}/{f}", f)
            else:
                shutil.copy(f"{addon_dir}/{f}", f)
            logger.info_lv1(f"✨ [Create] {f}")
        return

    sample_dir = f"{SAMPLE_DIR}/template"
    target_dir = f"{sample_dir}/{args.name.get()}"
    if os.path.exists(target_dir):
        for f in ["config.yml", "requests"]:
            shutil.copy(f"{target_dir}/{f}", ".")
            logger.info_lv1(f"✨ [Create] {f}")
        shutil.copytree(f"{target_dir}/api", "api")
        logger.info_lv1(f"✨ [Create] templates with a api directory")
        return

    # The successful branch above returned, so reaching this point means the
    # requested template does not exist (the original code re-checked
    # os.path.exists here, which was always False at this point).
    exit(
        f"""
Please specify a valid name.
✨ [Valid names] ✨
{os.linesep.join(os.listdir(sample_dir))}
""".strip()
    )
| 25.131148 | 73 | 0.572081 |
acf6dede8bd2e838616ca2ac9069da77b8b0af50 | 2,024 | py | Python | hax/hax/queue/cli.py | papan-singh/cortx-hare | 4d6a533750dffe0b71c633a3707da79d9883b3dd | [
"Apache-2.0"
] | null | null | null | hax/hax/queue/cli.py | papan-singh/cortx-hare | 4d6a533750dffe0b71c633a3707da79d9883b3dd | [
"Apache-2.0"
] | null | null | null | hax/hax/queue/cli.py | papan-singh/cortx-hare | 4d6a533750dffe0b71c633a3707da79d9883b3dd | [
"Apache-2.0"
] | null | null | null | import logging
import sys
from typing import NamedTuple
import click
import inject
from hax.common import di_configuration
from hax.queue.publish import BQPublisher, EQPublisher, Publisher
# Immutable container for the parsed CLI invocation: the raw payload string,
# the entry type, and the publisher object that will send it.
AppCtx = NamedTuple('AppCtx', [('payload', str), ('type', str),
                               ('publisher', Publisher)])
def _setup_logging():
    """Configure root logging: DEBUG level with timestamped messages."""
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s [%(levelname)s] %(message)s')
@click.command()
@click.argument('queue',
                type=click.Choice(['eq', 'bq'], case_sensitive=False),
                required=True)
@click.argument('type', type=str, required=True)
@click.argument('payload', type=str, required=True)
@click.pass_context
def parse_opts(ctx, queue: str, type: str, payload: str):
    """Send entry to target queue.
    \b
    QUEUE Name of the target queue. Supported values: "eq" (Event Queue), \
"bq" (Broadcast Queue).
    TYPE Type of the entry.
    PAYLOAD Entry payload encoded as JSON value.
    """
    ctx.ensure_object(dict)
    name = queue.lower()
    # Map the (case-normalized) queue name to its publisher class.
    types = {'eq': EQPublisher, 'bq': BQPublisher}
    # We're lucky now because both constructors have the same zero count
    # of arguments.
    # If the things change, such oneliner must be refactored.
    publisher: Publisher = types[name]()
    # Stash the parsed arguments on the click context so main() can read them.
    ctx.obj['result'] = AppCtx(payload=payload,
                               type=type,
                               publisher=publisher)
    return ctx.obj
def main():
    """CLI entry point: parse argv, publish the entry, log the resulting offset."""
    _setup_logging()
    inject.configure(di_configuration)
    try:
        # standalone_mode=False makes click return our context object
        # instead of calling sys.exit() itself.
        raw_ctx = parse_opts(args=sys.argv[1:],
                             standalone_mode=False,
                             obj={})
        if type(raw_ctx) is not dict:
            # click returned something else (e.g. after --help); bail out.
            exit(1)
        app_context = raw_ctx['result']
        pub = app_context.publisher
        offset = pub.publish(app_context.type, app_context.payload)
        logging.info('Written to epoch: %s', offset)
    except Exception:
        logging.exception('Exiting with failure')
| 31.138462 | 77 | 0.615613 |
acf6dee883825ba2f608072708192bd4e4782563 | 876 | py | Python | yellowbrick/model_selection/__init__.py | Haebuk/yellowbrick | 092c0ca25187b3cde9f608a1f7bc6d8c2b998f96 | [
"Apache-2.0"
] | null | null | null | yellowbrick/model_selection/__init__.py | Haebuk/yellowbrick | 092c0ca25187b3cde9f608a1f7bc6d8c2b998f96 | [
"Apache-2.0"
] | 1 | 2021-11-10T18:06:19.000Z | 2021-11-10T18:06:19.000Z | yellowbrick/model_selection/__init__.py | Haebuk/yellowbrick | 092c0ca25187b3cde9f608a1f7bc6d8c2b998f96 | [
"Apache-2.0"
] | null | null | null | # yellowbrick.model_selection
# Visualizers that wrap the model selection libraries of Scikit-Learn
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Fri Mar 30 10:36:12 2018 -0400
#
# ID: __init__.py [c5355ee] benjamin@bengfort.com $
"""
Visualizers that wrap the model selection libraries of Scikit-Learn
"""
##########################################################################
## Imports
##########################################################################
from .learning_curve import LearningCurve, learning_curve
from .validation_curve import ValidationCurve, validation_curve
from .cross_validation import CVScores, cv_scores
from .dropping_curve import DroppingCurve, dropping_curve
# RFECV and Feature Importances moved here as of YB v1.0
from .importances import FeatureImportances, feature_importances
from .rfecv import RFECV, rfecv
| 35.04 | 74 | 0.66895 |
acf6def53c8c1d6462ee7ada58bac013d72f18a8 | 1,203 | py | Python | test-crates/update_readme.py | Contextualist/maturin | fbf595863c41983263f79820159f9425bf84b2e5 | [
"Apache-2.0",
"MIT"
] | 135 | 2018-07-21T23:51:51.000Z | 2019-08-29T04:07:22.000Z | test-crates/update_readme.py | Contextualist/maturin | fbf595863c41983263f79820159f9425bf84b2e5 | [
"Apache-2.0",
"MIT"
] | 105 | 2018-07-21T23:33:12.000Z | 2019-08-30T17:13:33.000Z | test-crates/update_readme.py | aganders3/maturin | 6b5c8735bfae8c05091d71cf2ca8a09aa7c0a587 | [
"Apache-2.0",
"MIT"
] | 19 | 2018-07-22T23:31:14.000Z | 2019-08-29T04:08:34.000Z | #!/usr/bin/env python3
import re
import subprocess
from pathlib import Path
# Documentation files whose embedded `maturin <cmd> --help` usage blocks
# are refreshed by this script.
FILES = [
    "Readme.md",
    "guide/src/develop.md",
    "guide/src/tutorial.md",
    "guide/src/distribution.md",
]


def main():
    """Rewrite the USAGE sections in FILES with the current `--help` output.

    Locates each fenced usage block via regex, re-runs the corresponding
    `cargo run -- <command> --help`, and substitutes the fresh text in place.
    """
    # Repository root, so the script works from any working directory.
    root = Path(
        subprocess.check_output(
            ["git", "rev-parse", "--show-toplevel"], text=True
        ).strip()
    )
    for path in FILES:
        content = root.joinpath(path).read_text()
        matcher = re.compile(
            r"```\nUSAGE:\n maturin (\w+) (.*?)```", re.MULTILINE | re.DOTALL
        )
        replaces = {}
        for command, old in matcher.findall(content):
            # Ask the freshly built binary for its current help text.
            command_output = subprocess.check_output(
                ["cargo", "run", "--", command.lower(), "--help"], text=True
            )
            new = "USAGE:" + command_output.strip().split("USAGE:")[1] + "\n"
            # Remove trailing whitespace
            new = re.sub(" +\n", "\n", new)
            old = "USAGE:\n maturin " + command + " " + old
            replaces[old] = new
        for old, new in replaces.items():
            content = content.replace(old, new)
        root.joinpath(path).write_text(content)


if __name__ == "__main__":
    main()
| 25.595745 | 80 | 0.527847 |
acf6df76e6828993d0ebcfc61835557d75f348a5 | 788 | py | Python | provisioners/ansible/library/system_users_credentials.py | PradKhandelwal/aem-aws-stack-builder | 27ef24c0b42b921c622791ce579e91e3ed08912f | [
"Apache-2.0"
] | 36 | 2017-01-17T01:40:08.000Z | 2022-03-11T18:09:53.000Z | provisioners/ansible/library/system_users_credentials.py | PradKhandelwal/aem-aws-stack-builder | 27ef24c0b42b921c622791ce579e91e3ed08912f | [
"Apache-2.0"
] | 192 | 2017-01-24T01:53:39.000Z | 2021-10-21T03:11:36.000Z | provisioners/ansible/library/system_users_credentials.py | PradKhandelwal/aem-aws-stack-builder | 27ef24c0b42b921c622791ce579e91e3ed08912f | [
"Apache-2.0"
] | 50 | 2017-01-19T04:44:46.000Z | 2021-08-16T05:21:06.000Z | #!/usr/bin/python3
from ansible.module_utils.basic import *
import random
import string
def generate_password(length):
    """Return *length* random alphanumeric characters from the OS CSPRNG."""
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))
def main():
    """Ansible module entry point: emit credentials for AEM system users.

    With enable_default_passwords each password equals the user name
    (convenient for development); otherwise a random 100-character
    password is generated per user.
    """
    module = AnsibleModule(
        argument_spec = dict(
            enable_default_passwords = dict(required=True, type='bool'),
        )
    )
    # The fixed set of AEM system accounts that need credentials.
    system_users = ['orchestrator', 'replicator', 'deployer', 'exporter', 'importer', 'admin']
    credentials = {}
    for system_user in system_users:
        credentials[system_user] = system_user if module.params['enable_default_passwords'] == True else generate_password(100)
    response = credentials
    module.exit_json(changed = False, meta = response)


if __name__ == '__main__':
    main()
| 28.142857 | 127 | 0.699239 |
acf6dfbad596472656f97f5abd109e4328aa7c14 | 6,129 | py | Python | lib/WebOb-1.0.8/webob/etag.py | AniX/webapp-improved | a6bd7d4a98642ce0f708e7d53c66f70f168d02f6 | [
"Apache-2.0"
] | 15 | 2015-01-18T17:30:31.000Z | 2019-10-25T17:14:41.000Z | lib/WebOb-1.0.8/webob/etag.py | AniX/webapp-improved | a6bd7d4a98642ce0f708e7d53c66f70f168d02f6 | [
"Apache-2.0"
] | 1 | 2015-09-30T03:15:36.000Z | 2015-09-30T03:15:36.000Z | lib/WebOb-1.0.8/webob/etag.py | AniX/webapp-improved | a6bd7d4a98642ce0f708e7d53c66f70f168d02f6 | [
"Apache-2.0"
] | 2 | 2015-06-17T23:01:13.000Z | 2015-07-08T23:10:19.000Z | """
Does parsing of ETag-related headers: If-None-Matches, If-Matches
Also If-Range parsing
"""
from webob.datetime_utils import *
from webob.util import rfc_reference
__all__ = ['AnyETag', 'NoETag', 'ETagMatcher', 'IfRange', 'NoIfRange', 'etag_property']
def etag_property(key, default, rfc_section):
    """Build a property that exposes *key* of the WSGI environ as an ETag object."""
    doc = ("Gets and sets the %r key in the environment." % key
           + rfc_reference(key, rfc_section)
           + " Converts it as a Etag.")

    def fget(req):
        # Missing/empty header -> the supplied default object;
        # '*' -> the match-anything singleton; otherwise parse the header.
        value = req.environ.get(key)
        if not value:
            return default
        if value == '*':
            return AnyETag
        return ETagMatcher.parse(value)

    def fset(req, val):
        req.environ[key] = None if val is None else str(val)

    def fdel(req):
        del req.environ[key]

    return property(fget, fset, fdel, doc=doc)
class _AnyETag(object):
    """Wildcard ETag: behaves like the ``*`` header value and matches every tag
    (also used for a missing ETag when matching is 'safe')."""

    def __str__(self):
        return '*'

    def __repr__(self):
        return '<ETag *>'

    def __nonzero__(self):
        # Python 2 truthiness hook: the wildcard counts as "no specific tag".
        return False

    def __contains__(self, other):
        # Every tag matches the wildcard.
        return True

    def weak_match(self, other):
        # Weak comparison likewise always succeeds.
        return True

AnyETag = _AnyETag()
class _NoETag(object):
    """Absent ETag that matches nothing (used when matching must be unsafe)."""

    def __str__(self):
        return ''

    def __repr__(self):
        return '<No ETag>'

    def __nonzero__(self):
        # Python 2 truthiness hook: an absent tag is falsy.
        return False

    def __contains__(self, other):
        # Nothing can match a missing ETag.
        return False

    def weak_match(self, other):
        return False

NoETag = _NoETag()
class ETagMatcher(object):
    """
    Represents an ETag request.  Supports containment to see if an
    ETag matches.  You can also use
    ``etag_matcher.weak_match(etag)`` to allow weak ETags to match
    (allowable for conditional GET requests, but not ranges or other
    methods).
    """

    def __init__(self, etags, weak_etags=()):
        # etags: strong entity tags; weak_etags: tags parsed from a W/ prefix.
        self.etags = etags
        self.weak_etags = weak_etags

    def __contains__(self, other):
        # Strong containment still accepts weak tags stored at parse time.
        return other in self.etags or other in self.weak_etags

    def weak_match(self, other):
        # Strip an optional W/ prefix from the candidate before comparing.
        if other.lower().startswith('w/'):
            other = other[2:]
        return other in self.etags or other in self.weak_etags

    def __repr__(self):
        return '<ETag %s>' % (
            ' or '.join(self.etags))

    def parse(cls, value):
        """
        Parse this from a header value
        """
        results = []
        weak_results = []
        # Consume the comma-separated (optionally quoted, optionally
        # W/-prefixed) tag list one item at a time.
        while value:
            if value.lower().startswith('w/'):
                # Next item is weak
                weak = True
                value = value[2:]
            else:
                weak = False
            if value.startswith('"'):
                # Quoted tag: take everything up to the closing quote.
                try:
                    etag, rest = value[1:].split('"', 1)
                except ValueError:
                    # Unterminated quote: salvage what we can.
                    etag = value.strip(' ",')
                    rest = ''
                else:
                    rest = rest.strip(', ')
            else:
                # Unquoted tag: ends at the next comma (or end of string).
                if ',' in value:
                    etag, rest = value.split(',', 1)
                    rest = rest.strip()
                else:
                    etag = value
                    rest = ''
            if etag == '*':
                # A wildcard anywhere makes the whole header match-anything.
                return AnyETag
            if etag:
                if weak:
                    weak_results.append(etag)
                else:
                    results.append(etag)
            value = rest
        return cls(results, weak_results)
    parse = classmethod(parse)

    def __str__(self):
        # FIXME: should I quote these?
        items = list(self.etags)
        for weak in self.weak_etags:
            items.append('W/%s' % weak)
        return ', '.join(items)
class IfRange(object):
    """Parses and represents the If-Range header, which may carry either an
    entity tag or an HTTP date."""

    def __init__(self, etag=None, date=None):
        self.etag = etag
        self.date = date

    def __repr__(self):
        shown_etag = '*' if self.etag is None else str(self.etag)
        shown_date = '*' if self.date is None else serialize_date(self.date)
        return '<%s etag=%s, date=%s>' % (
            self.__class__.__name__, shown_etag, shown_date)

    def __str__(self):
        # An etag takes precedence over a date when rendering.
        if self.etag is not None:
            return str(self.etag)
        if self.date:
            return serialize_date(self.date)
        return ''

    def match(self, etag=None, last_modified=None):
        """
        Return True if the If-Range header matches the given etag or last_modified
        """
        if self.date is not None:
            # Date condition: needs a modification time no later than ours.
            return last_modified is not None and last_modified <= self.date
        if self.etag is not None:
            # ETag condition: a falsy candidate can never match.
            return bool(etag) and etag in self.etag
        # No condition at all: everything matches.
        return True

    def match_response(self, response):
        """
        Return True if this matches the given ``webob.Response`` instance.
        """
        return self.match(etag=response.etag,
                          last_modified=response.last_modified)

    @classmethod
    def parse(cls, value):
        """
        Parse this from a header value.
        """
        if not value:
            return cls(etag=NoETag(), date=None)
        if value.endswith(' GMT'):
            # Must be a date -- HTTP dates always end in " GMT".
            return cls(etag=None, date=parse_date(value))
        return cls(etag=ETagMatcher.parse(value), date=None)
class _NoIfRange(object):
    """Stand-in for a missing If-Range header; it never restricts anything."""

    def __str__(self):
        return ''

    def __repr__(self):
        return '<Empty If-Range>'

    def __nonzero__(self):
        # Python 2 truthiness hook: an absent header is falsy.
        return False

    def match(self, etag=None, last_modified=None):
        # No header means no condition, so everything matches.
        return True

    def match_response(self, response):
        return True

NoIfRange = _NoIfRange()
| 25.5375 | 87 | 0.533203 |
acf6dfbf3916e73fddbaa0a0d890b10fd5aeb42c | 2,612 | py | Python | tests/test_fc/test_fcattroutofservice.py | wardy3/mdssdk | 393102fab146917a3893b6aa2bd6a0449ad491c5 | [
"Apache-2.0"
] | 4 | 2020-12-13T20:02:43.000Z | 2022-02-27T23:36:58.000Z | tests/test_fc/test_fcattroutofservice.py | wardy3/mdssdk | 393102fab146917a3893b6aa2bd6a0449ad491c5 | [
"Apache-2.0"
] | 13 | 2020-09-23T07:30:15.000Z | 2022-03-30T01:12:25.000Z | tests/test_fc/test_fcattroutofservice.py | wardy3/mdssdk | 393102fab146917a3893b6aa2bd6a0449ad491c5 | [
"Apache-2.0"
] | 12 | 2020-05-11T09:33:21.000Z | 2022-03-18T11:11:28.000Z | import random
import unittest
import time
from mdssdk.connection_manager.errors import CLIError
from mdssdk.fc import Fc
from tests.test_fc.vars import *
log = logging.getLogger(__name__)
class TestFcAttrOutOfService(unittest.TestCase):
    """Integration tests for the Fc.out_of_service attribute against a live switch."""

    def __init__(self, testName, sw):
        # Custom constructor so the suite runner can inject the switch object.
        super().__init__(testName)
        self.switch = sw

    def setUp(self) -> None:
        """Pick a random FC interface on the switch and remember its status."""
        log.debug(self.switch.version)
        log.debug(self.switch.ipaddr)
        interfaces = self.switch.interfaces
        # Keep sampling until we land on a real Fc interface (not a port-channel).
        while True:
            k, v = random.choice(list(interfaces.items()))
            if type(v) is Fc:
                self.fc = v
                log.debug(k)
                break
        self.status_values = status_values
        # Saved so teardown can attempt to restore the port.
        self.old = self.fc.status

    def test_out_of_service_read_error(self):
        # out_of_service is write-only; reading must raise AttributeError.
        with self.assertRaises(AttributeError) as e:
            log.debug(self.fc.out_of_service)
        self.assertEqual("unreadable attribute", str(e.exception))

    def test_out_of_service_write(self):
        """Toggling out_of_service must move the port into/out of outOfServc."""
        # self.skipTest("needs to be fixed")
        if self.fc.status == "outOfServc":
            self.fc.out_of_service = False
            self.assertIn(self.fc.status, self.status_values)
            self.fc.out_of_service = True
            self.assertEqual("outOfServc", self.fc.status)
        else:
            try:
                self.fc.out_of_service = True
            except CLIError as c:
                # Port-channel members cannot be taken out of service.
                if "requested config not allowed on bundle member" in c.message:
                    self.skipTest(
                        "Port "
                        + self.fc.name
                        + " is part of a PC and hence cannot set to out-of-service, Please rerun the tests"
                    )
            self.assertEqual("outOfServc", self.fc.status)
            self.fc.out_of_service = False
            # if self.old != "down":
            self.fc.status = "no shutdown"
            time.sleep(2)
            # Sometimes the states may not be same so no need to check this as of now
            # self.assertEqual(self.old, self.fc.status)

    def test_out_of_service_write_invalid(self):
        # Only booleans are accepted; anything else must raise TypeError.
        with self.assertRaises(TypeError) as e:
            self.fc.out_of_service = "asdf"
        self.assertEqual("Only bool value(true/false) supported.", str(e.exception))

    def tearDown(self) -> None:
        """Best-effort restore: bring the port back into service and up."""
        self.fc.out_of_service = False
        # if self.old != "down":
        self.fc.status = "no shutdown"
        time.sleep(5)
        # Sometimes the states may not be same so no need to check this as of now
        # self.assertEqual(self.old, self.fc.status)
| 35.780822 | 107 | 0.593798 |
acf6e0d549652abdef639afbe5b76d73e9e10295 | 4,055 | py | Python | examples/textbook/evolve_star_sph_evolve.py | joshuawall/amuse | c2034074ee76c08057c4faa96c32044ab40952e9 | [
"Apache-2.0"
] | 1 | 2019-12-28T22:47:51.000Z | 2019-12-28T22:47:51.000Z | examples/textbook/evolve_star_sph_evolve.py | joshuawall/amuse | c2034074ee76c08057c4faa96c32044ab40952e9 | [
"Apache-2.0"
] | null | null | null | examples/textbook/evolve_star_sph_evolve.py | joshuawall/amuse | c2034074ee76c08057c4faa96c32044ab40952e9 | [
"Apache-2.0"
] | 2 | 2021-11-19T04:41:37.000Z | 2021-11-20T02:11:17.000Z | import os
import os.path
import shutil
import numpy
from amuse.lab import *
from amuse.community.mesa.interface import MESA as stellar_evolution_code
from amuse.ext.star_to_sph import convert_stellar_model_to_SPH
from amuse.ext.sph_to_star import convert_SPH_to_stellar_model
from matplotlib import pyplot
def plot_clumps(groups):
number_of_particles_in_group = []
fraction_of_mass_in_group = []
# number_of_particles_in_group.append(len(group))
# fraction = (group.mass.sum()/total_mass)
# fraction_of_mass_in_group.append(fraction)
print "N=", len(groups)
ci = ['r', 'b', 'g', 'k']
figure = pyplot.figure(figsize= (12,6))
i = 0
alpha = 1
sizes = 50
for group in groups:
pyplot.scatter(group.x.value_in(units.RSun), group.y.value_in(units.RSun), sizes, ci[i], edgecolors = "none", alpha = alpha)
# pyplot.scatter(
# group.x.value_in(units.RSun),
# group.y.value_in(units.RSun),
# s = 1,#group.mass.value_in(units.MSun),
# c = ci[i]
# )
i+=1
pyplot.xlabel('x (AU)')
pyplot.ylabel('y (A*)')
# pyplot.xlim(-30, 30)
# pyplot.ylim(-30, 30)
pyplot.show()
def find_clumps(particles, unit_converter):
    """Group *particles* into density clumps using the Hop algorithm.

    Returns a list of particle subsets, one per clump found.
    """
    hop = Hop(unit_converter)
    hop.particles.add_particles(particles)
    hop.calculate_densities()
    # Thresholds are set relative to the mean particle density.
    mean_densty = hop.particles.density.mean()
    hop.parameters.peak_density_threshold = mean_densty
    hop.parameters.saddle_density_threshold = 0.99*mean_densty
    hop.parameters.outer_density_threshold = 0.01*mean_densty
    # print "Peak density treshold:",
    hop.do_hop()
    # Map Hop's internal groups back onto the caller's particle set.
    result = [x.get_intersecting_subset_in(particles) for x in hop.groups()]
    hop.stop()
    return result
def new_option_parser():
    """Build the command-line option parser for this script.

    Options: -f input filename, -t evolution time (Myr), -M stellar mass (MSun).
    """
    from amuse.units.optparse import OptionParser
    result = OptionParser()
    result.add_option("-f",
                      dest="filename",
                      default = "hydro_triple_gas.hdf5",
                      help="input filename [%default]")
    result.add_option("-t", unit=units.Myr,
                      dest="time", type="float",
                      default = 1|units.Myr,
                      help="evolution time scale [%default]")
    result.add_option("-M", unit=units.MSun,
                      dest="Mprim", type="float",
                      default = 3|units.MSun,
                      help="stellar mass [%default]")
    return result
if __name__ == "__main__":
    set_printing_strategy("custom", #nbody_converter = converter,
                          preferred_units = [units.MSun, units.RSun, units.Myr],
                          precision = 11, prefix = "",
                          separator = " [", suffix = "]")
    o, arguments = new_option_parser().parse_args()

    # Step 1: evolve a single star to the requested age, then sample it
    # as an SPH particle distribution.
    print "initialize star"
    stellar_evolution = EVtwin()
    stellar_evolution.particles.add_particle(Particle(mass=o.Mprim))
    stellar_evolution.evolve_model(o.time)
    particles = convert_stellar_model_to_SPH(
        stellar_evolution.particles[0],
        500,
        seed=12345
    ).gas_particles
    stellar_evolution.stop()

    from amuse.units.generic_unit_converter import ConvertBetweenGenericAndSiUnits
    # Step 2: relax the SPH realization briefly so density/u/pressure are set.
    print "convert star to SPH"
    converter = nbody_system.nbody_to_si(1|units.hour, 1|units.RSun)
    hydrodynamics = Gadget2(converter)
    hydrodynamics.gas_particles.add_particles(particles)
    hydrodynamics.evolve_model(1.0|units.s)
    hydrodynamics.gas_particles.copy_values_of_attributes_to(["density", "u", "pressure"], particles)
    hydrodynamics.stop()

    # Step 3: turn the SPH particles back into a 1D stellar model and
    # continue the stellar evolution from there.
    print "convert SPH to stellar model"
    model = convert_SPH_to_stellar_model(particles)
    #stellar_evolution = MESA()
    print "initiate stellar evolution model"
    stellar_evolution = EVtwin(redirect="none")
    stellar_evolution.new_particle_from_model(model, 0.0|units.Myr)
    print "star:", stellar_evolution.particles
    print "evolve star"
    stellar_evolution.evolve_model(2*o.time)
    print stellar_evolution.particles
| 32.96748 | 132 | 0.650308 |
acf6e1a1a9860f55fc0ebc6ab531c67faa9678c3 | 6,673 | py | Python | ocrd_tesserocr/binarize.py | stweil/ocrd_tesserocr | 7e9dc53a2c1df97a5b1f5f6eb6e5771e3792890d | [
"MIT"
] | 1 | 2018-04-16T18:43:34.000Z | 2018-04-16T18:43:34.000Z | ocrd_tesserocr/binarize.py | stweil/ocrd_tesserocr | 7e9dc53a2c1df97a5b1f5f6eb6e5771e3792890d | [
"MIT"
] | null | null | null | ocrd_tesserocr/binarize.py | stweil/ocrd_tesserocr | 7e9dc53a2c1df97a5b1f5f6eb6e5771e3792890d | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import os.path
from tesserocr import (
PyTessBaseAPI,
PSM, RIL
)
from ocrd_utils import (
getLogger, concat_padded,
MIMETYPE_PAGE
)
from ocrd_modelfactory import page_from_file
from ocrd_models.ocrd_page import (
MetadataItemType,
LabelsType, LabelType,
AlternativeImageType,
TextRegionType,
to_xml
)
from ocrd import Processor
from .config import TESSDATA_PREFIX, OCRD_TOOL
TOOL = 'ocrd-tesserocr-binarize'
LOG = getLogger('processor.TesserocrBinarize')
FALLBACK_IMAGE_GRP = 'OCR-D-IMG-BIN'
class TesserocrBinarize(Processor):
    """OCR-D processor that binarizes page segments with Tesseract."""

    def __init__(self, *args, **kwargs):
        kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]
        kwargs['version'] = OCRD_TOOL['version']
        super(TesserocrBinarize, self).__init__(*args, **kwargs)
        if hasattr(self, 'output_file_grp'):
            # output_file_grp may be "PAGE_GRP,IMAGE_GRP"; with no comma,
            # fall back to the default image group.
            try:
                self.page_grp, self.image_grp = self.output_file_grp.split(',')
            except ValueError:
                self.page_grp = self.output_file_grp
                self.image_grp = FALLBACK_IMAGE_GRP
                LOG.info("No output file group for images specified, falling back to '%s'", FALLBACK_IMAGE_GRP)

    def process(self):
        """Performs binarization of the region / line with Tesseract on the workspace.
        Open and deserialize PAGE input files and their respective images,
        then iterate over the element hierarchy down to the requested level.
        Set up Tesseract to recognize the segment image's layout, and get
        the binarized image. Create an image file, and reference it as
        AlternativeImage in the segment element. Add the new image file
        to the workspace with the fileGrp USE given in the second position
        of the output fileGrp, or ``OCR-D-IMG-BIN``, and an ID based on input
        file and input element.
        Produce a new output file by serialising the resulting hierarchy.
        """
        oplevel = self.parameter['operation_level']
        with PyTessBaseAPI(path=TESSDATA_PREFIX) as tessapi:
            for n, input_file in enumerate(self.input_files):
                file_id = input_file.ID.replace(self.input_file_grp, self.image_grp)
                page_id = input_file.pageId or input_file.ID
                LOG.info("INPUT FILE %i / %s", n, page_id)
                pcgts = page_from_file(self.workspace.download_file(input_file))
                page = pcgts.get_Page()
                # add metadata about this operation and its runtime parameters:
                metadata = pcgts.get_Metadata() # ensured by from_file()
                metadata.add_MetadataItem(
                    MetadataItemType(type_="processingStep",
                                     name=self.ocrd_tool['steps'][0],
                                     value=TOOL,
                                     Labels=[LabelsType(
                                         externalModel="ocrd-tool",
                                         externalId="parameters",
                                         Label=[LabelType(type_=name,
                                                          value=self.parameter[name])
                                                for name in self.parameter.keys()])]))
                page_image, page_xywh, _ = self.workspace.image_from_page(
                    page, page_id)
                LOG.info("Binarizing on '%s' level in page '%s'", oplevel, page_id)
                regions = page.get_TextRegion() + page.get_TableRegion()
                if not regions:
                    LOG.warning("Page '%s' contains no text regions", page_id)
                for region in regions:
                    region_image, region_xywh = self.workspace.image_from_segment(
                        region, page_image, page_xywh)
                    if oplevel == 'region':
                        # Whole region as a single block.
                        tessapi.SetPageSegMode(PSM.SINGLE_BLOCK)
                        self._process_segment(tessapi, RIL.BLOCK, region, region_image, region_xywh,
                                              "region '%s'" % region.id, input_file.pageId,
                                              file_id + '_' + region.id)
                    elif isinstance(region, TextRegionType):
                        # Line level: descend into each text line of the region.
                        lines = region.get_TextLine()
                        if not lines:
                            LOG.warning("Page '%s' region '%s' contains no text lines",
                                        page_id, region.id)
                        for line in lines:
                            line_image, line_xywh = self.workspace.image_from_segment(
                                line, region_image, region_xywh)
                            tessapi.SetPageSegMode(PSM.SINGLE_LINE)
                            self._process_segment(tessapi, RIL.TEXTLINE, line, line_image, line_xywh,
                                                  "line '%s'" % line.id, input_file.pageId,
                                                  file_id + '_' + region.id + '_' + line.id)
                # Use input_file's basename for the new file -
                # this way the files retain the same basenames:
                file_id = input_file.ID.replace(self.input_file_grp, self.page_grp)
                if file_id == input_file.ID:
                    file_id = concat_padded(self.page_grp, n)
                self.workspace.add_file(
                    ID=file_id,
                    file_grp=self.page_grp,
                    pageId=input_file.pageId,
                    mimetype=MIMETYPE_PAGE,
                    local_filename=os.path.join(self.page_grp,
                                                file_id + '.xml'),
                    content=to_xml(pcgts))

    def _process_segment(self, tessapi, ril, segment, image, xywh, where, page_id, file_id):
        """Binarize one segment image and attach it as an AlternativeImage."""
        tessapi.SetImage(image)
        image_bin = None
        layout = tessapi.AnalyseLayout()
        if layout:
            image_bin = layout.GetBinaryImage(ril)
        if not image_bin:
            LOG.error('Cannot binarize %s', where)
            return
        # update METS (add the image file):
        file_path = self.workspace.save_image_file(image_bin,
                                                   file_id,
                                                   page_id=page_id,
                                                   file_grp=self.image_grp)
        # update PAGE (reference the image file):
        features = xywh['features'] + ",binarized"
        segment.add_AlternativeImage(AlternativeImageType(
            filename=file_path, comments=features))
| 47.326241 | 111 | 0.541436 |
acf6e2938755650cb5a5051cd36d03f18f220ad0 | 4,165 | py | Python | music/_sources/music_maker_f4ff261bb79f60c6193dec8f5023b10d.py | catniplab/ML-music-analysis | 793d54ed16166fbcd9acf4eec24998892334e064 | [
"MIT"
] | null | null | null | music/_sources/music_maker_f4ff261bb79f60c6193dec8f5023b10d.py | catniplab/ML-music-analysis | 793d54ed16166fbcd9acf4eec24998892334e064 | [
"MIT"
] | null | null | null | music/_sources/music_maker_f4ff261bb79f60c6193dec8f5023b10d.py | catniplab/ML-music-analysis | 793d54ed16166fbcd9acf4eec24998892334e064 | [
"MIT"
] | 1 | 2021-12-01T22:57:56.000Z | 2021-12-01T22:57:56.000Z | """
This script is for generating new music based on the LocusLab datasets and the models trained on them.
"""
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchsso.optim as soptim
import torch.nn.functional as F
import random
from torch.utils.data import DataLoader
from scipy.io import loadmat
from sacred import Experiment
from torch import Tensor, device
from copy import deepcopy
from time import sleep
from tqdm import tqdm
from src.neural_nets.models import get_model
from src.midi.utils import to_midi, make_music
# create a new sacred experiment whose name is an integer
ex = Experiment(name=str(random.randint(0, 1000000)))
# default configurations
@ex.config
def cfg():
    # Sacred default configuration: every local variable below becomes an
    # experiment config entry that can be overridden from the command line.
    # supported datasets
    # JSB_Chorales (short)
    # Nottingham (medium)
    # Piano_midi (long)
    # MuseData (extra long)
    dataset = "JSB_Chorales"
    # traindata, testdata, validdata
    key = "traindata"
    index = 0 # which song in the set will be input
    # Supported architectures
    # LDS
    # TANH
    architecture = 'TANH'
    readout = 'linear'
    # 88 matches the number of piano keys in the datasets' encoding.
    input_size = 88
    hidden_size = 300
    num_layers = 1
    output_size = 88
    # Path to the trained model weights to load.
    sdpath = 'models/204/final_state_dict.pt'
    true_steps = 0 # how many time steps to copy from the original track
    input_steps = 100 # how many time steps will be the model prediction given the original track
    free_steps = 100 # how many time steps will be based on the output of the model alone
    history = 50 # how many past steps the model takes into account when synthesizing new music
    variance = 0.2 # variance of the noise meant to knock the system out of stable limit cycles
    # what to name the midi and waveform files
    song_name = "cyberbach"
    # whether or not we automatically use timidity to convert to wav
    convert2wav = True
def _seed_all(_seed) -> None:
torch.manual_seed(_seed)
np.random.seed(_seed)
random.seed(_seed)
@ex.automain
def music_synthesis(dataset,
                    key,
                    index,
                    architecture,
                    readout,
                    input_size,
                    hidden_size,
                    num_layers,
                    output_size,
                    sdpath,
                    true_steps,
                    input_steps,
                    free_steps,
                    history,
                    variance,
                    song_name,
                    convert2wav,
                    _seed,
                    _log,
                    _run):
    """Generate a MIDI (and optionally WAV) track from a trained model.

    Loads the saved model weights, seeds generation with a song from the
    chosen dataset, writes the result to a temporary directory, registers
    the files as sacred artifacts, and cleans up.
    """
    # save artifacts to a temporary directory that gets erased when the experiment is over
    save_dir = 'tmp_' + str(_seed)
    os.system('mkdir ' + save_dir)
    save_dir += '/'
    # instructions for creating un-initialized model
    model_dict = {
        'architecture': architecture,
        'readout': readout,
        'input_size': input_size,
        'hidden_size': hidden_size,
        'num_layers': num_layers,
        'output_size': output_size,
        'gradient_clipping': 1,
    }
    # construct the model based on the saved state
    model = get_model(model_dict, {'init': "default"}, False)
    sd = torch.load(sdpath)
    model.load_state_dict(sd)
    # get the desired song
    track = loadmat("data/" + dataset)[key][0][index]
    # generate a new song
    new_track = make_music(model, track, true_steps, input_steps, free_steps, history, variance)
    # temporary name for this song
    track_name = save_dir + song_name + '.mid'
    # convert to midi and save in temporary directory
    to_midi(0, new_track, track_name)
    # make sacred remember where it is
    _run.add_artifact(track_name)
    if convert2wav:
        # create the wav file using timidity
        os.system("timidity -Ow " + track_name)
        # make sacred remember
        _run.add_artifact(save_dir + song_name + '.wav')
    # wait for a second then remove the temporary directory
    sleep(1)
    os.system("rm -r " + save_dir)
os.system("rm -r " + save_dir) | 28.923611 | 102 | 0.62401 |
acf6e333c484e17ef9d08da8d4cadbec33000260 | 11,695 | py | Python | dygraph/paddlex/ppdet/modeling/backbones/blazenet.py | follower/PaddleX | 466e985f16cb82a634e3ffcf72a5784fb33a6897 | [
"Apache-2.0"
] | 1 | 2021-07-10T09:54:19.000Z | 2021-07-10T09:54:19.000Z | dygraph/paddlex/ppdet/modeling/backbones/blazenet.py | liyhc/PaddleX | bad2fd2b97cdf2ffa783650ccee0458a53dfbe6e | [
"Apache-2.0"
] | null | null | null | dygraph/paddlex/ppdet/modeling/backbones/blazenet.py | liyhc/PaddleX | bad2fd2b97cdf2ffa783650ccee0458a53dfbe6e | [
"Apache-2.0"
] | null | null | null | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from paddle.nn.initializer import KaimingNormal
from paddlex.ppdet.core.workspace import register, serializable
from numbers import Integral
from ..shape_spec import ShapeSpec
__all__ = ['BlazeNet']
class ConvBNLayer(nn.Layer):
    """Conv2D + BatchNorm + optional activation, with named parameters.

    The `name` prefix is baked into every weight/statistic name
    (`<name>_weights`, `<name>_bn_scale`, ...), so renaming breaks loading of
    pretrained checkpoints.

    Args:
        in_channels (int): number of input feature channels.
        out_channels (int): number of output feature channels.
        kernel_size (int): square convolution kernel size.
        stride (int): convolution stride.
        padding (int): convolution padding.
        num_groups (int): conv groups; set to `out_channels` for depthwise.
        act (str|None): 'relu', 'relu6' or None for no activation.
        conv_lr (float): learning-rate multiplier for the conv weights.
        conv_decay (float): accepted but currently unused in this layer.
        norm_decay (float): accepted but currently unused in this layer.
        norm_type (str): 'sync_bn' selects SyncBatchNorm, anything else BatchNorm.
        name (str): prefix for all parameter names (required in practice).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 num_groups=1,
                 act='relu',
                 conv_lr=0.1,
                 conv_decay=0.,
                 norm_decay=0.,
                 norm_type='bn',
                 name=None):
        # NOTE(review): conv_decay and norm_decay are never referenced below;
        # presumably regularization was meant to be wired in — confirm upstream.
        super(ConvBNLayer, self).__init__()
        self.act = act
        # Convolution without bias; the following batch norm supplies the offset.
        self._conv = nn.Conv2D(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            weight_attr=ParamAttr(
                learning_rate=conv_lr,
                initializer=KaimingNormal(),
                name=name + "_weights"),
            bias_attr=False)

        param_attr = ParamAttr(name=name + "_bn_scale")
        bias_attr = ParamAttr(name=name + "_bn_offset")
        if norm_type == 'sync_bn':
            self._batch_norm = nn.SyncBatchNorm(
                out_channels, weight_attr=param_attr, bias_attr=bias_attr)
        else:
            # Activation is applied manually in forward(), hence act=None here.
            self._batch_norm = nn.BatchNorm(
                out_channels,
                act=None,
                param_attr=param_attr,
                bias_attr=bias_attr,
                use_global_stats=False,
                moving_mean_name=name + '_bn_mean',
                moving_variance_name=name + '_bn_variance')

    def forward(self, x):
        """Apply conv -> batch norm -> optional relu/relu6."""
        x = self._conv(x)
        x = self._batch_norm(x)
        if self.act == "relu":
            x = F.relu(x)
        elif self.act == "relu6":
            x = F.relu6(x)
        return x
class BlazeBlock(nn.Layer):
    """Single or double BlazeFace block: depthwise(+pointwise) convs with a
    pooled shortcut and a final ReLU over the residual sum.

    A "single" block is depthwise -> pointwise; passing `double_channels`
    enables a second depthwise -> pointwise stage ("double blaze").
    With `use_5x5kernel=False` each 5x5 depthwise conv is emulated by two
    stacked 3x3 depthwise convs.

    Args:
        in_channels (int): input channel count.
        out_channels1 (int): channels of the first depthwise stage.
        out_channels2 (int): channels after the first pointwise conv.
        double_channels (int|None): output channels of the second stage;
            None disables the double-blaze path.
        stride (int): 1 or 2; stride 2 adds a max-pool + 1x1 conv shortcut.
        use_5x5kernel (bool): use a real 5x5 depthwise kernel vs two 3x3s.
        name (str): prefix used for all sublayer/parameter names.
    """

    def __init__(self,
                 in_channels,
                 out_channels1,
                 out_channels2,
                 double_channels=None,
                 stride=1,
                 use_5x5kernel=True,
                 name=None):
        super(BlazeBlock, self).__init__()
        assert stride in [1, 2]
        # Stride-2 blocks downsample, so the identity path needs pooling too.
        self.use_pool = not stride == 1
        self.use_double_block = double_channels is not None
        # Plain list is fine here: add_sublayer() registers each layer.
        self.conv_dw = []
        if use_5x5kernel:
            self.conv_dw.append(
                self.add_sublayer(
                    name + "1_dw",
                    ConvBNLayer(
                        in_channels=in_channels,
                        out_channels=out_channels1,
                        kernel_size=5,
                        stride=stride,
                        padding=2,
                        num_groups=out_channels1,
                        name=name + "1_dw")))
        else:
            # Two 3x3 depthwise convs emulate the 5x5 receptive field; only
            # the second one carries the stride.
            self.conv_dw.append(
                self.add_sublayer(
                    name + "1_dw_1",
                    ConvBNLayer(
                        in_channels=in_channels,
                        out_channels=out_channels1,
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        num_groups=out_channels1,
                        name=name + "1_dw_1")))
            self.conv_dw.append(
                self.add_sublayer(
                    name + "1_dw_2",
                    ConvBNLayer(
                        in_channels=out_channels1,
                        out_channels=out_channels1,
                        kernel_size=3,
                        stride=stride,
                        padding=1,
                        num_groups=out_channels1,
                        name=name + "1_dw_2")))
        # In a double block the first pointwise conv keeps a ReLU; in a single
        # block the activation is deferred to the final residual ReLU.
        act = 'relu' if self.use_double_block else None
        self.conv_pw = ConvBNLayer(
            in_channels=out_channels1,
            out_channels=out_channels2,
            kernel_size=1,
            stride=1,
            padding=0,
            act=act,
            name=name + "1_sep")
        if self.use_double_block:
            self.conv_dw2 = []
            if use_5x5kernel:
                self.conv_dw2.append(
                    self.add_sublayer(
                        name + "2_dw",
                        ConvBNLayer(
                            in_channels=out_channels2,
                            out_channels=out_channels2,
                            kernel_size=5,
                            stride=1,
                            padding=2,
                            num_groups=out_channels2,
                            name=name + "2_dw")))
            else:
                self.conv_dw2.append(
                    self.add_sublayer(
                        name + "2_dw_1",
                        ConvBNLayer(
                            in_channels=out_channels2,
                            out_channels=out_channels2,
                            kernel_size=3,
                            stride=1,
                            padding=1,
                            num_groups=out_channels2,
                            # NOTE(review): "1_dw_1" here looks like a
                            # copy-paste of the first stage ("2_dw_1" expected)
                            # — left unchanged because parameter names affect
                            # pretrained-weight loading; confirm upstream.
                            name=name + "1_dw_1")))
                self.conv_dw2.append(
                    self.add_sublayer(
                        name + "2_dw_2",
                        ConvBNLayer(
                            in_channels=out_channels2,
                            out_channels=out_channels2,
                            kernel_size=3,
                            stride=1,
                            padding=1,
                            num_groups=out_channels2,
                            name=name + "2_dw_2")))
            self.conv_pw2 = ConvBNLayer(
                in_channels=out_channels2,
                out_channels=double_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                name=name + "2_sep")
        # shortcut: max-pool matches the downsampling, 1x1 conv matches channels
        if self.use_pool:
            shortcut_channel = double_channels or out_channels2
            self._shortcut = []
            self._shortcut.append(
                self.add_sublayer(
                    name + '_shortcut_pool',
                    nn.MaxPool2D(
                        kernel_size=stride, stride=stride, ceil_mode=True)))
            self._shortcut.append(
                self.add_sublayer(
                    name + '_shortcut_conv',
                    ConvBNLayer(
                        in_channels=in_channels,
                        out_channels=shortcut_channel,
                        kernel_size=1,
                        stride=1,
                        padding=0,
                        name="shortcut" + name)))

    def forward(self, x):
        """Run the main path, optionally the double path, then ReLU(x + y)."""
        y = x
        for conv_dw_block in self.conv_dw:
            y = conv_dw_block(y)
        y = self.conv_pw(y)
        if self.use_double_block:
            for conv_dw2_block in self.conv_dw2:
                y = conv_dw2_block(y)
            y = self.conv_pw2(y)
        # Transform the identity branch only when the block downsamples.
        if self.use_pool:
            for shortcut in self._shortcut:
                x = shortcut(x)
        return F.relu(paddle.add(x, y))
@register
@serializable
class BlazeNet(nn.Layer):
    """
    BlazeFace backbone, see https://arxiv.org/abs/1907.05047

    Builds a stem conv followed by a sequence of single-blaze blocks and then
    double-blaze blocks, and exposes two feature maps (stages -4 and -1 of the
    double-blaze stack) for the detection head.

    Args:
        blaze_filters (list): per single-blaze block, either
            [out_channels1, out_channels2] or [out_channels1, out_channels2, stride].
        double_blaze_filters (list): per double-blaze block, either
            [out1, out2, double_channels] or [out1, out2, double_channels, stride].
        use_5x5kernel (bool): whether or not filter size is 5x5 in depth-wise conv.
    """

    # NOTE: the mutable list defaults are only iterated, never mutated, and the
    # @register config machinery introspects them, so they are intentionally
    # kept as literal defaults.
    def __init__(
            self,
            blaze_filters=[[24, 24], [24, 24], [24, 48, 2], [48, 48], [48, 48]],
            double_blaze_filters=[[48, 24, 96, 2], [96, 24, 96], [96, 24, 96],
                                  [96, 24, 96, 2], [96, 24, 96], [96, 24, 96]],
            use_5x5kernel=True):
        super(BlazeNet, self).__init__()
        conv1_num_filters = blaze_filters[0][0]
        # Stem: 3x3 stride-2 conv over the RGB input.
        self.conv1 = ConvBNLayer(
            in_channels=3,
            out_channels=conv1_num_filters,
            kernel_size=3,
            stride=2,
            padding=1,
            name="conv1")
        in_channels = conv1_num_filters
        self.blaze_block = []
        self._out_channels = []
        for k, v in enumerate(blaze_filters):
            # Fix: the message previously contained a '{}' placeholder that
            # was never filled in.
            assert len(v) in [2, 3], \
                "blaze_filters {} not in [2, 3]".format(v)
            if len(v) == 2:
                self.blaze_block.append(
                    self.add_sublayer(
                        'blaze_{}'.format(k),
                        BlazeBlock(
                            in_channels,
                            v[0],
                            v[1],
                            use_5x5kernel=use_5x5kernel,
                            name='blaze_{}'.format(k))))
            elif len(v) == 3:
                self.blaze_block.append(
                    self.add_sublayer(
                        'blaze_{}'.format(k),
                        BlazeBlock(
                            in_channels,
                            v[0],
                            v[1],
                            stride=v[2],
                            use_5x5kernel=use_5x5kernel,
                            name='blaze_{}'.format(k))))
            in_channels = v[1]
        for k, v in enumerate(double_blaze_filters):
            # Fix: message referred to 'blaze_filters' and never formatted v.
            assert len(v) in [3, 4], \
                "double_blaze_filters {} not in [3, 4]".format(v)
            if len(v) == 3:
                self.blaze_block.append(
                    self.add_sublayer(
                        'double_blaze_{}'.format(k),
                        BlazeBlock(
                            in_channels,
                            v[0],
                            v[1],
                            double_channels=v[2],
                            use_5x5kernel=use_5x5kernel,
                            name='double_blaze_{}'.format(k))))
            elif len(v) == 4:
                self.blaze_block.append(
                    self.add_sublayer(
                        'double_blaze_{}'.format(k),
                        BlazeBlock(
                            in_channels,
                            v[0],
                            v[1],
                            double_channels=v[2],
                            stride=v[3],
                            use_5x5kernel=use_5x5kernel,
                            name='double_blaze_{}'.format(k))))
            in_channels = v[2]
            # Record the output width of every double-blaze stage; out_shape
            # reports the two stages that forward() returns.
            self._out_channels.append(in_channels)

    def forward(self, inputs):
        """Run the backbone.

        Args:
            inputs (dict): must contain the image tensor under key 'image'.

        Returns:
            list: feature maps from double-blaze stages -4 and -1.
        """
        outs = []
        y = self.conv1(inputs['image'])
        for block in self.blaze_block:
            y = block(y)
            outs.append(y)
        return [outs[-4], outs[-1]]

    @property
    def out_shape(self):
        """Channel specs of the two returned feature maps."""
        return [
            ShapeSpec(channels=c)
            for c in [self._out_channels[-4], self._out_channels[-1]]
        ]
| 36.319876 | 83 | 0.46071 |
acf6e39463a870c0e95765a10db050db9fdd8dbb | 48 | py | Python | pipetrans/info.py | junkainiu/mongodb-to-elasticsearch | fb3a052070d05dde12090cc76c3250a24f43863d | [
"MIT"
] | 4 | 2017-06-17T05:27:55.000Z | 2020-06-13T06:44:17.000Z | pipetrans/info.py | junkainiu/mongo_to_elasticsearch | fb3a052070d05dde12090cc76c3250a24f43863d | [
"MIT"
] | null | null | null | pipetrans/info.py | junkainiu/mongo_to_elasticsearch | fb3a052070d05dde12090cc76c3250a24f43863d | [
"MIT"
] | null | null | null | SUPPORTED_CMDS = [
'$match',
'$group'
]
| 9.6 | 18 | 0.5 |
acf6e3df6a6773c5d3b28dd3a505f058f5329f4c | 412 | py | Python | ionyweb/plugin_app/plugin_links_list/forms.py | makinacorpus/ionyweb | 2f18e3dc1fdc86c7e19bae3778e67e28a37567be | [
"BSD-3-Clause"
] | 4 | 2015-09-28T10:07:39.000Z | 2019-10-18T20:14:07.000Z | ionyweb/plugin_app/plugin_links_list/forms.py | makinacorpus/ionyweb | 2f18e3dc1fdc86c7e19bae3778e67e28a37567be | [
"BSD-3-Clause"
] | 1 | 2021-03-19T21:41:33.000Z | 2021-03-19T21:41:33.000Z | ionyweb/plugin_app/plugin_links_list/forms.py | makinacorpus/ionyweb | 2f18e3dc1fdc86c7e19bae3778e67e28a37567be | [
"BSD-3-Clause"
] | 1 | 2017-10-12T09:25:19.000Z | 2017-10-12T09:25:19.000Z | # -*- coding: utf-8 -*-
import floppyforms as forms
from ionyweb.forms import ModuloModelForm, ModuloForm
from django.forms.models import modelformset_factory
from models import Plugin_LinksList, Link
class Plugin_LinksListForm(ModuloModelForm):
    """Model form for editing a Plugin_LinksList instance (all model fields)."""
    class Meta:
        model = Plugin_LinksList
class LinkForm(ModuloModelForm):
    """Model form for a single Link; the owning plugin and the ordering are
    managed by the application, not the user, hence excluded."""
    class Meta:
        model = Link
        exclude = ('plugin', 'order')
| 18.727273 | 53 | 0.723301 |
acf6e41b18745ac8ea132fb701009cf3af929fa3 | 1,866 | py | Python | tests/unit/apiserver/utils/test_common.py | alexnemes/yardstick_enc | dc2d0eb663c7648271b04026b90046a27fe0b5fc | [
"Apache-2.0"
] | 1 | 2019-12-08T21:39:38.000Z | 2019-12-08T21:39:38.000Z | tests/unit/apiserver/utils/test_common.py | alexnemes/yardstick | 7a89a01cdb1b3569d0b67451572edbae0f3d05aa | [
"Apache-2.0"
] | null | null | null | tests/unit/apiserver/utils/test_common.py | alexnemes/yardstick | 7a89a01cdb1b3569d0b67451572edbae0f3d05aa | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
import unittest
from api.utils import common
class TranslateToStrTestCase(unittest.TestCase):
    """Unit tests for common.translate_to_str."""

    def test_translate_to_str_unicode(self):
        # A plain unicode string is converted to a native str.
        translated = common.translate_to_str(u'hello')
        self.assertEqual('hello', translated)

    def test_translate_to_str_dict_list_unicode(self):
        # Conversion recurses through dict keys/values and list items.
        nested = {u'hello': {u'hello': [u'world']}}
        translated = common.translate_to_str(nested)
        self.assertEqual({'hello': {'hello': ['world']}}, translated)
class GetCommandListTestCase(unittest.TestCase):
    """Unit tests for common.get_command_list."""

    def test_get_command_list_no_opts(self):
        # With no options, the args string is simply appended to the command.
        built = common.get_command_list(['a'], {}, 'b')
        self.assertEqual(['a', 'b'], built)

    def test_get_command_list_with_opts_args(self):
        # Plain options become bare --flags; 'task-args' keeps its value.
        options = {
            'b': 'c',
            'task-args': 'd'
        }
        built = common.get_command_list(['a'], options, 'e')
        self.assertEqual(['a', 'e', '--b', '--task-args', 'd'], built)
def main():
    # Discover and run every TestCase defined in this module.
    unittest.main()


if __name__ == '__main__':
    main()
| 27.850746 | 78 | 0.583065 |
acf6e42d6d0b8624b77ddc03e620f5be7b5ca830 | 608 | py | Python | test cases/frameworks/1 boost/test_python_module.py | kira78/meson | 0ae840656c5b87f30872072aa8694667c63c96d2 | [
"Apache-2.0"
] | 44 | 2022-03-16T08:32:31.000Z | 2022-03-31T16:02:35.000Z | test cases/frameworks/1 boost/test_python_module.py | kira78/meson | 0ae840656c5b87f30872072aa8694667c63c96d2 | [
"Apache-2.0"
] | 2 | 2015-03-23T15:30:17.000Z | 2015-03-23T20:19:19.000Z | test cases/frameworks/1 boost/test_python_module.py | kira78/meson | 0ae840656c5b87f30872072aa8694667c63c96d2 | [
"Apache-2.0"
] | 18 | 2022-03-19T04:41:04.000Z | 2022-03-31T03:32:12.000Z | import sys
# First CLI argument is the directory holding the compiled boost-python module.
sys.path.append(sys.argv[1])

# import compiled python module depending on version of python we are running with
if sys.version_info[0] == 2:
    import python2_module
if sys.version_info[0] == 3:
    import python3_module
def run():
    """Exercise the compiled World class: set/greet round-trip and version check."""
    msg = 'howdy'
    # Pick whichever extension module matches the running interpreter.
    if sys.version_info[0] == 2:
        w = python2_module.World()
    if sys.version_info[0] == 3:
        w = python3_module.World()

    w.set(msg)

    # greet() must echo back exactly what set() stored.
    assert(msg == w.greet())
    # The module must report the interpreter's major.minor version.
    version_string = str(sys.version_info[0]) + "." + str(sys.version_info[1])
    assert(version_string == w.version())


if __name__ == '__main__':
    run()
| 21.714286 | 82 | 0.643092 |
acf6e4c5a068bca4e4ea30e92414f12f4fde30de | 681 | py | Python | Python/Bubble Sort.py | rahulgoyal8312/hacktoberfest2021 | b5d05d7634622445bb1af5d0ba85817f60cb10f1 | [
"MIT"
] | 2 | 2021-12-01T03:35:20.000Z | 2022-02-11T01:10:22.000Z | Python/Bubble Sort.py | rahulgoyal8312/hacktoberfest2021 | b5d05d7634622445bb1af5d0ba85817f60cb10f1 | [
"MIT"
] | null | null | null | Python/Bubble Sort.py | rahulgoyal8312/hacktoberfest2021 | b5d05d7634622445bb1af5d0ba85817f60cb10f1 | [
"MIT"
] | 1 | 2021-10-17T16:24:21.000Z | 2021-10-17T16:24:21.000Z | def bubblesort(elements):
# Looping from size of array from last index[-1] to index [0]
for n in range(len(elements)-1, 0, -1):
for i in range(n):
if elements[i] > elements[i + 1]:
# swapping data if the element is less than next element in the array
elements[i], elements[i + 1] = elements[i + 1], elements[i]
print(elements)
# --- Interactive driver: read values, sort them, echo the result. ---
elements = []
# taking size of array from user
size = int(input("Enter How many number you want to sort:"))
# taking array data from the user
for i in range(size):
    value = int(input("Enter the element:"))
    elements.append(value)
print("Array :", elements)
bubblesort(elements)
print("Sorted Array is, ")
# print the sorted values space-separated on one line
for i in range(size):
    print(elements[i], end =" ")
| 32.428571 | 69 | 0.703377 |
acf6e50fe5e5be1d40a6130e04993d8c52a3d6be | 2,253 | py | Python | tests/test_configurator.py | thanos/python-configurator | 227c639b3a7e087d3b03a72cae9f766a2c285890 | [
"BSD-2-Clause"
] | null | null | null | tests/test_configurator.py | thanos/python-configurator | 227c639b3a7e087d3b03a72cae9f766a2c285890 | [
"BSD-2-Clause"
] | 2 | 2016-05-11T16:19:28.000Z | 2016-05-11T16:22:16.000Z | tests/test_configurator.py | thanos/python-configurator | 227c639b3a7e087d3b03a72cae9f766a2c285890 | [
"BSD-2-Clause"
] | null | null | null | import json
import tempfile
from configurator.cli import main
from configurator.config import Configurable
class O(object):
    """Empty placeholder object that configuration attributes are written onto
    by `Configurable.configure` in the tests below."""
def test_main():
    # The CLI entry point exits successfully when given no arguments.
    assert main([]) == 0
def test_config_by_args():
    """Keyword overrides win over class defaults; unset values keep defaults."""
    class MyConfig(Configurable):
        timeout = 1
        highwater = .4
        watermark = 'tv'

    o = O()
    # NOTE(review): 'timeouut' looks like a typo for 'timeout'; the assertion
    # below still passes because the class default is also 1 — confirm intent.
    MyConfig.configure(o, timeouut=1, highwater=.7)
    assert (o.watermark == MyConfig.watermark)
    assert (o.timeout == 1)
    assert (o.highwater == .7)
def test_config_by_json():
    """Values loaded from a .json config file override class defaults."""
    class MyConfig(Configurable):
        timeout = 1
        highwater = .4
        watermark = 'tv'

    o = O()
    # NOTE(review): the tempfile is opened in binary mode, so json.dump of str
    # data only works on Python 2 — confirm the supported interpreter versions.
    # 'timeouut' also looks like a typo for 'timeout' (default happens to match).
    with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as json_file:
        name = json_file.name
        json.dump(dict(timeouut=1, highwater=.7), json_file)
    MyConfig.configure(o, config_file=name)
    assert (o.watermark == MyConfig.watermark)
    assert (o.timeout == 1)
    assert (o.highwater == .7)
def test_config_from_url():
    """Configuration can be fetched from a remote URL given as `config_url`."""
    class MyConfig(Configurable):
        only_backfill = False
        dont_backfill = False
        read_period = 1
        # NOTE(review): live external endpoint — the test depends on this
        # CouchDB document existing and holding read_period == 2; brittle in CI.
        config_url = 'http://107.21.150.202:5984/config/tailchaser-test'

    o = O()
    MyConfig.configure(o)
    assert (o.dont_backfill is False)
    assert (o.read_period == 2)
def test_config_from_python():
    """A .py config file is executed and its top-level names become attributes."""
    code = """
SZ=10
SIG_SZ=10*SZ
class Tailer:
    READ_PERIOD = 1
"""

    class MyConfig(Configurable):
        body = 'hello'
        objectType = 'Nothing'

    # NOTE(review): the tempfile is opened in binary mode, so writing a str
    # only works on Python 2 — confirm the supported interpreter versions.
    with tempfile.NamedTemporaryFile(suffix='.py', delete=False) as python_file:
        name = python_file.name
        python_file.write(code)
    o = O()
    MyConfig.configure(o, config_file=name)
    # SIG_SZ was computed inside the config file (10 * SZ).
    assert (o.SIG_SZ == 100)
    # Classes defined in the config are exposed too.
    assert (o.Tailer.READ_PERIOD == 1)
def test_config_by_cfg():
    """INI-style .cfg sections become dict attributes with string values."""
    class MyConfig(Configurable):
        timeout = 1
        highwater = .4
        watermark = 'tv'

    o = O()
    # NOTE(review): binary-mode tempfile + str write — Python 2 only; confirm.
    with tempfile.NamedTemporaryFile(suffix='.cfg', delete=False) as cfg_file:
        name = cfg_file.name
        cfg_file.write("""
[dev]
timeout = 21
highwater = .7
""")
    MyConfig.configure(o, config_file=name)
    assert (o.watermark == MyConfig.watermark)
    # Section values stay strings; the section is exposed as a mapping.
    assert (o.dev['timeout'] == '21')
    assert (o.dev['highwater'] == '.7')
| 22.306931 | 80 | 0.62672 |
acf6e6e3a10ea9f3c6e0a0ca5ebfd2a73e567353 | 3,901 | py | Python | rcosautomation/discord/channels.py | Apexal/rcos-automation | 58561639592261e1bc53ff8181a124e139887ac2 | [
"MIT"
] | 1 | 2020-09-01T20:14:00.000Z | 2020-09-01T20:14:00.000Z | rcosautomation/discord/channels.py | Apexal/rcos-bot | 58561639592261e1bc53ff8181a124e139887ac2 | [
"MIT"
] | 8 | 2020-08-26T14:18:24.000Z | 2021-11-18T02:58:47.000Z | rcosautomation/discord/channels.py | rcos/rcos-automation | 58561639592261e1bc53ff8181a124e139887ac2 | [
"MIT"
] | null | null | null | import requests
import re
from typing import List, Dict, Optional
from .constants import API_BASE, RCOS_SERVER_ID, HEADERS, TEXT_CHANNEL, VOICE_CHANNEL, CATEGORY, CHANNEL_TYPES
def generate_text_channel_name(name: str) -> str:
    '''Convert a human-readable name into its Discord text-channel title.

    Dots are dropped, runs of other non-word characters act as separators,
    and the resulting words are joined with hyphens in lowercase.
    '''
    # Collapse non-word runs to spaces, then split (which also trims the ends).
    words = re.sub(r'\W+', ' ', name.replace('.', '')).split()
    return '-'.join(words).lower()
def get_all_channels() -> List:
    '''Get all channels on the server.

    Returns the JSON list of channel objects for RCOS_SERVER_ID.
    Raises requests.HTTPError on a non-2xx response.
    '''
    response = requests.get(
        f'{API_BASE}/guilds/{RCOS_SERVER_ID}/channels', headers=HEADERS)
    response.raise_for_status()
    return response.json()
def get_channel(channel_id: str):
    '''Fetch a single channel object by its id.

    Returns the channel JSON dict.
    Raises requests.HTTPError on a non-2xx response.
    '''
    # Consistency fix: use the shared API_BASE constant like every other
    # helper in this module instead of a hard-coded discordapp.com URL.
    response = requests.get(
        f'{API_BASE}/channels/{channel_id}', headers=HEADERS)
    response.raise_for_status()
    return response.json()
def get_category_children(category_id: str):
    '''Return an iterator over the server channels parented by `category_id`.

    Fetches the full channel list and lazily filters it; raises
    requests.HTTPError on a non-2xx response.
    '''
    # Get all children; consistency fix: use the shared API_BASE constant
    # rather than a hard-coded discordapp.com URL.
    response = requests.get(
        f'{API_BASE}/guilds/{RCOS_SERVER_ID}/channels', headers=HEADERS)
    response.raise_for_status()
    channels = response.json()

    # Filter to find children (kept lazy, as before — iterate once).
    children = filter(
        lambda channel: channel['parent_id'] == category_id, channels)
    return children
def add_channel(name: str, channel_type: int = TEXT_CHANNEL, topic: str = None, parent_id=None, perms=None) -> Dict:
    '''Add a channel or category to the server.

    Args:
        name: display name of the new channel.
        channel_type: TEXT_CHANNEL, VOICE_CHANNEL or CATEGORY constant.
        topic: optional channel topic.
        parent_id: id of the parent category, if any.
        perms: optional list of permission-overwrite objects.

    Returns the created channel JSON; raises requests.HTTPError on failure.
    '''
    response = requests.post(f'{API_BASE}/guilds/{RCOS_SERVER_ID}/channels',
                             json={
                                 'name': name,
                                 'type': channel_type,
                                 'topic': topic,
                                 'parent_id': parent_id,
                                 'permission_overwrites': perms
                             },
                             headers=HEADERS
                             )
    response.raise_for_status()
    return response.json()
def find_channel(name: str, channel_type: int, parent_id=None, ignore_parent=False) -> Optional[Dict]:
    '''Find and return a channel with the given criteria or return None.

    Searches the module-level `all_channels` cache (fetched at import time),
    not the live API. Text-channel names are normalized before comparison.
    When `ignore_parent` is True the parent category is not checked.
    '''
    # Text channel titles are stored in normalized (slug) form.
    if channel_type == TEXT_CHANNEL:
        name = generate_text_channel_name(name)

    for channel in all_channels:
        if channel['type'] == channel_type and channel['name'] == name:
            if ignore_parent:
                return channel
            elif channel['parent_id'] == parent_id:
                return channel
    return None
def add_channel_if_not_exists(name: str, channel_type: int = TEXT_CHANNEL, topic: str = None, parent_id=None, perms=None) -> Dict:
    '''Add a channel if it does not already exist.

    Checks the module-level `all_channels` cache via find_channel(); when the
    channel is missing it is created through the API and appended to the
    cache. Returns either the found channel or the newly created one.
    '''
    # See if channel exists in the cached channel list
    channel = find_channel(
        name, channel_type=channel_type, parent_id=parent_id)

    # Idiom fix: identity check against None (was `channel == None`).
    if channel is None:
        channel = add_channel(name, channel_type=channel_type,
                              topic=topic, parent_id=parent_id, perms=perms)
        # Keep the local cache in sync so later lookups see the new channel.
        all_channels.append(channel)
        print(
            f'{CHANNEL_TYPES[channel["type"]]} "{channel["name"]}" was added')
    else:
        print(
            f'{CHANNEL_TYPES[channel["type"]]} "{channel["name"]}" already exists')
    return channel
def edit_channel(channel_id: str, updates: Dict):
    '''Apply a partial update (PATCH) to an existing channel.

    `updates` maps channel fields (e.g. name, topic, parent_id) to new values.
    Returns the updated channel JSON; raises requests.HTTPError on failure.
    '''
    response = requests.patch(
        f'{API_BASE}/channels/{channel_id}', json=updates, headers=HEADERS)
    response.raise_for_status()
    return response.json()
def delete_channel(channel_id) -> Dict:
    '''Delete a channel by id.

    Returns the API's JSON response for the deleted channel; raises
    requests.HTTPError on failure. Note: the module-level `all_channels`
    cache is NOT updated here.
    '''
    response = requests.delete(f'{API_BASE}/channels/{channel_id}',
                               headers=HEADERS
                               )
    response.raise_for_status()
    return response.json()
# Module-level channel cache, populated once at import time (performs a
# network request on import). find_channel()/add_channel_if_not_exists()
# read and append to this list.
all_channels = get_all_channels()
| 36.12037 | 130 | 0.622661 |
acf6e81b68ce2a9e3ba83c90b10b559b977dad79 | 1,983 | py | Python | tests/conftest.py | TimDiekmann/flask-smorest | 9ed0b23e41e5c73071dfb73962e43f995e2bfb6e | [
"MIT"
] | null | null | null | tests/conftest.py | TimDiekmann/flask-smorest | 9ed0b23e41e5c73071dfb73962e43f995e2bfb6e | [
"MIT"
] | null | null | null | tests/conftest.py | TimDiekmann/flask-smorest | 9ed0b23e41e5c73071dfb73962e43f995e2bfb6e | [
"MIT"
] | null | null | null | from collections import namedtuple
import pytest
import marshmallow as ma
from flask import Flask
from .mocks import DatabaseMock
class AppConfig:
    """Base application configuration class

    Overload this to add config parameters
    """
    # Minimal settings flask-smorest requires to build an OpenAPI spec.
    API_TITLE = 'API Test'
    API_VERSION = '1'
    OPENAPI_VERSION = '3.0.2'
@pytest.fixture(params=[0])
def collection(request):
    # A mock database pre-populated with `request.param` documents
    # (parametrize with other counts to test non-empty collections).
    _collection = DatabaseMock()
    for idx in range(request.param):
        _collection.post({'db_field': idx})
    return _collection
@pytest.fixture(params=[AppConfig])
def app(request):
    # A Flask app configured from the parametrized config class
    # (AppConfig by default; tests may supply subclasses).
    _app = Flask(__name__)
    _app.config.from_object(request.param)
    return _app
class CounterSchema(ma.Schema):
    """Base Schema with load/dump counters

    The counters are class attributes, so they are shared by all instances
    of a given subclass; reset them between tests with the reset_* helpers.
    """

    # Number of completed load() / dump() calls for this schema class.
    load_count = 0
    dump_count = 0

    @classmethod
    def reset_load_count(cls):
        cls.load_count = 0

    @classmethod
    def reset_dump_count(cls):
        cls.dump_count = 0

    @ma.post_load(pass_many=True)
    def increment_load_count(self, data, many, **kwargs):
        # pass_many=True -> invoked once per load() call, not per item.
        self.__class__.load_count += 1
        return data

    @ma.post_dump(pass_many=True)
    def increment_dump_count(self, data, many, **kwargs):
        # pass_many=True -> invoked once per dump() call, not per item.
        self.__class__.dump_count += 1
        return data
@pytest.fixture
def schemas():
    # Bundle of throwaway schemas used across the test suite; defined inside
    # the fixture so each test gets fresh classes (and fresh counters).

    class DocSchema(CounterSchema):
        # Server-generated id; 'field' maps to the mock DB's 'db_field'.
        item_id = ma.fields.Int(dump_only=True)
        field = ma.fields.Int(attribute='db_field')

    class DocEtagSchema(CounterSchema):
        # Reduced schema used for ETag computation.
        field = ma.fields.Int(attribute='db_field')

    class QueryArgsSchema(ma.Schema):
        class Meta:
            ordered = True
            unknown = ma.EXCLUDE
        arg1 = ma.fields.String()
        arg2 = ma.fields.Integer()

    class ClientErrorSchema(ma.Schema):
        error_id = ma.fields.Str()
        text = ma.fields.Str()

    return namedtuple(
        'Model',
        ('DocSchema', 'DocEtagSchema', 'QueryArgsSchema', 'ClientErrorSchema')
    )(DocSchema, DocEtagSchema, QueryArgsSchema, ClientErrorSchema)
| 22.793103 | 78 | 0.66112 |
acf6e871a083ee8baaa3fdb86470e53f3b3c9d09 | 8,108 | py | Python | meeting_most/docs/conf.py | missashat/Group-Project | dff057776f2dbb88ef5fd61a680c167cf387a37f | [
"Apache-1.1"
] | null | null | null | meeting_most/docs/conf.py | missashat/Group-Project | dff057776f2dbb88ef5fd61a680c167cf387a37f | [
"Apache-1.1"
] | 5 | 2020-06-05T18:21:27.000Z | 2021-09-08T00:03:22.000Z | meeting_most/docs/conf.py | missashat/Group-Project | dff057776f2dbb88ef5fd61a680c167cf387a37f | [
"Apache-1.1"
] | null | null | null | # Meeting Most documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Meeting Most"
copyright = """2018, Catherine Bodnar, Julia Heath, Asha Turman"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "meeting_mostdoc"
# -- Options for LaTeX output --------------------------------------------------
# LaTeX output tweaks; every key is optional and falls back to Sphinx defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# A single manual is produced, rooted at index.rst.
latex_documents = [
    (
        "index",
        "meeting_most.tex",
        "Meeting Most Documentation",
        """Catherine Bodnar, Julia Heath, Asha Turman""",
        "manual",
    )
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One manual page is generated, in man section 1 (user commands).
man_pages = [
    (
        "index",
        "meeting_most",
        "Meeting Most Documentation",
        ["""Catherine Bodnar, Julia Heath, Asha Turman"""],
        1,
    )
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Single Texinfo document: (source start file, target name, title, author,
# dir menu entry, description, category).
texinfo_documents = [
    (
        "index",
        "meeting_most",
        "Meeting Most Documentation",
        """Catherine Bodnar, Julia Heath, Asha Turman""",
        "Meeting Most",
        """A way to facilitate discussion on agenda items while reducing meeting time.""",
        "Miscellaneous",
    )
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.671875 | 90 | 0.697583 |
acf6ea791390f72accf84c9dd9ca82f8f34703a8 | 1,864 | py | Python | mltk/core/evaluation_results.py | SiliconLabs/mltk | 56b19518187e9d1c8a0d275de137fc9058984a1f | [
"Zlib"
] | null | null | null | mltk/core/evaluation_results.py | SiliconLabs/mltk | 56b19518187e9d1c8a0d275de137fc9058984a1f | [
"Zlib"
] | 1 | 2021-11-19T20:10:09.000Z | 2021-11-19T20:10:09.000Z | mltk/core/evaluation_results.py | sldriedler/mltk | d82a60359cf875f542a2257f1bc7d8eb4bdaa204 | [
"Zlib"
] | null | null | null |
import logging
class EvaluationResults(dict):
    """Dictionary of model evaluation results.

    Stores the model ``name`` and ``model_type`` alongside arbitrary result
    entries. Rendering/plotting details are specific to the model type.

    .. seealso::

       - :py:class:`mltk.core.ClassifierEvaluationResults`
       - :py:class:`mltk.core.AutoEncoderEvaluationResults`

    """

    def __init__(self, name:str, model_type:str='generic', **kwargs):
        # name and model_type are stored first, then any extra result entries,
        # preserving insertion order for generate_summary().
        super().__init__(name=name, model_type=model_type, **kwargs)

    @property
    def name(self) -> str:
        """The name of the evaluated model"""
        return self['name']

    @property
    def model_type(self) -> str:
        """The type of the evaluated model (e.g. classification, autoencoder, etc.)"""
        return self['model_type']

    def generate_summary(self, include_all=True) -> str:
        """Return the results rendered as a human-readable string.

        Args:
            include_all: When True, every stored entry besides name and
                model_type is appended as its own ``key: value`` line.
        """
        parts = [f'Name: {self.name}', f'Model Type: {self.model_type}']
        if include_all:
            parts.extend(
                f'{key}: {value}'
                for key, value in self.items()
                if key not in ('name', 'model_type')
            )
        return '\n'.join(parts).strip()

    def generate_plots(
        self,
        show=True,
        output_dir:str=None,
        logger: logging.Logger=None
    ):
        """Generate plots of the evaluation results

        Args:
            show: Display the generated plots
            output_dir: Generate the plots at the specified directory. If omitted, generated in the model's logging directory
            logger: Optional logger

        Raises:
            NotImplementedError: always — subclasses must override this.
        """
        raise NotImplementedError

    def __str__(self) -> str:
        return self.generate_summary()
| 27.014493 | 125 | 0.584764 |
acf6ea8210712120bfc411a8d666f673ce53e5e0 | 3,527 | py | Python | test/test_kernels.py | radovanhorvat/gonzales | 7b25296acbfa11ba2f212ae29d7e99aaf7d92af3 | [
"MIT"
] | null | null | null | test/test_kernels.py | radovanhorvat/gonzales | 7b25296acbfa11ba2f212ae29d7e99aaf7d92af3 | [
"MIT"
] | null | null | null | test/test_kernels.py | radovanhorvat/gonzales | 7b25296acbfa11ba2f212ae29d7e99aaf7d92af3 | [
"MIT"
] | null | null | null | import numpy as np
from gonzales.simulator.space import Space
from gonzales.simulator.utils import calculate_relative_error
import gonzales.lib.brute_force as bf
import gonzales.lib.octree as oct
def vel_func(pos_vec):
return np.array((0, 0, 0))
def mass_func(pos_vec):
return 1.0
def test_brute_force_basic():
# test correctness of Cython kernel
G = 1.0
eps = 0
space = Space()
# 1. zero acceleration cases
# 1.1. single particle
space.add_particle(np.array((0., 0., 0.)), np.array((0., 0., 0.)), 1.0)
accs = bf.calculate_accs_pp(space.r, space.m, G, eps)
np.testing.assert_equal(accs, np.array([[0., 0., 0.]]))
# 1.2. two particles at big distance
space.add_particle(np.array((1.0e10, 1.0e15, 1.0e15)), np.array((0., 0., 0.)), 1.0)
accs = bf.calculate_accs_pp(space.r, space.m, G, eps)
np.testing.assert_almost_equal(accs, np.array([[0., 0., 0.], [0., 0., 0.]]))
# 1.3. set G = 0
accs = bf.calculate_accs_pp(space.r, space.m, 0., eps)
np.testing.assert_equal(accs, np.array([[0., 0., 0.], [0., 0., 0.]]))
# 1.4. two zero-mass particles
space.clear_particles()
space.add_particle(np.array((0., 0., 0.)), np.array((0., 0., 0.)), 0.)
space.add_particle(np.array((1., 0., 0.)), np.array((0., 0., 0.)), 0.)
accs = bf.calculate_accs_pp(space.r, space.m, G, eps)
np.testing.assert_equal(accs, np.array([[0., 0., 0.], [0., 0., 0.]]))
# 2. two particles on x-axis at distance 1.0
space.clear_particles()
space.add_particle(np.array((0., 0., 0.)), np.array((0., 0., 0.)), 1.0)
space.add_particle(np.array((1., 0., 0.)), np.array((0., 0., 0.)), 1.0)
accs = bf.calculate_accs_pp(space.r, space.m, G, eps)
np.testing.assert_almost_equal(accs, np.array([[1., 0., 0.], [-1., 0., 0.]]))
# 3. two particles of unequal masses on x-axis at distance 1.0
space.clear_particles()
space.add_particle(np.array((0., 0., 0.)), np.array((0., 0., 0.)), 1.0)
space.add_particle(np.array((1., 0., 0.)), np.array((0., 0., 0.)), 0.5)
accs = bf.calculate_accs_pp(space.r, space.m, G, eps)
np.testing.assert_almost_equal(accs, np.array([[0.5, 0., 0.], [-1, 0., 0.]]))
def test_barnes_hut_theta_zero_C():
    """With opening angle theta = 0 Barnes-Hut never approximates, so the
    octree kernel must agree with brute force to float precision."""
    # test Barnes-Hut C kernel relative to Cython brute force kernel, for theta = 0
    G = 1.0
    eps = 1.0e-3
    theta = 0.
    particle_nums = [2, 10, 100, 1000, 5000, 10000]
    for num in particle_nums:
        cube_length = int(np.sqrt(num))
        space = Space()
        space.add_cuboid(num, np.array((0., 0., 0.)), cube_length, cube_length, cube_length, vel_func, mass_func)
        accs1 = bf.calculate_accs_pp(space.r, space.m, G, eps)
        accs2 = oct.calc_accs_octree(cube_length, 0., 0., 0., space.r, space.m, G, eps, theta)
        np.testing.assert_almost_equal(accs1, accs2)
def test_barnes_hut_theta_non_zero_C():
    """With theta > 0 Barnes-Hut approximates; only require the mean and
    standard deviation of the relative error to stay under 2%."""
    # test Barnes-Hut C kernel relative to Cython brute force kernel, for theta > 0
    G = 1.0
    eps = 1.0e-3
    theta = 0.5
    particle_nums = [2, 10, 100, 1000, 5000, 10000]
    for num in particle_nums:
        cube_length = int(np.sqrt(num))
        space = Space()
        space.add_cuboid(num, np.array((0., 0., 0.)), cube_length, cube_length, cube_length, vel_func, mass_func)
        accs2 = bf.calculate_accs_pp(space.r, space.m, G, eps)
        accs3 = oct.calc_accs_octree(cube_length, 0., 0., 0., space.r, space.m, G, eps, theta)
        err, std_err = calculate_relative_error(accs3, accs2)
        assert err < 0.02 and std_err < 0.02
| 41.011628 | 113 | 0.620924 |
acf6eb5e964dc9a7932fc805f1b8bc9f2a5532e5 | 1,063 | py | Python | 2.ListBased_Collections/my_collections.py | qrzhang/Udacity_Data_Structure_Algorithms | 5b65088884d93e7fcd53b7fcf888e378b514ba2a | [
"MIT"
] | null | null | null | 2.ListBased_Collections/my_collections.py | qrzhang/Udacity_Data_Structure_Algorithms | 5b65088884d93e7fcd53b7fcf888e378b514ba2a | [
"MIT"
] | null | null | null | 2.ListBased_Collections/my_collections.py | qrzhang/Udacity_Data_Structure_Algorithms | 5b65088884d93e7fcd53b7fcf888e378b514ba2a | [
"MIT"
] | null | null | null | """Collections do not have a order
Lists: no fixed length
Arrays: each array has associated index, deletion one element could cause an empty box,
or insertion should move all following boxes first. Inserting into an array is O(n), since
you may need to shift elements to make space for the one you're inserting, or even copy
everything to a new array if you run out of space. Thus, inserting into a Python list is
actually O(n), while operations that search for an element at a particular spot are O(1).
(Doubled) Linked lists: have order but not indexes. Insertion and deletion are just the changes of pointers."""
# Linked list in Python
class Element(object):
def __init__(self, value):
self.value = value
self.next = None
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def append(self, new_element):
current = self.head
if self.head:
while current.next:
current = current.next
current.next = new_element
else:
self.head = new_element | 39.37037 | 111 | 0.702728 |
acf6eca04eb0a8e452937a2e16dd62b3085ec234 | 2,692 | py | Python | azure-mgmt-policyinsights/azure/mgmt/policyinsights/models/__init__.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-policyinsights/azure/mgmt/policyinsights/models/__init__.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-policyinsights/azure/mgmt/policyinsights/models/__init__.py | NMijat1024/azure-sdk-for-python | c49e1d6d797dceaca81813cafb1a486d67185182 | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .policy_event_py3 import PolicyEvent
from .policy_events_query_results_py3 import PolicyEventsQueryResults
from .query_failure_error_py3 import QueryFailureError
from .query_failure_py3 import QueryFailure, QueryFailureException
from .policy_state_py3 import PolicyState
from .policy_states_query_results_py3 import PolicyStatesQueryResults
from .summary_results_py3 import SummaryResults
from .policy_definition_summary_py3 import PolicyDefinitionSummary
from .policy_assignment_summary_py3 import PolicyAssignmentSummary
from .summary_py3 import Summary
from .summarize_results_py3 import SummarizeResults
from .operation_display_py3 import OperationDisplay
from .operation_py3 import Operation
from .operations_list_results_py3 import OperationsListResults
from .query_options_py3 import QueryOptions
except (SyntaxError, ImportError):
from .policy_event import PolicyEvent
from .policy_events_query_results import PolicyEventsQueryResults
from .query_failure_error import QueryFailureError
from .query_failure import QueryFailure, QueryFailureException
from .policy_state import PolicyState
from .policy_states_query_results import PolicyStatesQueryResults
from .summary_results import SummaryResults
from .policy_definition_summary import PolicyDefinitionSummary
from .policy_assignment_summary import PolicyAssignmentSummary
from .summary import Summary
from .summarize_results import SummarizeResults
from .operation_display import OperationDisplay
from .operation import Operation
from .operations_list_results import OperationsListResults
from .query_options import QueryOptions
from .policy_insights_client_enums import (
PolicyStatesResource,
)
__all__ = [
'PolicyEvent',
'PolicyEventsQueryResults',
'QueryFailureError',
'QueryFailure', 'QueryFailureException',
'PolicyState',
'PolicyStatesQueryResults',
'SummaryResults',
'PolicyDefinitionSummary',
'PolicyAssignmentSummary',
'Summary',
'SummarizeResults',
'OperationDisplay',
'Operation',
'OperationsListResults',
'QueryOptions',
'PolicyStatesResource',
]
| 40.787879 | 76 | 0.752972 |
acf6ed43e523c107734864ff9cd7bf1e4f6da620 | 1,043 | py | Python | yelp/endpoint/business.py | keithcallenberg/yelp-python | 12d611bc2344bbc1c93c83775aa71b7b01b36ad6 | [
"MIT"
] | 195 | 2015-11-11T22:26:54.000Z | 2022-03-18T03:19:52.000Z | yelp/endpoint/business.py | keithcallenberg/yelp-python | 12d611bc2344bbc1c93c83775aa71b7b01b36ad6 | [
"MIT"
] | 57 | 2015-11-11T22:32:05.000Z | 2020-10-20T14:50:11.000Z | yelp/endpoint/business.py | keithcallenberg/yelp-python | 12d611bc2344bbc1c93c83775aa71b7b01b36ad6 | [
"MIT"
] | 138 | 2015-11-12T15:28:10.000Z | 2022-02-13T00:56:57.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from yelp.config import BUSINESS_PATH
from yelp.obj.business import Business
class BusinessEndpoints(object):
    """Thin wrapper around the Yelp v3 business-details endpoint."""

    def __init__(self, client):
        # client must expose _make_request(path, url_params=...)
        self.client = client

    def get_by_id(self, business_id, **url_params):
        """Make a request to the business details endpoint. More info at
        https://www.yelp.com/developers/documentation/v3/business

        Args:
            business_id (str): The business alias (i.e. yelp-san-francisco) or
                ID (i.e. 4kMBvIEWPxWkWKFN__8SxQ.
            **url_params: Dict corresponding to business API params
                https://www.yelp.com/developers/documentation/v3/business

        Returns:
            yelp.obj.business.Business object that wraps the response.
        """
        path = BUSINESS_PATH.format(business_id=business_id)
        raw_response = self.client._make_request(path, url_params=url_params)
        return Business(raw_response)
| 34.766667 | 82 | 0.68744 |
acf6ed77617c1324b4b59596c9831fe25df7252b | 2,595 | py | Python | data/p4VQE/R4/benchmark/startQiskit478.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startQiskit478.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startQiskit478.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=14
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the (generated) QAOA-style test circuit on n qubits.

    NOTE(review): relies on module-level globals E, V, gamma and beta that
    are only assigned in the __main__ block below -- confirm before reuse.
    The '# number=N' tags are generation artifacts identifying each gate.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)
    prog.h(input_qubit[0]) # number=1
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[2]) # number=11
    prog.cz(input_qubit[1],input_qubit[2]) # number=12
    prog.h(input_qubit[2]) # number=13
    prog.x(input_qubit[2]) # number=6
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=5
    prog.cx(input_qubit[3],input_qubit[2]) # number=10
    # cost layer: one controlled-phase per weighted edge of the graph
    for edge in E:
        k = edge[0]
        l = edge[1]
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)
    # mixer layer over all vertices
    prog.rx(2 * beta, range(len(V)))
    prog.cx(input_qubit[1],input_qubit[0]) # number=7
    prog.cx(input_qubit[1],input_qubit[0]) # number=8
    # circuit end
    return prog
if __name__ == '__main__':
    # Build a small weighted graph, grid-search the QAOA angles (gamma, beta)
    # that maximize the analytic objective F1, then simulate the circuit and
    # dump the measurement counts to a CSV file.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)
    step_size = 0.1
    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)
    # pick the (gamma, beta) grid point where F1 is maximal
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]
    gamma = a[0] * step_size
    beta = a[1] * step_size
    prog = make_circuit(4)
    sample_shot =5600
    writefile = open("../data/startQiskit478.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('qasm_simulator')
    # transpile for a fake 5-qubit backend, then measure every qubit
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
acf6ed9531a933ef3ac7f54ede0e4db2dccb82fa | 179 | py | Python | 05/01/utcoffset.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
] | null | null | null | 05/01/utcoffset.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
] | 32 | 2017-09-01T00:52:17.000Z | 2017-10-01T00:30:02.000Z | 05/01/utcoffset.py | pylangstudy/201709 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | [
"CC0-1.0"
] | null | null | null | import datetime
print(datetime.tzinfo.utcoffset(datetime.datetime.now()))#TypeError: descriptor 'utcoffset' requires a 'datetime.tzinfo' object but received a 'datetime.datetime'
| 59.666667 | 162 | 0.815642 |
acf6edeaa072cba54acde058e888a881b72e7b2b | 2,770 | py | Python | agent.py | domiwei/FlapPyBird | d870c200683e3ebc168baebf62077326d01da69a | [
"MIT"
] | null | null | null | agent.py | domiwei/FlapPyBird | d870c200683e3ebc168baebf62077326d01da69a | [
"MIT"
] | null | null | null | agent.py | domiwei/FlapPyBird | d870c200683e3ebc168baebf62077326d01da69a | [
"MIT"
] | null | null | null | import random
import time
import math
import json
JUMP = 0
PAUSE = 1
ALIVE = 0
PASSPIPE = 1
DEAD = 2
class EpsilonGreedy:
def __init__(self):
self.epsilon = 0.01
def takeAction(self, actions):
if random.random() > self.epsilon:
return actions.index(max(actions))
return random.randint(0, len(actions)-1)
class Agent:
def __init__(self, policy, modelfile="", writemodel=True):
# (dx, dy, ypos, yvel) -> [award of jump, award of do nothing]
self.qtable = {}
if modelfile != "":
with open(modelfile, "r") as f:
model = f.readline()
print(model)
self.qtable = eval(model)
self.award = {ALIVE: 1, PASSPIPE: 1000, DEAD: -1000}
self.discountFactor = 0.8
self.learnRate = 0.5
self.policy = policy
self.prevAction = PAUSE
self.prevState = None
self.prevTimestamp = 0
self.writemodel = writemodel
# jump returns true if decide to jump this moment
def jump(self, player, upipe, lpipe):
state = self.getState(player, lpipe)
if state == self.prevState: # do nothing
return
action = self.policy.takeAction(self.qtable[state])
self.prevAction = action
self.prevState = state
return action == JUMP
# feeback gives feedback of previous action
def feedback(self, player, upipe, lpipe, result):
if self.prevState is None or self.prevState[0] == 0:
return
state = self.getState(player, lpipe)
if state == self.prevState: # do nothing
return
optimalFuture = max(self.qtable[state])
oldValue = self.qtable[self.prevState][self.prevAction]
reward = self.award[result]
if result == ALIVE and state[1]>30: # try to discount
reward = (-1.0*math.exp(abs(state[1]-30)/2))
# update
self.qtable[self.prevState][self.prevAction] = \
(1.0-self.learnRate)*oldValue + \
self.learnRate*(reward + self.discountFactor*optimalFuture)
if time.time() - self.prevTimestamp > 5:
print("table size: ", len(self.qtable))
if self.writemodel:
with open("qtable.model", "w") as f:
f.write(str(self.qtable))
self.prevTimestamp = time.time()
if result == DEAD:
self.prevState = None
def getState(self, player, lpipe):
dx = int((lpipe['x'] - player['x'])/4)
dy = int((lpipe['y'] - player['y'])/4)
#playerY = int(player['y']/2)
state = (dx, dy, int(player['v']/2))
if state not in self.qtable:
self.qtable[state] = [0.0, 0.01]
return state
| 31.477273 | 71 | 0.56787 |
acf6ee28162d73a34f2ea86dc3011d93641d56f5 | 1,525 | py | Python | studentreg/migrations/0021_auto_20210310_2146.py | Ahmed-Dauda/adminrecords | fd12cb56bc3ac59fcd024050b3f8c345df9a4272 | [
"MIT"
] | null | null | null | studentreg/migrations/0021_auto_20210310_2146.py | Ahmed-Dauda/adminrecords | fd12cb56bc3ac59fcd024050b3f8c345df9a4272 | [
"MIT"
] | null | null | null | studentreg/migrations/0021_auto_20210310_2146.py | Ahmed-Dauda/adminrecords | fd12cb56bc3ac59fcd024050b3f8c345df9a4272 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.6 on 2021-03-10 20:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: raise the default daily values on the
    'salat' model's weekday counters (14 for weekdays/Friday, 15 for the
    weekend days)."""

    dependencies = [
        ('studentreg', '0020_auto_20210310_1554'),
    ]

    operations = [
        migrations.AlterField(
            model_name='salat',
            name='friday',
            field=models.PositiveIntegerField(blank=True, default=14, null=True),
        ),
        migrations.AlterField(
            model_name='salat',
            name='monday',
            field=models.PositiveIntegerField(blank=True, default=14, null=True),
        ),
        migrations.AlterField(
            model_name='salat',
            name='saturday',
            field=models.PositiveIntegerField(blank=True, default=15, null=True),
        ),
        migrations.AlterField(
            model_name='salat',
            name='sunday',
            field=models.PositiveIntegerField(blank=True, default=15, null=True),
        ),
        migrations.AlterField(
            model_name='salat',
            name='thursday',
            field=models.PositiveIntegerField(blank=True, default=14, null=True),
        ),
        migrations.AlterField(
            model_name='salat',
            name='tuesday',
            field=models.PositiveIntegerField(blank=True, default=14, null=True),
        ),
        migrations.AlterField(
            model_name='salat',
            name='wednesday',
            field=models.PositiveIntegerField(blank=True, default=14, null=True),
        ),
    ]
acf6ee8d8394e4f85e28cd4b51ffc4338e205838 | 23,378 | py | Python | pleskfm.py | nlitsme/pleskfilemanager | f488e34650f87f2469a9318acae96216373f0953 | [
"MIT"
] | null | null | null | pleskfm.py | nlitsme/pleskfilemanager | f488e34650f87f2469a9318acae96216373f0953 | [
"MIT"
] | null | null | null | pleskfm.py | nlitsme/pleskfilemanager | f488e34650f87f2469a9318acae96216373f0953 | [
"MIT"
] | 1 | 2020-07-12T17:28:14.000Z | 2020-07-12T17:28:14.000Z | """
Tool for interacting with the plesk web interface from the commandline.
Site configuration is read from ~/.pleskrc.
Usage:
plesk -c test mkdir testdir
plesk -c test empty testdir/tst1.txt
plesk -c test edit testdir/tst1.txt abcakjsdkhjasdjkhasd
plesk -c test cat testdir/tst1.txt
echo abcakjsdkhjasdjkhasd | plesk -c test tee testdir/tst2.txt
plesk -c test cat testdir/tst2.txt
AUTHOR: willem Hengeveld <itsme@xs4all.nl>
TODO:
* remember session cookie, so we don't have to login each time.
* use task-progress while waiting for results.
* add parallel recurse option to ls
* instead of specifying '-C' and disallowing relative paths.
split the action in several actions, each with their own directory.
so: "cp dir1/file1 dir1/file2 dir2/file3 dst"
will become:
"cp -C dir1 file1 file2 dst"
and
"cp -C dir2 file3 dst"
"""
import html.parser
import datetime
import os.path
import os
import sys
import asyncio
import aiohttp
# Python 2/3 compatibility: Python 3 has no separate 'unicode' type, so
# alias it to str; the type checks in downloadfile/uploadfile rely on it.
if sys.version_info[0] == 3:
    unicode = str
class TokenFilter(html.parser.HTMLParser):
    """HTML parser that captures plesk's CSRF token.

    Scans for
        <meta name="forgery_protection_token" id="forgery_protection_token" content="...">
    and stores the content value in self.token (None if never seen).
    """

    def __init__(self):
        super().__init__()
        self.token = None

    def _scan_meta(self, attrs):
        # attrs is a list of (name, value) pairs from HTMLParser.
        attributes = dict(attrs)
        if attributes.get('name') == 'forgery_protection_token':
            self.token = attributes['content']

    def handle_starttag(self, tag, attrs):
        if tag == 'meta':
            self._scan_meta(attrs)

    def handle_startendtag(self, tag, attrs):
        # self-closing <meta .../> variant
        if tag == 'meta':
            self._scan_meta(attrs)
def ExtractToken(html):
    """Return the forgery_protection_token found in *html*, or None."""
    token_parser = TokenFilter()
    token_parser.feed(html)
    token_parser.close()
    return token_parser.token
class ErrorFilter(html.parser.HTMLParser):
    """
    Parses html, extracts the error message text found inside
    <div class='msgbox msg-error'> by tracking the open-tag stack and
    collecting text while inside that div.
    """
    def __init__(self):
        super().__init__()
        # stack of currently-open tag names
        self.stack = []
        # stack depth at which the msg-error div was opened (0 = not inside)
        self.level = 0
        self.errormsg = ""
    def handle_starttag(self, tag, attrs):
        # void elements never get a closing tag; treat them as self-closing
        if tag in ("meta", "input", "br", "link", "img", "hr"):
            return self.handle_startendtag(tag, attrs)
        self.stack.append(tag)
        if tag == 'div':
            d = dict(attrs)
            cls = d.get("class", "")
            # NOTE(review): find(...) > 0 misses class attributes that START
            # with 'msg-error' (find returns 0 there) -- confirm intended.
            if cls.find('msg-error')>0:
                self.level = len(self.stack)
    def handle_startendtag(self, tag, attrs):
        # self-closing tags don't change the stack
        pass
    def handle_endtag(self, tag):
        # leaving the msg-error div ends message collection
        if self.level == len(self.stack):
            self.level = 0
        if self.stack and self.stack[-1] == tag:
            self.stack.pop()
        else:
            # mismatched close tag: unwind to the nearest matching open tag
            for i, e in reversed(list(enumerate(self.stack))):
                if e==tag:
                    print("missing end tag for:", self.stack[i+1:], "closing", self.stack[i:i+1])
                    while len(self.stack)>i:
                        self.stack.pop()
                    return
            print("could not find start tag for:", tag, "in", self.stack)
    def handle_data(self, data):
        # accumulate text only while inside the error div
        if self.level:
            if self.errormsg:
                self.errormsg += " "
            self.errormsg += data
def ExtractError(html):
    """Return the plesk error message embedded in *html* ('' if none)."""
    error_parser = ErrorFilter()
    error_parser.feed(html)
    error_parser.close()
    return error_parser.errormsg.strip()
class WebHosting:
    """
    Thin async client for the plesk smb/file-manager web interface.
    Has one method per file-manager function; most return an (awaitable)
    aiohttp response that the caller must consume and close.
    Always call 'start' as the first method.
    """
    def __init__(self, loop, baseurl):
        self.baseurl = baseurl
        self.loop = loop
        # aiohttp.ClientSession, created in start()
        self.client = None
    def __del__(self):
        if self.client:
            self.client.close()
    def post(self, path, **kwargs):
        """POST to baseurl+path, forwarding kwargs to aiohttp."""
        return self.client.post(self.baseurl+path, **kwargs)
    def get(self, path, **kwargs):
        """GET baseurl+path, forwarding kwargs to aiohttp."""
        return self.client.get(self.baseurl+path, **kwargs)
    async def start(self, args):
        """
        Creates the http session, optionally logs in, and obtains the
        CSRF token required by all form posts.
        """
        conn = aiohttp.TCPConnector(verify_ssl=not args.ignoresslerrors)
        self.client = aiohttp.ClientSession(loop=self.loop, connector=conn)
        if args.username:
            resp = await self.login(args.username, args.password)
            # todo: save either PLESKSESSID or PHPSESSID
            resp.close()
        resp = await self.gettoken()
        self.token = ExtractToken(await resp.text())
        resp.close()
    def gettoken(self):
        """Fetch the smb start page, whose html carries the CSRF token."""
        return self.get("smb/")
    def makeform(self, *args, **kwargs):
        """
        create a form from the arguments:
         * first a list of optional file arguments (sent as ids[0], ids[1], ...)
         * followed by a list of keyword args.
        The CSRF token is always added.
        """
        kwargs['forgery_protection_token'] = self.token
        for i, arg in enumerate(args):
            kwargs['ids[%d]' % i] = arg
        return kwargs
    def login(self, user, pw):
        """Authenticate against the plesk login form."""
        return self.post("login_up.php3", data={"login_name":user, "passwd":pw, "locale_id":"default"})
    def listdir(self, dirname):
        # changes the current dir
        # returns json: { additional:{ operations:[] }, localte:{ ... }, pager:{ ... }, pathbar:{ ... }, state: { currentDir }, data: [ { filePerms:"...", formattedSize:..., isDirectory, name, size, type, user, actions:[ { href, name, title } ] } ] }
        return self.get("smb/file-manager/list-data", params={"currentDir":dirname})
    async def download(self, dirname, filename, fh):
        """Stream dirname/filename from the server into file object fh."""
        resp = await self.get("smb/file-manager/download", params={"currentDir":dirname, "file":filename})
        # an html response without attachment headers means the server
        # reported an error page instead of file contents
        if not resp.headers.get('Content-Disposition') and resp.headers.get('Content-Type').startswith('text/html'):
            error = ExtractError(await resp.text())
            resp.close()
            # note: sometimes the error will be '<h1>internal error</h1>
            raise Exception(error)
        while True:
            chunk = await resp.content.read(0x10000)
            if not chunk:
                break
            fh.write(chunk)
        resp.close()
    def delfiles(self, filelist):
        """Delete the given files/directories (recursive for directories)."""
        # note: list-data apparently conveys a notion of 'current-directory' to the server.
        form = self.makeform(*filelist)
        return self.post("smb/file-manager/delete", data=form)
    def calcsize(self, filelist):
        """Ask the server for the total size of the given entries."""
        # returns json dict: { fileSizes:{ filename:size-string }, statusMessages: { content: "Selectiegrootte: ...", "status":"info" } }
        for i, fn in enumerate(filelist):
            if fn.find('/')>=0:
                raise Exception("calcsize does not operate on subdirectories")
        form = self.makeform(*filelist)
        return self.post("smb/file-manager/calculate-size", data=form)
    def makezip(self, zipname, filelist):
        """Create archive 'zipname' (server appends .zip) from filelist."""
        # returns json dict: { message: "%%archive%% is gemaakt", "status":"success" }
        form = self.makeform(*filelist, archiveName=zipname)
        return self.post("smb/file-manager/create-archive", data=form)
    def unzip(self, zipname):
        """Extract an archive in place, overwriting existing files."""
        form = self.makeform(zipname)
        return self.post("smb/file-manager/extract-archive", data=form, params={'overwrite':'true'})
    def mkdir(self, dirname):
        """Create a subdirectory in the server's current directory."""
        form = self.makeform(newDirectoryName=dirname)
        return self.post("smb/file-manager/create-directory", data=form)
    def createemptyfile(self, filename):
        """Create an empty file in the server's current directory."""
        # todo: figure out what the htmlTemplate is for.
        form = self.makeform(newFileName=filename, htmlTemplate=False)
        return self.post("smb/file-manager/create-file", data=form)
    def rename(self, oldname, newname):
        """Rename a single file or directory."""
        form = self.makeform(oldname, newFileName=newname)
        return self.post("smb/file-manager/rename", data=form)
    def copy(self, filelist, destination):
        """Copy files to destination (no overwrite)."""
        form = self.makeform(*filelist)
        # note: aiohttp inconsistency: in form i can use booleans, in params i can't
        return self.post("smb/file-manager/copy-files", data=form, params={"destinationDir":destination, "overwrite":'false'})
    def move(self, filelist, destination):
        """Move files to destination (no overwrite)."""
        form = self.makeform(*filelist)
        return self.post("smb/file-manager/move-files", data=form, params={"destinationDir":destination, "overwrite":'false'})
    async def editfile(self, dirname, filename, data):
        """Replace the contents of dirname/filename with *data*; raises on server error."""
        form = self.makeform(eol='LF', saveCodepage='UTF-8', loadCodepage='UTF-8', code=data)
        resp = await self.post("smb/file-manager/edit", data=form, params={"currentDir":dirname, 'file':filename})
        error = ExtractError(await resp.text())
        resp.close()
        if error:
            raise Exception(error)
    def upload(self, fh, filename):
        """Upload the contents of file object fh as *filename* (multipart form)."""
        form = aiohttp.FormData(self.makeform())
        form.add_field(filename, fh, filename=filename, content_type='application/octet-stream')
        return self.post("smb/file-manager/upload", data=form)
######################################################################################
# -- cmdline --- -- async func -- -- host method -- -- url --
# ls listfiles listdir(dirname) list-data
# cat/get downloadfile download(dirname, filename) download
# rm delfiles delfiles(list) delete
# du calcsize calcsize(list) calculate-size
# zip makezip makezip(name, list) create-archive
# unzip unzip unzip(name) extract-archive
# mkdir createdir mkdir(name) create-directory
# empty emptyfile createemptyfile(name) create-file
# rename(old, new) rename
# cp copyfiles copy(list, dst) copy-files
# mv movefiles move(list, dst) move-files
# put uploadfile upload(fh, filename) upload
# edit editfile editfile(dirname, filename, data) edit
async def listfiles(host, dirname, args):
    """Print an ls-style listing of *dirname*; recurses when args.recurse.

    On a server-side error either prints and continues (args.ignoreerror)
    or raises.
    """
    resp = await host.listdir(dirname)
    info = await resp.json()
    if args.verbose:
        print(info)
    resp.close()
    if info.get('status') == 'error':
        if args.ignoreerror:
            print("ERROR", dirname, info.get('message'))
            return
        raise Exception(info.get('message'))
    print("%s:" % dirname)
    for finfo in info["data"]:
        # build a unix-style permission column: 'd' flag + perms without spaces
        perms = "d" if finfo.get("isDirectory") else "-"
        perms += finfo["filePerms"].replace(" ", "")
        tstr = datetime.datetime.fromtimestamp(int(finfo["modificationTimestamp"]))
        print("%-10s %-12s %-12s %12s %s %s" % (perms, finfo["user"], finfo["group"], finfo["size"], tstr.strftime("%Y-%m-%d %H:%M:%S"), finfo["name"]))
    print()
    if args.recurse:
        for finfo in info["data"]:
            # skip the '.' and '..' pseudo entries to avoid infinite recursion
            if finfo.get("isDirectory") and finfo["name"] not in ("..", "."):
                await listfiles(host, os.path.join(dirname, finfo["name"]), args)
async def downloadfile(host, srcfilename, dst):
    """Download remote *srcfilename* to *dst*.

    dst may be an open file object, '-'/empty for stdout, an existing local
    directory (file keeps its remote name), or a local file path.
    """
    dirname, srcfilename = os.path.split(srcfilename)
    if type(dst) not in (bytes, str, unicode):
        # already an open file object
        fh = dst
    elif not dst or dst == '-':
        fh = sys.stdout.buffer
    elif os.path.isdir(dst):
        dst = os.path.join(dst, srcfilename)
        fh = open(dst, "wb")
    else:
        fh = open(dst, "wb")
    # flush pending text output before writing binary data to stdout.buffer
    sys.stdout.flush()
    await host.download(dirname, srcfilename, fh)
async def uploadfile(host, srcfilename, dstname):
    """Upload data to the remote path *dstname*.

    srcfilename may be a local file path, '-' for stdin, or an already-open
    binary file object.

    Raises Exception when the remote target directory does not exist or
    cannot be entered.
    """
    dirname, dstfilename = os.path.split(dstname)
    if dirname:
        # listdir doubles as a server-side 'chdir'; verify that it worked so
        # we don't upload into the wrong directory.
        resp = await host.listdir(dirname)
        info = await resp.json()
        resp.close()
        if info.get('status') == 'error':
            raise Exception(info.get('message'))
        newdir = info.get('state', dict()).get('currentDir')
        if newdir != dirname:
            print("Failed to change to '%s': curdir='%s'" % (dirname, newdir))
            raise Exception("cannot change to directory")
    try:
        pathtypes = (bytes, str, unicode)
    except NameError:
        # 'unicode' shim not present (module loaded standalone) - py3 only
        pathtypes = (bytes, str)
    if type(srcfilename) not in pathtypes:
        # already an open file object - use it as-is.
        # BUG FIX: the next test used to be a second 'if' instead of 'elif',
        # so a file object fell through into open(fh, "rb") and crashed
        # (compare the correct elif chain in downloadfile).
        fh = srcfilename
    elif srcfilename == '-':
        fh = sys.stdin.buffer
    else:
        fh = open(srcfilename, "rb")
    resp = await host.upload(fh, dstfilename)
    res = await resp.text()
    resp.close()
async def makezip(host, dirname, zipname, files):
    """Create the archive *zipname* on the server from *files* in *dirname*.

    Raises Exception when the directory cannot be entered, when zipname
    contains a path separator, or when the server reports failure.
    """
    if dirname and dirname not in ('', '/'):
        # listdir doubles as a server-side 'chdir'
        resp = await host.listdir(dirname)
        info = await resp.json()
        resp.close()
        if info.get("status") == "error":
            raise Exception(info["message"])
    # The server appends '.zip' itself, so strip only a trailing '.zip'.
    # BUG FIX: str.replace removed the substring ANYWHERE in the name,
    # mangling e.g. 'my.zip.backup' into 'my.backup'.
    if zipname.endswith('.zip'):
        zipname = zipname[:-len('.zip')]
    if zipname.find('/') >= 0:
        raise Exception("use -C to specify where the zipfile goes")
    resp = await host.makezip(zipname, files)
    info = await resp.json()
    resp.close()
    if info.get('status') == 'fail':
        raise Exception(info.get('message'))
async def unzip(host, zipname):
    """Extract *zipname* on the server; raises on a reported server error."""
    resp = await host.unzip(zipname)
    info = await resp.json()
    resp.close()
    msgs = info.get('statusMessages', [])
    if msgs and msgs[0].get('status')=='error':
        raise Exception(msgs[0].get('content'))
async def removedir(host, dirname):
    """Delete *dirname* on the server and print the raw server response."""
    # note: this is always recursively, and always succeeds
    resp = await host.delfiles([dirname])
    print(await resp.text())
    resp.close()
async def createdir(host, dirname):
    """Create *dirname* on the server; raises on server-side errors."""
    # split off the leaf directory; listdir on the parent acts as 'chdir'
    basepath, dirname = os.path.split(dirname.rstrip('/'))
    if basepath not in ('', '/'):
        resp = await host.listdir(basepath)
        info = await resp.json()
        resp.close()
        if info.get("status") == "error":
            raise Exception(info["message"])
    resp = await host.mkdir(dirname)
    info = await resp.json()
    resp.close()
    if info.get("status") == "error":
        raise Exception(info["message"])
async def delfiles(host, files):
    """Delete the given remote files and print the raw server response."""
    resp = await host.delfiles(files)
    print(await resp.text())
    resp.close()
async def emptyfile(host, filename):
    """Create an empty remote file at *filename*; raises on server errors."""
    dirname, filename = os.path.split(filename)
    if dirname not in ('', '/'):
        # listdir acts as a server-side 'chdir' into the target directory
        resp = await host.listdir(dirname)
        info = await resp.json()
        resp.close()
        if info.get("status") == "error":
            raise Exception(info["message"])
    resp = await host.createemptyfile(filename)
    info = await resp.json()
    resp.close()
    if info.get("status") == "error":
        raise Exception(info["message"])
async def copyfiles(host, dirname, files, destination):
    """Copy *files* (relative to *dirname*) to remote *destination*."""
    if dirname and dirname not in ('', '/'):
        # listdir acts as a server-side 'chdir' into the source directory
        resp = await host.listdir(dirname)
        info = await resp.json()
        resp.close()
        if info.get("status") == "error":
            raise Exception(info["message"])
    resp = await host.copy(files, destination)
    info = await resp.json()
    resp.close()
    msgs = info.get('statusMessages', [])
    if msgs and msgs[0].get('status')=='error':
        raise Exception(msgs[0].get('content'))
async def movefiles(host, dirname, files, destination):
    """Move *files* (relative to *dirname*) to remote *destination*."""
    if dirname and dirname not in ('', '/'):
        # listdir acts as a server-side 'chdir' into the source directory
        resp = await host.listdir(dirname)
        info = await resp.json()
        resp.close()
        if info.get("status") == "error":
            raise Exception(info["message"])
    resp = await host.move(files, destination)
    info = await resp.json()
    resp.close()
    msgs = info.get('statusMessages', [])
    if msgs and msgs[0].get('status')=='error':
        raise Exception(msgs[0].get('content'))
async def calcsize(host, dirname, files):
    """Print the server-computed total size of *files* in *dirname*."""
    if dirname and dirname not in ('', '/'):
        # listdir acts as a server-side 'chdir' into the target directory
        resp = await host.listdir(dirname)
        info = await resp.json()
        resp.close()
        if info.get("status") == "error":
            raise Exception(info["message"])
    resp = await host.calcsize(files)
    info = await resp.json()
    resp.close()
    msgs = info.get('statusMessages', [])
    if msgs and msgs[0].get('status')=='error':
        raise Exception(msgs[0].get('content'))
    # NOTE(review): msgs[0] is accessed without checking msgs is non-empty;
    # an empty statusMessages list would raise IndexError here -- confirm.
    print(msgs[0].get('content'))
async def editfile(host, filename, contents):
    """Replace the contents of the remote *filename* with *contents*."""
    dirname, filename = os.path.split(filename)
    await host.editfile(dirname, filename, contents)
async def dologin(host, args):
    """Initialize the session: create the client, log in, fetch the CSRF token."""
    await host.start(args)
#################################
def makeparser():
    """
    Create the commandline parser.

    Returns an argparse.ArgumentParser with one subparser per supported
    command. The subparser mapping is stored on ``parser.subparsers`` so
    the 'help' command can print per-command usage later.
    """
    import argparse
    parser = argparse.ArgumentParser(description='plesk file utility')
    parser.add_argument('--config', '-c', type=str, help='configuration to use')
    parser.add_argument('--baseurl', help='plesk base url')
    parser.add_argument('--ignoresslerrors', '-k', action='store_true', help='Ignore ssl certificate errors')
    parser.add_argument('--username', '-u', help='username for login')
    parser.add_argument('--password', '-p', help='password for login')
    parser.add_argument('--verbose', '-v', action='store_true', help='print results from web requests')
    sub = parser.add_subparsers(dest='command')
    ls = sub.add_parser('ls', help='list files')
    ls.add_argument('--recurse', '-r', action='store_true', help='recursively list directories')
    ls.add_argument('--ignoreerror', '-c', action='store_true', help='continue after error')
    ls.add_argument('dirname', type=str, help='which directory to list')
    cat = sub.add_parser('cat', help='print remote file contents to stdout')
    cat.add_argument('filename', help='which file')
    tee = sub.add_parser('tee', help='save stdin to a remote file')
    tee.add_argument('filename', help='which file')
    # FIX: this parser was previously assigned to a reused 'cat' variable;
    # renamed to 'get' to match the subcommand it configures.
    get = sub.add_parser('get', help='copy remote file')
    get.add_argument('filename', help='which remote file')
    get.add_argument('destination', help='where to store locally', default='.')
    put = sub.add_parser('put', help='upload file')
    put.add_argument('filename', help='which local file')
    put.add_argument('destination', help='where to store remotely')
    edit = sub.add_parser('edit', help='edit file contents')
    edit.add_argument('filename', help='which file')
    edit.add_argument('contents', help='the new contents')
    azip = sub.add_parser('zip', help='archive files')
    azip.add_argument('--dirname', '-C', help='the directory containing the requested files')
    azip.add_argument('zipname', help='name of the zip archive')
    azip.add_argument('files', nargs='*', help='which files to zip')
    unzip = sub.add_parser('unzip', help='unarchive files')
    unzip.add_argument('zipname', help='name of the zip archive')
    mkdir = sub.add_parser('mkdir', help='create directory')
    mkdir.add_argument('dirname')
    rmdir = sub.add_parser('rmdir', help='delete directory')
    rmdir.add_argument('dirname')
    delfiles = sub.add_parser('rm', help='delete files')
    delfiles.add_argument('files', nargs='*')
    emptyfile = sub.add_parser('empty', help='create empty file')
    emptyfile.add_argument('filename')
    movefiles = sub.add_parser('mv', help='move files, note: the destination must be an absolute path')
    movefiles.add_argument('--dirname', '-C', help='the directory containing the requested files')
    movefiles.add_argument('files', nargs='+')
    copyfiles = sub.add_parser('cp', help='copy files, note: the destination must be an absolute path')
    copyfiles.add_argument('--dirname', '-C', help='the directory containing the requested files')
    copyfiles.add_argument('files', nargs='+')
    calcsize = sub.add_parser('du', help='calc size of filelist')
    calcsize.add_argument('--dirname', '-C', help='the directory containing the requested files')
    calcsize.add_argument('files', nargs='*')
    # renamed from 'help' to avoid shadowing the builtin
    helpcmd = sub.add_parser('help', help='verbose usage')
    helpcmd.add_argument('subcommand', nargs='?')
    # keep the available choices for later use in 'help'
    parser.subparsers = sub.choices
    return parser
def loadconfig():
    """
    Read the user configuration from ~/.pleskrc.

    Returns a configparser.ConfigParser; a missing file simply yields an
    empty configuration (ConfigParser.read silently skips unreadable paths).
    """
    # expanduser works even when $HOME is unset (falls back to the pwd
    # database), unlike the previous os.environ['HOME'] which raised
    # KeyError in that case.
    homedir = os.path.expanduser('~')
    import configparser
    config = configparser.ConfigParser()
    config.read(os.path.join(homedir, ".pleskrc"))
    return config
def applyconfig(args, config):
    """
    Fill in missing commandline options from the configuration.

    Take the section specified in the config commandline option,
    or just take the first section in the config file, and set
    defaults for several commandline options. When no section is
    available at all (e.g. no ~/.pleskrc exists) the args are left
    untouched instead of crashing with ``config[None]``.
    """
    cfgname = args.config
    if config.sections() and not cfgname:
        cfgname = config.sections()[0]
    if not cfgname:
        # no config file and no --config given: nothing to apply
        return
    # an explicitly requested but missing section still raises KeyError,
    # which surfaces the user's typo
    section = config[cfgname]
    if not args.username: args.username = section.get('username')
    if not args.baseurl: args.baseurl = section.get('baseurl')
    if not args.password: args.password = section.get('password')
    if not args.ignoresslerrors: args.ignoresslerrors = section.get('ignoresslerrors')
def main():
    """CLI entry point: parse args, log in, and run the selected command."""
    loop = asyncio.get_event_loop()
    config = loadconfig()
    parser = makeparser()
    args = parser.parse_args()
    applyconfig(args, config)
    host = WebHosting(loop, args.baseurl)
    tasks = []
    # map the chosen subcommand to the coroutine that implements it
    if args.command == 'ls':
        tasks.append(listfiles(host, args.dirname, args))
    elif args.command == 'cat':
        tasks.append(downloadfile(host, args.filename, "-"))
    elif args.command == 'tee':
        tasks.append(uploadfile(host, "-", args.filename))
    elif args.command == 'get':
        tasks.append(downloadfile(host, args.filename, args.destination))
    elif args.command == 'put':
        tasks.append(uploadfile(host, args.filename, os.path.join(args.destination, args.filename)))
    elif args.command == 'edit':
        tasks.append(editfile(host, args.filename, args.contents))
    elif args.command == 'zip':
        tasks.append(makezip(host, args.dirname, args.zipname, args.files))
    elif args.command == 'unzip':
        tasks.append(unzip(host, args.zipname))
    elif args.command == 'rmdir':
        tasks.append(removedir(host, args.dirname))
    elif args.command == 'mkdir':
        tasks.append(createdir(host, args.dirname))
    elif args.command == 'rm':
        tasks.append(delfiles(host, args.files))
    elif args.command == 'empty':
        tasks.append(emptyfile(host, args.filename))
    elif args.command == 'cp':
        # last positional is the destination, the rest are sources
        tasks.append(copyfiles(host, args.dirname, args.files[:-1], args.files[-1]))
    elif args.command == 'mv':
        tasks.append(movefiles(host, args.dirname, args.files[:-1], args.files[-1]))
    elif args.command == 'du':
        tasks.append(calcsize(host, args.dirname, args.files))
    elif args.command == 'help':
        if args.subcommand:
            p = parser.subparsers.get(args.subcommand)
            if p:
                p.print_help()
                # FIX: was `sys,exit(0)` — a tuple expression that only
                # terminated because the site-provided `exit` builtin
                # happened to raise SystemExit.
                sys.exit(0)
        parser.print_help()
        print()
        for p in parser.subparsers.values():
            p.print_usage()
            print()
        sys.exit(0)
    else:
        parser.print_usage()
        sys.exit(1)
    loop.run_until_complete(dologin(host, args))
    try:
        if tasks:
            loop.run_until_complete(asyncio.gather(*tasks))
    except Exception as e:
        print("ERROR", e)
        sys.exit(1)
# Script entry point: run the CLI when executed directly.
if __name__ == '__main__':
    main()
| 35.049475 | 250 | 0.604329 |
acf6eef0ee5cacd55cfd3a12fe29cb50780718b0 | 4,440 | py | Python | tests/test_chillerconstantcop.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 19 | 2015-12-08T23:33:51.000Z | 2022-01-31T04:41:10.000Z | tests/test_chillerconstantcop.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 2 | 2019-10-04T10:57:00.000Z | 2021-10-01T06:46:17.000Z | tests/test_chillerconstantcop.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 7 | 2015-11-04T02:25:01.000Z | 2021-12-08T03:14:28.000Z | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.plant_heating_and_cooling_equipment import ChillerConstantCop
log = logging.getLogger(__name__)
class TestChillerConstantCop(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_chillerconstantcop(self):
pyidf.validation_level = ValidationLevel.error
obj = ChillerConstantCop()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_nominal_capacity = 0.0
obj.nominal_capacity = var_nominal_capacity
# real
var_nominal_cop = 0.0001
obj.nominal_cop = var_nominal_cop
# real
var_design_chilled_water_flow_rate = 0.0
obj.design_chilled_water_flow_rate = var_design_chilled_water_flow_rate
# real
var_design_condenser_water_flow_rate = 0.0
obj.design_condenser_water_flow_rate = var_design_condenser_water_flow_rate
# node
var_chilled_water_inlet_node_name = "node|Chilled Water Inlet Node Name"
obj.chilled_water_inlet_node_name = var_chilled_water_inlet_node_name
# node
var_chilled_water_outlet_node_name = "node|Chilled Water Outlet Node Name"
obj.chilled_water_outlet_node_name = var_chilled_water_outlet_node_name
# node
var_condenser_inlet_node_name = "node|Condenser Inlet Node Name"
obj.condenser_inlet_node_name = var_condenser_inlet_node_name
# node
var_condenser_outlet_node_name = "node|Condenser Outlet Node Name"
obj.condenser_outlet_node_name = var_condenser_outlet_node_name
# alpha
var_condenser_type = "AirCooled"
obj.condenser_type = var_condenser_type
# alpha
var_chiller_flow_mode = "ConstantFlow"
obj.chiller_flow_mode = var_chiller_flow_mode
# real
var_sizing_factor = 0.0001
obj.sizing_factor = var_sizing_factor
# real
var_basin_heater_capacity = 0.0
obj.basin_heater_capacity = var_basin_heater_capacity
# real
var_basin_heater_setpoint_temperature = 2.0
obj.basin_heater_setpoint_temperature = var_basin_heater_setpoint_temperature
# object-list
var_basin_heater_operating_schedule_name = "object-list|Basin Heater Operating Schedule Name"
obj.basin_heater_operating_schedule_name = var_basin_heater_operating_schedule_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.chillerconstantcops[0].name, var_name)
self.assertAlmostEqual(idf2.chillerconstantcops[0].nominal_capacity, var_nominal_capacity)
self.assertAlmostEqual(idf2.chillerconstantcops[0].nominal_cop, var_nominal_cop)
self.assertAlmostEqual(idf2.chillerconstantcops[0].design_chilled_water_flow_rate, var_design_chilled_water_flow_rate)
self.assertAlmostEqual(idf2.chillerconstantcops[0].design_condenser_water_flow_rate, var_design_condenser_water_flow_rate)
self.assertEqual(idf2.chillerconstantcops[0].chilled_water_inlet_node_name, var_chilled_water_inlet_node_name)
self.assertEqual(idf2.chillerconstantcops[0].chilled_water_outlet_node_name, var_chilled_water_outlet_node_name)
self.assertEqual(idf2.chillerconstantcops[0].condenser_inlet_node_name, var_condenser_inlet_node_name)
self.assertEqual(idf2.chillerconstantcops[0].condenser_outlet_node_name, var_condenser_outlet_node_name)
self.assertEqual(idf2.chillerconstantcops[0].condenser_type, var_condenser_type)
self.assertEqual(idf2.chillerconstantcops[0].chiller_flow_mode, var_chiller_flow_mode)
self.assertAlmostEqual(idf2.chillerconstantcops[0].sizing_factor, var_sizing_factor)
self.assertAlmostEqual(idf2.chillerconstantcops[0].basin_heater_capacity, var_basin_heater_capacity)
self.assertAlmostEqual(idf2.chillerconstantcops[0].basin_heater_setpoint_temperature, var_basin_heater_setpoint_temperature)
self.assertEqual(idf2.chillerconstantcops[0].basin_heater_operating_schedule_name, var_basin_heater_operating_schedule_name) | 47.234043 | 132 | 0.750225 |
acf6ef358b5180ed15c3e7339b01b08390760cd0 | 969 | py | Python | quartermaster/logging.py | Mannheimd/Quartermaster | 6961ebccab2e9845e7b98ba470daab45e5061bcf | [
"MIT"
] | 1 | 2018-01-29T23:58:56.000Z | 2018-01-29T23:58:56.000Z | quartermaster/logging.py | Mannheimd/Quartermaster | 6961ebccab2e9845e7b98ba470daab45e5061bcf | [
"MIT"
] | null | null | null | quartermaster/logging.py | Mannheimd/Quartermaster | 6961ebccab2e9845e7b98ba470daab45e5061bcf | [
"MIT"
] | null | null | null | from collections import OrderedDict
import logging
levels = OrderedDict((lvl, getattr(logging, lvl.upper()))
for lvl in ('critical', 'error', 'warning', 'info', 'debug'))
def log_file(name, mode='a', verbosity=levels['debug']):
fh = logging.FileHandler(name, mode=mode)
fh.setLevel(verbosity)
return fh
def logger(verbosity=levels['error'], log_file=None):
"""Create a logger which streams to the console, and optionally a file."""
# create/get logger for this instance
logger = logging.getLogger(__name__)
logger.setLevel(levels['debug'])
fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# with stream (console) handle
ch = logging.StreamHandler()
ch.setLevel(verbosity)
ch.setFormatter(fmt)
logger.addHandler(ch)
# optionally with file handle
if log_file:
log_file.setFormatter(fmt)
logger.addHandler(log_file)
return logger
| 28.5 | 82 | 0.661507 |
acf6ef80d2482adf389c1028af5475f6c572bac5 | 3,245 | py | Python | onshape_to_robot/stl_combine.py | MisoRobotics/onshape-to-robot | 1abb2397f27c332b20af83a3c8b9d9e143c2ba66 | [
"MIT"
] | null | null | null | onshape_to_robot/stl_combine.py | MisoRobotics/onshape-to-robot | 1abb2397f27c332b20af83a3c8b9d9e143c2ba66 | [
"MIT"
] | null | null | null | onshape_to_robot/stl_combine.py | MisoRobotics/onshape-to-robot | 1abb2397f27c332b20af83a3c8b9d9e143c2ba66 | [
"MIT"
] | null | null | null | import numpy as np
import shutil
import math
import subprocess
import stl
import os
from stl import mesh
from colorama import Fore, Back, Style
def load_mesh(stl_file):
return mesh.Mesh.from_file(stl_file)
def save_mesh(mesh, stl_file):
mesh.save(stl_file, mode=stl.Mode.BINARY)
def combine_meshes(m1, m2):
return mesh.Mesh(np.concatenate([m1.data, m2.data]))
def apply_matrix(mesh, matrix):
rotation = matrix[0:3, 0:3]
translation = matrix[0:3, 3:4].T.tolist()
def transform(points):
return (rotation*np.matrix(points).T).T + translation*len(points)
mesh.v0 = transform(mesh.v0)
mesh.v1 = transform(mesh.v1)
mesh.v2 = transform(mesh.v2)
mesh.normals = transform(mesh.normals)
# Script taken from doing the needed operation
# (Filters > Remeshing, Simplification and Reconstruction >
# Quadric Edge Collapse Decimation, with parameters:
# 0.9 percentage reduction (10%), 0.3 Quality threshold (70%)
# Target number of faces is ignored with those parameters
# conserving face normals, planar simplification and
# post-simplification cleaning)
# And going to Filter > Show current filter script
filter_script_mlx = """<!DOCTYPE FilterScript>
<FilterScript>
<filter name="Quadric Edge Collapse Decimation">
<Param type="RichFloat" value="%reduction%" name="TargetPerc"/>
<Param type="RichFloat" value="0.3" name="QualityThr"/>
<Param type="RichBool" value="false" name="PreserveBoundary"/>
<Param type="RichFloat" value="1" name="BoundaryWeight"/>
<Param type="RichBool" value="false" name="PreserveNormal"/>
<Param type="RichBool" value="false" name="PreserveTopology"/>
<Param type="RichBool" value="false" name="OptimalPlacement"/>
<Param type="RichBool" value="true" name="PlanarQuadric"/>
<Param type="RichBool" value="false" name="QualityWeight"/>
<Param type="RichBool" value="true" name="AutoClean"/>
<Param type="RichBool" value="false" name="Selected"/>
</filter>
</FilterScript>
"""
def create_tmp_filter_file(filename='filter_file_tmp.mlx', reduction=0.9):
    """
    Write a meshlab filter script with the given face-reduction ratio and
    return the path of the written script.
    """
    import tempfile
    # use the platform temp directory instead of a hard-coded '/tmp'
    # (identical on Linux unless TMPDIR is set, portable elsewhere)
    path = os.path.join(tempfile.gettempdir(), filename)
    with open(path, 'w', encoding="utf-8") as stream:
        stream.write(filter_script_mlx.replace('%reduction%', str(reduction)))
    return path
def reduce_faces(in_file, out_file, reduction=0.5):
    """Run meshlabserver to decimate `in_file` into `out_file`.

    `reduction` is the target fraction of faces to keep (TargetPerc in the
    generated filter script). Output of meshlabserver goes to /tmp/meshlab.log.
    """
    filter_script_path = create_tmp_filter_file(reduction=reduction)
    # Add input mesh
    command = "meshlabserver -i " + in_file
    # Add the filter script
    command += " -s " + filter_script_path
    # Add the output filename and output flags
    command += " -o " + out_file + " -om vn fn"
    command += " > /tmp/meshlab.log 2>&1"
    # Execute command
    # NOTE(review): shell=True with file names interpolated into the command
    # string — paths containing spaces or shell metacharacters will break or
    # be interpreted by the shell; consider a list argv with shell=False.
    # print("Going to execute: " + command)
    output = subprocess.check_output(command, shell=True)
    # last_line = output.splitlines()[-1]
    # print("Done:")
    #print(in_file + " > " + out_file + ": " + last_line)
def simplify_stl(stl_file, max_size=3):
    """Decimate `stl_file` in place if it is larger than `max_size` MiB."""
    size_M = os.path.getsize(stl_file)/(1024*1024)
    if size_M > max_size:
        print(Fore.BLUE + '+ '+os.path.basename(stl_file) +
              (' is %.2f M, running mesh simplification' % size_M))
        # work on a copy so meshlab reads and writes different files
        shutil.copyfile(stl_file, '/tmp/simplify.stl')
        # reduction ratio scales with how far over the limit the file is
        reduce_faces('/tmp/simplify.stl', stl_file, max_size / size_M)
| 34.521277 | 78 | 0.693991 |
acf6f004d8d62d8f398098a9d6bdf4452b483458 | 959 | py | Python | analysis/hyperwords/hyperwords/representations/representation_factory.py | tlranda/FULL-W2V | 66189559387ec1d94e1f489942159874785c4c05 | [
"Apache-2.0"
] | null | null | null | analysis/hyperwords/hyperwords/representations/representation_factory.py | tlranda/FULL-W2V | 66189559387ec1d94e1f489942159874785c4c05 | [
"Apache-2.0"
] | null | null | null | analysis/hyperwords/hyperwords/representations/representation_factory.py | tlranda/FULL-W2V | 66189559387ec1d94e1f489942159874785c4c05 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
from embedding import SVDEmbedding, EnsembleEmbedding, Embedding
from explicit import PositiveExplicit
def create_representation(args):
    """Build a word-representation object from docopt-style `args`.

    Supported <representation> values: 'PPMI', 'SVD'; anything else is
    treated as a plain embedding. The --w+c flag selects an ensemble of
    word and context vectors where supported (not implemented for PPMI).
    """
    rep_type = args['<representation>']
    path = args['<representation_path>']
    neg = int(args['--neg'])
    w_c = args['--w+c']
    eig = float(args['--eig'])
    if rep_type == 'PPMI':
        if w_c:
            raise Exception('w+c is not implemented for PPMI.')
        else:
            return PositiveExplicit(path, True, neg)
    elif rep_type == 'SVD':
        if w_c:
            return EnsembleEmbedding(SVDEmbedding(path, False, eig, False), SVDEmbedding(path, False, eig, True), True)
        else:
            return SVDEmbedding(path, True, eig)
    else:
        if w_c:
            return EnsembleEmbedding(Embedding(path + '.words', False), Embedding(path + '.contexts', False), True)
        else:
            return Embedding(path + '.words', True)
acf6f06003d289616b4335e39ca2b89330a3a48d | 1,692 | py | Python | bin/reference2single_sequence.py | Kinyugo/bactmap | f56b55fd154140913fb0f8330ae8a8730e006708 | [
"MIT"
] | 29 | 2019-08-09T19:31:48.000Z | 2022-03-17T08:05:59.000Z | bin/reference2single_sequence.py | Kinyugo/bactmap | f56b55fd154140913fb0f8330ae8a8730e006708 | [
"MIT"
] | 42 | 2020-02-11T15:11:06.000Z | 2022-03-24T11:33:25.000Z | bin/reference2single_sequence.py | Kinyugo/bactmap | f56b55fd154140913fb0f8330ae8a8730e006708 | [
"MIT"
] | 20 | 2020-02-11T13:00:58.000Z | 2022-03-23T08:05:14.000Z | #!/usr/bin/env python
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import textwrap
import argparse, sys, os
class ParserWithErrors(argparse.ArgumentParser):
def error(self, message):
print('{0}\n\n'.format(message))
self.print_help()
sys.exit(2)
def is_valid_file(self, parser, arg):
if not os.path.isfile(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
def argparser():
description = """
A script to parse a filtered VCF and
"""
parser = ParserWithErrors(description = description)
parser.add_argument("-r", "--reference_file", required=True,
help="reference fasta file path",
type=lambda x: parser.is_valid_file(parser, x))
parser.add_argument("-o", "--output_fasta_file", required=True,
help="file path to output fasta file")
return parser
def combine_sequences(reference_sequence):
records = list(SeqIO.parse(reference_sequence, "fasta"))
new_sequence = ''.join([str(record.seq) for record in records])
new_id = '|'.join([record.id for record in records])
if len(new_id) > 100:
new_id = new_id[:97] + '...'
new_record = SeqRecord(Seq(new_sequence), id = new_id, description = '')
return(new_record)
def write_sequence(filepath, record):
with open(filepath, 'w') as output:
SeqIO.write(record, output, "fasta")
if __name__ == '__main__':
parser = argparser()
args = parser.parse_args()
new_record = combine_sequences(args.reference_file)
write_sequence(args.output_fasta_file, new_record)
| 31.333333 | 76 | 0.648345 |
acf6f19f180efe616c97b1b55541d718ce885bbf | 8,097 | py | Python | vertica_python/vertica/column.py | alonme/vertica-python | 208685ce6285bde1edab6d18500ef0887d36bf91 | [
"Apache-2.0"
] | null | null | null | vertica_python/vertica/column.py | alonme/vertica-python | 208685ce6285bde1edab6d18500ef0887d36bf91 | [
"Apache-2.0"
] | null | null | null | vertica_python/vertica/column.py | alonme/vertica-python | 208685ce6285bde1edab6d18500ef0887d36bf91 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018-2022 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, division, absolute_import
import re
from collections import namedtuple
from datetime import date, datetime
from decimal import Decimal
from six import PY2
from uuid import UUID
# noinspection PyCompatibility,PyUnresolvedReferences
from dateutil import parser, tz
from .. import errors
from ..datatypes import VerticaType, getDisplaySize, getPrecision, getScale
from ..compat import as_str, as_text
YEARS_RE = re.compile(r"^([0-9]+)-")
# these methods are bad...
#
# a few timestamp with tz examples:
# 2013-01-01 00:00:00
# 2013-01-01 00:00:00+00
# 2013-01-01 00:00:00.01+00
# 2013-01-01 00:00:00.00001+00
#
# Vertica stores all data in UTC:
# "TIMESTAMP WITH TIMEZONE (TIMESTAMPTZ) data is stored in GMT (UTC) by
# converting data from the current local time zone to GMT."
# Vertica fetches data in local timezone:
# "When TIMESTAMPTZ data is used, data is converted back to use the current
# local time zone"
# If vertica boxes are on UTC, you should never have a non +00 offset (as
# far as I can tell) ie. inserting '2013-01-01 00:00:00.01 EST' to a
# timestamptz type stores: 2013-01-01 05:00:00.01+00
# select t AT TIMEZONE 'America/New_York' returns: 2012-12-31 19:00:00.01
def timestamp_parse(s):
    """Parse a TIMESTAMP byte string into datetime, clamping years > 9999.

    :raises DataError if the value cannot be parsed at all.
    """
    s = as_str(s)
    try:
        dt = _timestamp_parse(s)
    except ValueError:
        # Value error, year might be over 9999 (datetime caps at 9999):
        # strip the year, parse the remainder, then re-attach a clamped year.
        year_match = YEARS_RE.match(s)
        if year_match:
            year = year_match.groups()[0]
            dt = _timestamp_parse_without_year(s[len(year) + 1:])
            dt = dt.replace(year=min(int(year), 9999))
        else:
            raise errors.DataError('Timestamp value not supported: %s' % s)
    return dt
def _timestamp_parse(s):
if len(s) == 19:
return datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
return datetime.strptime(s, '%Y-%m-%d %H:%M:%S.%f')
def _timestamp_parse_without_year(s):
if len(s) == 14:
return datetime.strptime(s, '%m-%d %H:%M:%S')
return datetime.strptime(s, '%m-%d %H:%M:%S.%f')
def timestamp_tz_parse(s):
    """Parse a TIMESTAMPTZ string, with a fast path for the common '+00' case."""
    s = as_str(s)
    # if timezone is simply UTC...
    if s.endswith('+00'):
        # remove time zone suffix and parse as a naive timestamp
        ts = timestamp_parse(s[:-3].encode(encoding='utf-8', errors='strict'))
        ts = ts.replace(tzinfo=tz.tzutc())
        return ts
    # otherwise do a real parse via dateutil (slower)
    return parser.parse(s)
def date_parse(s):
    """
    Parses value of a DATE type.

    :param s: string to parse into date
    :return: an instance of datetime.date
    :raises NotSupportedError when a date Before Christ is encountered
    """
    s = as_str(s)
    if s.endswith(' BC'):
        raise errors.NotSupportedError('Dates Before Christ are not supported. Got: {0}'.format(s))
    # Value error, year might be over 9999: clamp each component at 9999
    # (only the year can realistically exceed it).
    return date(*map(lambda x: min(int(x), 9999), s.split('-')))
def time_parse(s):
    """Parse a TIME value ('HH:MM:SS' or 'HH:MM:SS.ffffff') into datetime.time."""
    text = as_str(s)
    # 8 characters means no fractional-seconds component
    fmt = '%H:%M:%S' if len(text) == 8 else '%H:%M:%S.%f'
    return datetime.strptime(text, fmt).time()
def binary_data_parse(s):
    """
    Parses text value of a BINARY/VARBINARY/LONG VARBINARY type.

    The wire format escapes a literal backslash as a doubled backslash and
    any other escaped byte as a backslash followed by three octal digits;
    all other bytes pass through unchanged.

    :param s: bytearray
    :return: bytes
    """
    buf = []
    i = 0
    while i < len(s):
        c = s[i: i+1]
        if c == b'\\':
            c2 = s[i+1: i+2]
            if c2 == b'\\':  # escaped \
                if PY2: c = str(c)
                i += 2
            else:  # A \xxx octal string: decode the three digits to one byte
                c = chr(int(str(s[i+1: i+4]), 8)) if PY2 else bytes([int(s[i+1: i+4], 8)])
                i += 4
        else:
            if PY2: c = str(c)
            i += 1
        buf.append(c)
    return b''.join(buf)
# Type casting of SQL types bytes representation into Python objects
def vertica_type_cast(type_code, unicode_error):
    """Return a callable converting a column's raw bytes to a Python value.

    Unknown type codes fall back to `bytes` (raw pass-through).
    PERF: the dispatch dict is rebuilt on every call; this runs once per
    column per result set, so the cost is minor.
    """
    typecaster = {
        VerticaType.UNKNOWN: bytes,
        VerticaType.BOOL: lambda s: s == b't',
        VerticaType.INT8: lambda s: int(s),
        VerticaType.FLOAT8: lambda s: float(s),
        VerticaType.CHAR: lambda s: s.decode('utf-8', unicode_error),
        VerticaType.VARCHAR: lambda s: s.decode('utf-8', unicode_error),
        VerticaType.DATE: date_parse,
        VerticaType.TIME: time_parse,
        VerticaType.TIMESTAMP: timestamp_parse,
        VerticaType.TIMESTAMPTZ: timestamp_tz_parse,
        VerticaType.INTERVAL: bytes,
        VerticaType.TIMETZ: bytes,
        VerticaType.NUMERIC: lambda s: Decimal(s.decode('utf-8', unicode_error)),
        VerticaType.VARBINARY: binary_data_parse,
        VerticaType.UUID: lambda s: UUID(s.decode('utf-8', unicode_error)),
        VerticaType.INTERVALYM: bytes,
        VerticaType.LONGVARCHAR: lambda s: s.decode('utf-8', unicode_error),
        VerticaType.LONGVARBINARY: binary_data_parse,
        VerticaType.BINARY: binary_data_parse
    }
    return typecaster.get(type_code, bytes)
# DB-API 2.0 style 7-item column description tuple.
ColumnTuple = namedtuple('Column', ['name', 'type_code', 'display_size', 'internal_size',
                                    'precision', 'scale', 'null_ok'])
class Column(object):
    """Metadata and value converter for one column of a query result."""

    def __init__(self, col, unicode_error='strict'):
        # Describe one query result column
        self.name = col['name']
        self.type_code = col['data_type_oid']
        self.type_name = col['data_type_name']
        self.display_size = getDisplaySize(col['data_type_oid'], col['type_modifier'])
        self.internal_size = col['data_type_size']
        self.precision = getPrecision(col['data_type_oid'], col['type_modifier'])
        self.scale = getScale(col['data_type_oid'], col['type_modifier'])
        self.null_ok = col['null_ok']
        self.is_identity = col['is_identity']
        # bytes-to-Python converter selected by the column's type oid
        self.converter = vertica_type_cast(self.type_code, unicode_error)
        # DB-API 2.0 description tuple for this column
        self.props = ColumnTuple(self.name, self.type_code, self.display_size, self.internal_size,
                                 self.precision, self.scale, self.null_ok)

    def convert(self, s):
        """Convert raw bytes `s` to a Python value; None (SQL NULL) passes through."""
        if s is None:
            return
        return self.converter(s)

    def __str__(self):
        return as_str(str(self.props))

    def __unicode__(self):
        return as_text(str(self.props))

    def __repr__(self):
        return as_str(str(self.props))

    def __iter__(self):
        # iterating a Column yields the 7 description fields
        for prop in self.props:
            yield prop

    def __getitem__(self, key):
        return self.props[key]
| 35.513158 | 99 | 0.65728 |
acf6f263c5c2fced3e4abe7e9e353caf77128185 | 2,580 | py | Python | Python Solutions/StoreCredit/StoreCredit.py | MarcelloLins/GoogleCodeJamChallenges | 527537b176de9dd27755a2eb0175e212df705f37 | [
"Apache-2.0"
] | null | null | null | Python Solutions/StoreCredit/StoreCredit.py | MarcelloLins/GoogleCodeJamChallenges | 527537b176de9dd27755a2eb0175e212df705f37 | [
"Apache-2.0"
] | null | null | null | Python Solutions/StoreCredit/StoreCredit.py | MarcelloLins/GoogleCodeJamChallenges | 527537b176de9dd27755a2eb0175e212df705f37 | [
"Apache-2.0"
] | null | null | null | # File Handling
with open('Inputs/A-large-practice.in', 'r') as f_reader:
with open('Outputs/A-large-practice.out' ,'w') as f_writer:
# Number of Test Cases
test_cases = int(f_reader.readline())
test_case = 1
while test_case <= test_cases:
# Reading next 3 Lines
store_credit = int(f_reader.readline())
products_count = int(f_reader.readline())
product_prices = f_reader.readline()
# Converting array of strings to array of ints
product_prices = map(lambda x: int(x), product_prices.split(' '))
# Array Indexes used to navigate through
anchor_idx, current_idx, found_sum, price_too_high = 0, 0, False, False
print 'Test Case #%d' % test_case
# Iterating over list elements
while anchor_idx < products_count and not found_sum:
# Avoids having to iterate through the array
current_idx = anchor_idx + 1
while current_idx < products_count:
# Summing
anchor_price = product_prices[anchor_idx]
current_price = product_prices[current_idx]
products_sum = anchor_price + current_price
# Code Optimization (If the Anchor Value is already greater than the sum value itself, there's no reason to keep trying to sum it with the next values)
if anchor_price > store_credit:
break
# Have we found the sum ?
if products_sum == store_credit:
print '%d + %d = %d' % (anchor_price, current_price, products_sum)
# Writing result to output file
output_line = 'Case #%d: %d %d\n'
if anchor_idx < current_idx:
f_writer.write((output_line % (test_case, anchor_idx + 1, current_idx + 1)));
else:
f_writer.write((output_line % (test_case, current_idx + 1, anchor_idx + 1)));
found_sum = True
break
current_idx = current_idx + 1
anchor_idx = anchor_idx + 1
test_case = test_case + 1
print 'End of Challenge' | 45.263158 | 171 | 0.495736 |
acf6f28e1a35ee9491713f76bf149a478a4c1c50 | 607 | py | Python | tests/test_lexicon.py | bsburnham/striplog | 0c68f63d645c5bb7a5cc73b9bdaa197c4fb3cc33 | [
"Apache-2.0"
] | 156 | 2015-03-02T03:56:42.000Z | 2022-03-06T05:51:01.000Z | tests/test_lexicon.py | bsburnham/striplog | 0c68f63d645c5bb7a5cc73b9bdaa197c4fb3cc33 | [
"Apache-2.0"
] | 143 | 2015-05-11T12:49:25.000Z | 2022-02-15T19:39:54.000Z | tests/test_lexicon.py | bsburnham/striplog | 0c68f63d645c5bb7a5cc73b9bdaa197c4fb3cc33 | [
"Apache-2.0"
] | 78 | 2015-03-10T05:56:37.000Z | 2022-02-23T08:14:12.000Z | """
Define a suite a tests for the Lexicon module.
"""
from striplog import Lexicon
def test_lexicon():
"""All the tests...
"""
lexicon = Lexicon.default()
s = lexicon.__str__()
assert s is not ''
assert lexicon.__repr__() is not ''
assert lexicon.find_synonym('Halite') == 'salt'
assert len(lexicon.categories) == 5
s = "lt gn ss w/ sp gy sh"
answer = 'lighter green sandstone with spotty gray shale'
assert lexicon.expand_abbreviations(s) == answer
fname = "tutorial/lexicon.json"
l = Lexicon.from_json_file(fname)
assert l.__repr__() is not ''
| 25.291667 | 61 | 0.650741 |
acf6f379ac724c3ad3c38a6e3c90055c85c095cf | 8,744 | py | Python | packages/gtmapi/lmsrvlabbook/tests/test_container_mutations.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 60 | 2018-09-26T15:46:00.000Z | 2021-10-10T02:37:14.000Z | packages/gtmapi/lmsrvlabbook/tests/test_container_mutations.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 1,706 | 2018-09-26T16:11:22.000Z | 2021-08-20T13:37:59.000Z | packages/gtmapi/lmsrvlabbook/tests/test_container_mutations.py | griffinmilsap/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 11 | 2019-03-14T13:23:51.000Z | 2022-01-25T01:29:16.000Z | import pytest
import getpass
import time
import pprint
import subprocess
import requests
from lmsrvlabbook.tests.fixtures import fixture_working_dir, fixture_working_dir_env_repo_scoped, \
build_image_for_jupyterlab, build_image_for_rserver
from gtmcore.container import container_for_context
@pytest.fixture
def start_proxy():
    """Run a configurable-http-proxy for the duration of a test.

    Only does anything when the current user is 'circleci'; elsewhere it is
    a no-op yield.
    """
    if getpass.getuser() == 'circleci':
        cmds = ['configurable-http-proxy', '--port=10000', '--api-port=1999',
                '--no-prepend-path', '--no-include-prefix']
        proxyserver = subprocess.Popen(
            cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # give the proxy a moment to bind its ports before the test starts
        time.sleep(3)
    try:
        yield
    finally:
        # same user guard as above, so `proxyserver` is defined when used
        if getpass.getuser() == 'circleci':
            time.sleep(1)
            proxyserver.kill()
def test_start_stop_container(self, build_image_for_jupyterlab, start_proxy):
"""Test start stop mutations"""
docker_client = build_image_for_jupyterlab[2]
gql_client = build_image_for_jupyterlab[4]
is_running_query = """
{
labbook(name: "containerunittestbook", owner: "unittester") {
environment {
imageStatus
containerStatus
}
}
}
"""
r = gql_client.execute(is_running_query)
assert 'errors' not in r
assert r['data']['labbook']['environment']['imageStatus'] == 'EXISTS'
assert r['data']['labbook']['environment']['containerStatus'] == 'NOT_RUNNING'
try:
# We wrap the below in a try to ensure the container is cleaned up
# Start the container
start_query = """
mutation myStart {
startContainer(input: {labbookName: "containerunittestbook", owner: "unittester"}) {
environment {
imageStatus
containerStatus
}
}
}
"""
r = gql_client.execute(start_query)
pprint.pprint(r)
assert r['data']['startContainer']['environment']['imageStatus'] == 'EXISTS'
assert r['data']['startContainer']['environment']['containerStatus'] == 'RUNNING'
# TEST GIG-909: Prevent rebuilding images when container for LB already running
build_q = """
mutation myBuild {
buildImage(input: {
labbookName: "containerunittestbook",
owner: "unittester"
}) {
environment {
imageStatus
containerStatus
}
}
}
"""
r = gql_client.execute(build_q)
assert 'errors' in r # Yes, we really want to check that the errors key exists
assert 'Cannot build image for running container' in r['errors'][0]['message']
assert not r['data']['buildImage'] # Yes, this should be empty due to failure.
# Stop the container
stop_query = """
mutation myStop {
stopContainer(input: {labbookName: "containerunittestbook", owner: "unittester"}) {
environment {
imageStatus
containerStatus
}
}
}
"""
r = gql_client.execute(stop_query)
assert 'errors' not in r
assert r['data']['stopContainer']['environment']['imageStatus'] == 'EXISTS'
assert r['data']['stopContainer']['environment']['containerStatus'] == 'NOT_RUNNING'
except:
try:
# Mutation failed. Container *might* have stopped, but try to stop it just in case
docker_client.containers.get('gmlb-default-unittester-containerunittestbook').stop(timeout=4)
except:
# Make a best effort
pass
raise
finally:
try:
# Remove the container.
docker_client.containers.get('gmlb-default-unittester-containerunittestbook').remove()
except:
# Make a best effort
pass
@pytest.mark.skipif(getpass.getuser() == 'circleci', reason="Cannot run this networking test in CircleCI environment")
def test_start_jupyterlab(self, build_image_for_jupyterlab):
"""Test listing labbooks"""
# Start the container
lb = build_image_for_jupyterlab[0]
gql_client = build_image_for_jupyterlab[4]
owner = build_image_for_jupyterlab[-1]
proj_container = container_for_context(username='default', labbook=lb)
docker_client = proj_container._client
container_id = proj_container.image_tag
try:
proj_container.start_project_container()
q = f"""
mutation x {{
startDevTool(input: {{
owner: "{owner}",
labbookName: "{lb.name}",
devTool: "jupyterlab"
}}) {{
path
}}
}}
"""
r = gql_client.execute(q)
assert 'errors' not in r
assert ':10000/jupyter/' in r['data']['startDevTool']['path']
rc, t = docker_client.containers.get(container_id).exec_run(
'sh -c "ps aux | grep jupyter-lab | grep -v \' grep \'"', user='giguser')
l = [a for a in t.decode().split('\n') if a]
assert len(l) == 1
finally:
# Remove the container you fired up
docker_client.containers.get(container_id).stop(timeout=10)
docker_client.containers.get(container_id).remove()
@pytest.mark.skipif(getpass.getuser() == 'circleci', reason="Cannot run this networking test in CircleCI")
def test_start_bundled_app(self, build_image_for_jupyterlab):
"""Test bundled app
Note: In the fixture `build_image_for_jupyterlab`, the environment has been augmented to add the bundled app
"share" with the command "cd /mnt; python3 -m http.server 9999" on port 9999. This test then verifies that
the command is executed and route opened through the CHP.
The reason the dev tool is called "share" is because this test simply uses http.server to run a simple web
server in the /mnt directory. There will be a folder there called "share". http.server doesn't support
route prefixes unless you write a bit of code, so simply calling the app "share" is a simple work around
to create a funtioning test. So if you go to the route provided you'll get a 200 back if everything started
and is wired up OK.
"""
lb = build_image_for_jupyterlab[0]
gql_client = build_image_for_jupyterlab[4]
owner = build_image_for_jupyterlab[-1]
# Start the container
project_container = container_for_context('default', labbook=lb)
project_container.start_project_container()
container_id = project_container._container.id
docker_client = project_container._client
try:
q = f"""
mutation x {{
startDevTool(input: {{
owner: "{owner}",
labbookName: "{lb.name}",
devTool: "share"
}}) {{
path
}}
}}
"""
r = gql_client.execute(q)
assert 'errors' not in r
time.sleep(5)
url = f"http://localhost{r['data']['startDevTool']['path']}"
response = requests.get(url)
assert response.status_code == 200
# Running again should work and just load route
r = gql_client.execute(q)
assert 'errors' not in r
url = f"http://localhost{r['data']['startDevTool']['path']}"
response = requests.get(url)
assert response.status_code == 200
finally:
# Remove the container you fired up
docker_client.containers.get(container_id=container_id).stop(timeout=10)
docker_client.containers.get(container_id=container_id).remove()
@pytest.mark.skipif(getpass.getuser() == 'circleci', reason="Cannot run this networking test in CircleCI environment")
def test_start_rserver(self, build_image_for_rserver):
pytest.xfail("RStudio Server tests not implemented")
| 39.387387 | 122 | 0.560041 |
acf6f4b0bd48c96e1155c74b3522dfb083023b36 | 152,085 | py | Python | pandas/tests/test_graphics.py | jn1818/pandas-zh-cn-translate | 22f0c7e8a6e5ff9e9ae5b424985b2455186a478a | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 | 2016-02-10T12:57:21.000Z | 2019-09-14T19:02:14.000Z | pandas/tests/test_graphics.py | jn1818/pandas-zh-cn-translate | 22f0c7e8a6e5ff9e9ae5b424985b2455186a478a | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/tests/test_graphics.py | jn1818/pandas-zh-cn-translate | 22f0c7e8a6e5ff9e9ae5b424985b2455186a478a | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import nose
import itertools
import os
import string
import warnings
from distutils.version import LooseVersion
from datetime import datetime, date
import pandas as pd
from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range,
bdate_range)
from pandas.compat import (range, lrange, StringIO, lmap, lzip, u, zip,
iteritems, OrderedDict, PY3)
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import rand, randn
from numpy.testing import assert_allclose
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
"""
These tests are for ``Dataframe.plot`` and ``Series.plot``.
Other plot methods such as ``.hist``, ``.boxplot`` and other miscellaneous
are tested in test_graphics_others.py
"""
def _skip_if_no_scipy_gaussian_kde():
try:
import scipy
from scipy.stats import gaussian_kde
except ImportError:
raise nose.SkipTest("scipy version doesn't support gaussian_kde")
def _ok_for_gaussian_kde(kind):
if kind in ['kde','density']:
try:
import scipy
from scipy.stats import gaussian_kde
except ImportError:
return False
return True
@tm.mplskip
class TestPlotBase(tm.TestCase):
def setUp(self):
    """Build the shared fixture DataFrame and record matplotlib version flags."""
    import matplotlib as mpl
    # Reset rcParams so earlier tests cannot leak style settings in.
    mpl.rcdefaults()

    n = 100
    with tm.RNGContext(42):
        # Fixed seed so fixture data is identical across runs.
        gender = tm.choice(['Male', 'Female'], size=n)
        classroom = tm.choice(['A', 'B', 'C'], size=n)

        self.hist_df = DataFrame({'gender': gender,
                                  'classroom': classroom,
                                  'height': random.normal(66, 4, size=n),
                                  'weight': random.normal(161, 32, size=n),
                                  'category': random.randint(4, size=n)})

    # Version gates: expected artist counts/behavior differ across
    # matplotlib releases.
    self.mpl_le_1_2_1 = plotting._mpl_le_1_2_1()
    self.mpl_ge_1_3_1 = plotting._mpl_ge_1_3_1()
    self.mpl_ge_1_4_0 = plotting._mpl_ge_1_4_0()
    self.mpl_ge_1_5_0 = plotting._mpl_ge_1_5_0()

    # Number of artists a boxplot produces changed in mpl 1.4.
    if self.mpl_ge_1_4_0:
        self.bp_n_objects = 7
    else:
        self.bp_n_objects = 8
    if self.mpl_ge_1_5_0:
        # 1.5 added PolyCollections to legend handler
        # so we have twice as many items.
        self.polycollection_factor = 2
    else:
        self.polycollection_factor = 1
def tearDown(self):
    # Close all figures so state never leaks between tests.
    tm.close()
@cache_readonly
def plt(self):
    # Lazily import pyplot once per instance; cached thereafter.
    import matplotlib.pyplot as plt
    return plt
@cache_readonly
def colorconverter(self):
    # Shared converter used to normalize color specs to RGBA tuples.
    import matplotlib.colors as colors
    return colors.colorConverter
def _check_legend_labels(self, axes, labels=None, visible=True):
    """
    Check each axes has expected legend labels

    Parameters
    ----------
    axes : matplotlib Axes object, or its list-like
    labels : list-like
        expected legend labels
    visible : bool
        expected legend visibility. labels are checked only when visible is True
    """
    if visible and (labels is None):
        raise ValueError('labels must be specified when visible is True')

    for ax in self._flatten_visible(axes):
        legend = ax.get_legend()
        if not visible:
            self.assertTrue(legend is None)
        else:
            self.assertTrue(legend is not None)
            self._check_text_labels(legend.get_texts(), labels)
def _check_data(self, xp, rs):
    """
    Check each axes has identical lines

    Parameters
    ----------
    xp : matplotlib Axes object
    rs : matplotlib Axes object
    """
    expected_lines = xp.get_lines()
    result_lines = rs.get_lines()

    self.assertEqual(len(expected_lines), len(result_lines))
    # Compare each pair of lines point-by-point.
    for exp_line, res_line in zip(expected_lines, result_lines):
        assert_allclose(exp_line.get_xydata(), res_line.get_xydata())

    tm.close()
def _check_visible(self, collections, visible=True):
    """
    Check each artist is visible or not

    Parameters
    ----------
    collections : matplotlib Artist or its list-like
        target Artist or its list or collection
    visible : bool
        expected visibility
    """
    from matplotlib.collections import Collection

    # Wrap a lone artist so the loop below handles both cases uniformly.
    is_single = (not isinstance(collections, Collection)
                 and not com.is_list_like(collections))
    artists = [collections] if is_single else collections
    for artist in artists:
        self.assertEqual(artist.get_visible(), visible)
def _get_colors_mapped(self, series, colors):
    # Assign one color per distinct value, then look every element up.
    # unique and colors length can be differed depending on slice value
    color_by_value = dict(zip(series.unique(), colors))
    return [color_by_value[v] for v in series.values]
def _check_colors(self, collections, linecolors=None, facecolors=None,
                  mapping=None):
    """
    Check each artist has expected line colors and face colors

    Parameters
    ----------
    collections : list-like
        list or collection of target artist
    linecolors : list-like which has the same length as collections
        list of expected line colors
    facecolors : list-like which has the same length as collections
        list of expected face colors
    mapping : Series
        Series used for color grouping key
        used for andrew_curves, parallel_coordinates, radviz test
    """
    from matplotlib.lines import Line2D
    from matplotlib.collections import Collection, PolyCollection
    conv = self.colorconverter
    if linecolors is not None:

        if mapping is not None:
            # Translate group keys to colors, then truncate to the number
            # of artists actually drawn.
            linecolors = self._get_colors_mapped(mapping, linecolors)
            linecolors = linecolors[:len(collections)]

        self.assertEqual(len(collections), len(linecolors))
        for patch, color in zip(collections, linecolors):
            if isinstance(patch, Line2D):
                result = patch.get_color()
                # Line2D may contains string color expression
                result = conv.to_rgba(result)
            elif isinstance(patch, PolyCollection):
                result = tuple(patch.get_edgecolor()[0])
            else:
                result = patch.get_edgecolor()

            # Both sides are normalized to RGBA before comparing.
            expected = conv.to_rgba(color)
            self.assertEqual(result, expected)

    if facecolors is not None:

        if mapping is not None:
            facecolors = self._get_colors_mapped(mapping, facecolors)
            facecolors = facecolors[:len(collections)]

        self.assertEqual(len(collections), len(facecolors))
        for patch, color in zip(collections, facecolors):
            if isinstance(patch, Collection):
                # returned as list of np.array
                result = patch.get_facecolor()[0]
            else:
                result = patch.get_facecolor()

            if isinstance(result, np.ndarray):
                result = tuple(result)

            expected = conv.to_rgba(color)
            self.assertEqual(result, expected)
def _check_text_labels(self, texts, expected):
    """
    Check each text has expected labels

    Parameters
    ----------
    texts : matplotlib Text object, or its list-like
        target text, or its list
    expected : str or list-like which has the same length as texts
        expected text label, or its list
    """
    if not com.is_list_like(texts):
        self.assertEqual(texts.get_text(), expected)
        return

    actual = [t.get_text() for t in texts]
    self.assertEqual(len(actual), len(expected))
    for got, want in zip(actual, expected):
        self.assertEqual(got, want)
def _check_ticks_props(self, axes, xlabelsize=None, xrot=None,
                       ylabelsize=None, yrot=None):
    """
    Check each axes has expected tick properties

    Parameters
    ----------
    axes : matplotlib Axes object, or its list-like
    xlabelsize : number
        expected xticks font size
    xrot : number
        expected xticks rotation
    ylabelsize : number
        expected yticks font size
    yrot : number
        expected yticks rotation
    """
    from matplotlib.ticker import NullFormatter
    axes = self._flatten_visible(axes)
    for ax in axes:
        if xlabelsize or xrot:
            if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
                # If minor ticks has NullFormatter, rot / fontsize are not retained
                labels = ax.get_xticklabels()
            else:
                # Otherwise minor tick labels must match too.
                labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True)

            for label in labels:
                if xlabelsize is not None:
                    self.assertAlmostEqual(label.get_fontsize(), xlabelsize)
                if xrot is not None:
                    self.assertAlmostEqual(label.get_rotation(), xrot)

        if ylabelsize or yrot:
            if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
                labels = ax.get_yticklabels()
            else:
                labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True)

            for label in labels:
                if ylabelsize is not None:
                    self.assertAlmostEqual(label.get_fontsize(), ylabelsize)
                if yrot is not None:
                    self.assertAlmostEqual(label.get_rotation(), yrot)
def _check_ax_scales(self, axes, xaxis='linear', yaxis='linear'):
    """
    Check each axes has expected scales

    Parameters
    ----------
    axes : matplotlib Axes object, or its list-like
    xaxis : {'linear', 'log'}
        expected xaxis scale
    yaxis : {'linear', 'log'}
        expected yaxis scale
    """
    for ax in self._flatten_visible(axes):
        self.assertEqual(ax.xaxis.get_scale(), xaxis)
        self.assertEqual(ax.yaxis.get_scale(), yaxis)
def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=(8.0, 6.0)):
    """
    Check expected number of axes is drawn in expected layout

    Parameters
    ----------
    axes : matplotlib Axes object, or its list-like
    axes_num : number
        expected number of axes. Unnecessary axes should be set to invisible.
    layout : tuple
        expected layout, (expected number of rows , columns)
    figsize : tuple
        expected figsize. default is matplotlib default
    """
    visible_axes = self._flatten_visible(axes)

    if axes_num is not None:
        self.assertEqual(len(visible_axes), axes_num)
        for ax in visible_axes:
            # check something drawn on visible axes
            self.assertTrue(len(ax.get_children()) > 0)

    if layout is not None:
        # Layout is estimated from the geometry of all axes, including
        # invisible ones.
        result = self._get_axes_layout(plotting._flatten(axes))
        self.assertEqual(result, layout)

    # Figure size is compared after rounding to whole inches.
    self.assert_numpy_array_equal(np.round(visible_axes[0].figure.get_size_inches()),
                                  np.array(figsize))
def _get_axes_layout(self, axes):
    # Estimate (nrows, ncols) from the distinct lower-left corner
    # coordinates of each axes' bounding box.
    xs = set()
    ys = set()
    for ax in axes:
        (x0, y0), _ = ax.get_position().get_points()
        xs.add(x0)
        ys.add(y0)
    return (len(ys), len(xs))
def _flatten_visible(self, axes):
    """
    Flatten axes, and filter only visible

    Parameters
    ----------
    axes : matplotlib Axes object, or its list-like
    """
    return [ax for ax in plotting._flatten(axes) if ax.get_visible()]
def _check_has_errorbars(self, axes, xerr=0, yerr=0):
    """
    Check axes has expected number of errorbars

    Parameters
    ----------
    axes : matplotlib Axes object, or its list-like
    xerr : number
        expected number of x errorbar
    yerr : number
        expected number of y errorbar
    """
    for ax in self._flatten_visible(axes):
        # Count containers flagged as carrying x/y error bars.
        n_xerr = sum(1 for c in ax.containers
                     if getattr(c, 'has_xerr', False))
        n_yerr = sum(1 for c in ax.containers
                     if getattr(c, 'has_yerr', False))
        self.assertEqual(xerr, n_xerr)
        self.assertEqual(yerr, n_yerr)
def _check_box_return_type(self, returned, return_type, expected_keys=None,
                           check_ax_title=True):
    """
    Check box returned type is correct

    Parameters
    ----------
    returned : object to be tested, returned from boxplot
    return_type : str
        return_type passed to boxplot
    expected_keys : list-like, optional
        group labels in subplot case. If not passed,
        the function checks assuming boxplot uses single ax
    check_ax_title : bool
        Whether to check the ax.title is the same as expected_key
        Intended to be checked by calling from ``boxplot``.
        Normal ``plot`` doesn't attach ``ax.title``, it must be disabled.
    """
    from matplotlib.axes import Axes
    types = {'dict': dict, 'axes': Axes, 'both': tuple}
    if expected_keys is None:
        # should be fixed when the returning default is changed
        if return_type is None:
            return_type = 'dict'

        self.assertTrue(isinstance(returned, types[return_type]))
        if return_type == 'both':
            self.assertIsInstance(returned.ax, Axes)
            self.assertIsInstance(returned.lines, dict)
    else:
        # Grouped / subplot case: one entry per group key.
        # should be fixed when the returning default is changed
        if return_type is None:
            for r in self._flatten_visible(returned):
                self.assertIsInstance(r, Axes)
            return

        self.assertTrue(isinstance(returned, OrderedDict))
        self.assertEqual(sorted(returned.keys()), sorted(expected_keys))
        for key, value in iteritems(returned):
            self.assertTrue(isinstance(value, types[return_type]))
            # check returned dict has correct mapping
            if return_type == 'axes':
                if check_ax_title:
                    self.assertEqual(value.get_title(), key)
            elif return_type == 'both':
                if check_ax_title:
                    self.assertEqual(value.ax.get_title(), key)
                self.assertIsInstance(value.ax, Axes)
                self.assertIsInstance(value.lines, dict)
            elif return_type == 'dict':
                line = value['medians'][0]
                if check_ax_title:
                    self.assertEqual(line.get_axes().get_title(), key)
            else:
                raise AssertionError
def _check_grid_settings(self, obj, kinds, kws=None):
    """Verify plots honor rcParams['axes.grid'] and the ``grid`` kwarg.

    Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792.

    Parameters
    ----------
    obj : Series or DataFrame
        object whose ``plot`` method is exercised
    kinds : list of str
        plot kinds to test
    kws : dict, optional
        extra keyword arguments forwarded to each ``plot`` call.
        Defaults to no extra arguments. (Was a mutable ``{}`` default,
        which is an anti-pattern even when never mutated.)
    """
    import matplotlib as mpl

    if kws is None:
        kws = {}

    def is_grid_on():
        xoff = all(not g.gridOn
                   for g in self.plt.gca().xaxis.get_major_ticks())
        yoff = all(not g.gridOn
                   for g in self.plt.gca().yaxis.get_major_ticks())
        return not (xoff and yoff)

    spndx = 1
    for kind in kinds:
        if not _ok_for_gaussian_kde(kind):
            continue

        # rc grid off, no kwarg -> grid stays off
        self.plt.subplot(1, 4 * len(kinds), spndx)
        spndx += 1
        mpl.rc('axes', grid=False)
        obj.plot(kind=kind, **kws)
        self.assertFalse(is_grid_on())

        # rc grid on, explicit grid=False wins
        self.plt.subplot(1, 4 * len(kinds), spndx)
        spndx += 1
        mpl.rc('axes', grid=True)
        obj.plot(kind=kind, grid=False, **kws)
        self.assertFalse(is_grid_on())

        # pie charts never draw a grid, so skip the "grid on" checks
        if kind != 'pie':
            # rc grid on, no kwarg -> grid is drawn
            self.plt.subplot(1, 4 * len(kinds), spndx)
            spndx += 1
            mpl.rc('axes', grid=True)
            obj.plot(kind=kind, **kws)
            self.assertTrue(is_grid_on())

            # rc grid off, explicit grid=True wins
            self.plt.subplot(1, 4 * len(kinds), spndx)
            spndx += 1
            mpl.rc('axes', grid=False)
            obj.plot(kind=kind, grid=True, **kws)
            self.assertTrue(is_grid_on())
def _maybe_unpack_cycler(self, rcParams, field='color'):
    """
    Compat layer for MPL 1.5 change to color cycle

    Before: plt.rcParams['axes.color_cycle'] -> ['b', 'g', 'r'...]
    After : plt.rcParams['axes.prop_cycle'] -> cycler(...)
    """
    if not self.mpl_ge_1_5_0:
        return rcParams['axes.color_cycle']
    return [entry[field] for entry in rcParams['axes.prop_cycle']]
@tm.mplskip
class TestSeriesPlots(TestPlotBase):
def setUp(self):
    """Extend the base fixtures with time, string and period series."""
    TestPlotBase.setUp(self)
    import matplotlib as mpl
    mpl.rcdefaults()

    self.ts = tm.makeTimeSeries()
    self.ts.name = 'ts'

    self.series = tm.makeStringSeries()
    self.series.name = 'series'

    self.iseries = tm.makePeriodSeries()
    self.iseries.name = 'iseries'
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
axes = _check_plot_works(self.ts.plot, rot=0)
self._check_ticks_props(axes, xrot=0)
ax = _check_plot_works(self.ts.plot, style='.', logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
_check_plot_works(self.ts[:10].plot.bar)
_check_plot_works(self.ts.plot.area, stacked=False)
_check_plot_works(self.iseries.plot)
for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(self.series[:5].plot, kind=kind)
_check_plot_works(self.series[:10].plot.barh)
ax = _check_plot_works(Series(randn(10)).plot.bar, color='black')
self._check_colors([ax.patches[0]], facecolors=['black'])
# GH 6951
ax = _check_plot_works(self.ts.plot, subplots=True)
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
@slow
def test_plot_figsize_and_title(self):
    # figsize and title
    # Both kwargs must be forwarded to the resulting axes/figure.
    ax = self.series.plot(title='Test', figsize=(16, 8))
    self._check_text_labels(ax.title, 'Test')
    self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
def test_dont_modify_rcParams(self):
    # GH 8242: plotting must not clobber the global color cycle.
    key = 'axes.prop_cycle' if self.mpl_ge_1_5_0 else 'axes.color_cycle'
    before = self.plt.rcParams[key]
    Series([1, 2, 3]).plot()
    self.assertEqual(before, self.plt.rcParams[key])
def test_ts_line_lim(self):
    # The x-limits must exactly span the plotted data, for both the
    # primary and the secondary-y axes.
    def _assert_xlim_spans_data(ax):
        xmin, xmax = ax.get_xlim()
        xdata = ax.get_lines()[0].get_data(orig=False)[0]
        self.assertEqual(xmin, xdata[0])
        self.assertEqual(xmax, xdata[-1])

    _assert_xlim_spans_data(self.ts.plot())
    tm.close()
    _assert_xlim_spans_data(self.ts.plot(secondary_y=True))
def test_ts_area_lim(self):
ax = self.ts.plot.area(stacked=False)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
# GH 7471
ax = self.ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
tz_ts = self.ts.copy()
tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET')
ax = tz_ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
ax = tz_ts.plot.area(stacked=False, secondary_y=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
def test_label(self):
    """Legend label precedence: explicit ``label`` kwarg > series name >
    the literal string 'None' for an unnamed series."""
    s = Series([1, 2])
    ax = s.plot(label='LABEL', legend=True)
    self._check_legend_labels(ax, labels=['LABEL'])
    self.plt.close()
    # An unnamed series without a label is shown as 'None'.
    ax = s.plot(legend=True)
    self._check_legend_labels(ax, labels=['None'])
    self.plt.close()
    # get name from index
    s.name = 'NAME'
    ax = s.plot(legend=True)
    self._check_legend_labels(ax, labels=['NAME'])
    self.plt.close()
    # override the default
    ax = s.plot(legend=True, label='LABEL')
    self._check_legend_labels(ax, labels=['LABEL'])
    self.plt.close()
    # Add label info, but don't draw
    ax = s.plot(legend=False, label='LABEL')
    self.assertEqual(ax.get_legend(), None)  # Hasn't been drawn
    ax.legend()  # draw it
    self._check_legend_labels(ax, labels=['LABEL'])
def test_line_area_nan_series(self):
values = [1, 2, np.nan, 3]
s = Series(values)
ts = Series(values, index=tm.makeDateIndex(k=4))
for d in [s, ts]:
ax = _check_plot_works(d.plot)
masked = ax.lines[0].get_ydata()
# remove nan for comparison purpose
self.assert_numpy_array_equal(np.delete(masked.data, 2), np.array([1, 2, 3]))
self.assert_numpy_array_equal(masked.mask, np.array([False, False, True, False]))
expected = np.array([1, 2, 0, 3])
ax = _check_plot_works(d.plot, stacked=True)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area, stacked=False)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
def test_line_use_index_false(self):
    # With use_index=False neither line nor bar plots may label the x axis.
    s = Series([1, 2, 3], index=['a', 'b', 'c'])
    s.index.name = 'The Index'
    for plot_func in (s.plot, s.plot.bar):
        ax = plot_func(use_index=False)
        self.assertEqual(ax.get_xlabel(), '')
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot.bar(log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([200, 500]).plot.barh(log=True)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
tm.close()
# GH 9905
expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00])
if not self.mpl_le_1_2_1:
expected = np.hstack((1.0e-04, expected, 1.0e+01))
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar')
tm.assert_numpy_array_equal(ax.get_ylim(), (0.001, 0.10000000000000001))
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh')
tm.assert_numpy_array_equal(ax.get_xlim(), (0.001, 0.10000000000000001))
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
@slow
def test_bar_ignore_index(self):
    # use_index=False replaces the string index with positional labels.
    ser = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
    ax = ser.plot.bar(use_index=False)
    self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
def test_rotation(self):
    frame = DataFrame(randn(5, 5))
    # x tick labels default to rot=0 ...
    axes = frame.plot()
    self._check_ticks_props(axes, xrot=0)
    # ... and an explicit rot is honoured.
    axes = frame.plot(rot=30)
    self._check_ticks_props(axes, xrot=30)
def test_irregular_datetime(self):
    # An irregularly spaced datetime index must still honour set_xlim.
    rng = date_range('1/1/2000', '3/1/2000')
    rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
    ser = Series(randn(len(rng)), rng)
    ax = ser.plot()
    ax.set_xlim('1/1/1999', '1/1/2001')
    expected = datetime(1999, 1, 1).toordinal()
    self.assertEqual(expected, ax.get_xlim()[0])
@slow
def test_pie_series(self):
# if sum of values is less than 1.0, pie handle them as rate and draw semicircle.
series = Series(np.random.randint(1, 5),
index=['a', 'b', 'c', 'd', 'e'], name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, series.index)
self.assertEqual(ax.get_ylabel(), 'YLABEL')
# without wedge labels
ax = _check_plot_works(series.plot.pie, labels=None)
self._check_text_labels(ax.texts, [''] * 5)
# with less colors than elements
color_args = ['r', 'g', 'b']
ax = _check_plot_works(series.plot.pie, colors=color_args)
color_expected = ['r', 'g', 'b', 'r', 'g']
self._check_colors(ax.patches, facecolors=color_expected)
# with labels and colors
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args)
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
# with autopct and fontsize
ax = _check_plot_works(series.plot.pie, colors=color_args,
autopct='%.2f', fontsize=7)
pcts = ['{0:.2f}'.format(s * 100) for s in series.values / float(series.sum())]
iters = [iter(series.index), iter(pcts)]
expected_texts = list(next(it) for it in itertools.cycle(iters))
self._check_text_labels(ax.texts, expected_texts)
for t in ax.texts:
self.assertEqual(t.get_fontsize(), 7)
# includes negative value
with tm.assertRaises(ValueError):
series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e'])
series.plot.pie()
# includes nan
series = Series([1, 2, np.nan, 4],
index=['a', 'b', 'c', 'd'], name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, ['a', 'b', '', 'd'])
def test_pie_nan(self):
    # A NaN slice is dropped from the pie and its label is blanked.
    s = Series([1, np.nan, 1, 1])
    ax = s.plot.pie(legend=True)
    result = [t.get_text() for t in ax.texts]
    self.assertEqual(result, ['0', '', '2', '3'])
@slow
def test_hist_df_kwargs(self):
    # ``bins`` is forwarded: 2 columns x 5 bins = 10 bar patches.
    frame = DataFrame(np.random.randn(10, 2))
    ax = frame.plot.hist(bins=5)
    self.assertEqual(len(ax.patches), 10)
@slow
def test_hist_df_with_nonnumerics(self):
    # GH 9853: non-numeric columns are ignored by DataFrame.plot.hist.
    with tm.RNGContext(1):
        frame = DataFrame(np.random.randn(10, 4),
                          columns=['A', 'B', 'C', 'D'])
        frame['E'] = ['x', 'y'] * 5

    # 4 numeric columns x 5 bins
    ax = frame.plot.hist(bins=5)
    self.assertEqual(len(ax.patches), 20)

    # default is bins=10 -> 4 columns x 10 bins
    ax = frame.plot.hist()
    self.assertEqual(len(ax.patches), 40)
@slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, filterwarnings='ignore', by=self.ts.index.month)
_check_plot_works(self.ts.hist, filterwarnings='ignore', by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_bins_legacy(self):
    # ``bins`` must be forwarded: two bins -> two bar patches per axes.
    frame = DataFrame(np.random.randn(10, 2))
    first_ax = frame.hist(bins=2)[0][0]
    self.assertEqual(len(first_ax.patches), 2)
@slow
def test_hist_layout(self):
    # A layout argument is rejected for a single (Series) histogram,
    # whether passed as a tuple or as a list.
    df = self.hist_df
    for bad_layout in ((1, 1), [1, 1]):
        with tm.assertRaises(ValueError):
            df.height.hist(layout=bad_layout)
@slow
def test_hist_layout_with_by(self):
df = self.hist_df
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.gender, layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.gender, layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
@slow
def test_hist_no_overlap(self):
    # Two histograms drawn on separate subplots must land on two axes.
    from matplotlib.pyplot import subplot, gcf
    left = Series(randn(2))
    right = Series(randn(2))
    subplot(121)
    left.hist()
    subplot(122)
    right.hist()
    axes = gcf().get_axes()
    self.assertEqual(len(axes), 2)
@slow
def test_hist_secondary_legend(self):
# GH 9610
df = DataFrame(np.random.randn(30, 4), columns=list('abcd'))
# primary -> secondary
ax = df['a'].plot.hist(legend=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
# both legends are dran on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b (right)'])
self.assertTrue(ax.get_yaxis().get_visible())
self.assertTrue(ax.right_ax.get_yaxis().get_visible())
tm.close()
# secondary -> secondary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
# both legends are draw on left ax
# left axis must be invisible, right axis must be visible
self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b (right)'])
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
# secondary -> primary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
# right axes is returned
df['b'].plot.hist(ax=ax, legend=True)
# both legends are draw on left ax
# left and right axis must be visible
self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b'])
self.assertTrue(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
    @slow
    def test_df_series_secondary_legend(self):
        """Legend labels when a Series is overlaid on a DataFrame plot via secondary_y."""
        # GH 9779
        df = DataFrame(np.random.randn(30, 3), columns=list('abc'))
        s = Series(np.random.randn(30), name='x')
        # primary -> secondary (without passing ax)
        ax = df.plot()
        s.plot(legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left and right axis must be visible
        self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
        self.assertTrue(ax.get_yaxis().get_visible())
        self.assertTrue(ax.right_ax.get_yaxis().get_visible())
        tm.close()
        # primary -> secondary (with passing ax)
        ax = df.plot()
        s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left and right axis must be visible
        self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
        self.assertTrue(ax.get_yaxis().get_visible())
        self.assertTrue(ax.right_ax.get_yaxis().get_visible())
        tm.close()
        # secondary -> secondary (without passing ax)
        ax = df.plot(secondary_y=True)
        s.plot(legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left axis must be invisible and right axis must be visible
        expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
        self._check_legend_labels(ax.left_ax, labels=expected)
        self.assertFalse(ax.left_ax.get_yaxis().get_visible())
        self.assertTrue(ax.get_yaxis().get_visible())
        tm.close()
        # secondary -> secondary (with passing ax)
        ax = df.plot(secondary_y=True)
        s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left axis must be invisible and right axis must be visible
        expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
        self._check_legend_labels(ax.left_ax, expected)
        self.assertFalse(ax.left_ax.get_yaxis().get_visible())
        self.assertTrue(ax.get_yaxis().get_visible())
        tm.close()
        # secondary -> secondary (with passing ax); mark_right=False drops
        # the "(right)" suffix for the DataFrame columns
        ax = df.plot(secondary_y=True, mark_right=False)
        s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left axis must be invisible and right axis must be visible
        expected = ['a', 'b', 'c', 'x (right)']
        self._check_legend_labels(ax.left_ax, expected)
        self.assertFalse(ax.left_ax.get_yaxis().get_visible())
        self.assertTrue(ax.get_yaxis().get_visible())
        tm.close()
    @slow
    def test_plot_fails_with_dupe_color_and_style(self):
        """Passing a color both via ``style`` ('k--') and ``color`` must raise."""
        x = Series(randn(2))
        with tm.assertRaises(ValueError):
            x.plot(style='k--', color='k')
    @slow
    def test_hist_kde(self):
        """hist/kde/density plots honor logy and leave tick labels blank."""
        ax = self.ts.plot.hist(logy=True)
        self._check_ax_scales(ax, yaxis='log')
        xlabels = ax.get_xticklabels()
        # ticks are values, thus ticklabels are blank
        self._check_text_labels(xlabels, [''] * len(xlabels))
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [''] * len(ylabels))
        # kde requires scipy's gaussian_kde
        tm._skip_if_no_scipy()
        _skip_if_no_scipy_gaussian_kde()
        _check_plot_works(self.ts.plot.kde)
        _check_plot_works(self.ts.plot.density)
        ax = self.ts.plot.kde(logy=True)
        self._check_ax_scales(ax, yaxis='log')
        xlabels = ax.get_xticklabels()
        self._check_text_labels(xlabels, [''] * len(xlabels))
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [''] * len(ylabels))
    @slow
    def test_kde_kwargs(self):
        """kde/density accept bw_method and an explicit evaluation grid (ind)."""
        tm._skip_if_no_scipy()
        _skip_if_no_scipy_gaussian_kde()
        from numpy import linspace
        _check_plot_works(self.ts.plot.kde, bw_method=.5, ind=linspace(-100,100,20))
        _check_plot_works(self.ts.plot.density, bw_method=.5, ind=linspace(-100,100,20))
        ax = self.ts.plot.kde(logy=True, bw_method=.5, ind=linspace(-100,100,20))
        self._check_ax_scales(ax, yaxis='log')
        self._check_text_labels(ax.yaxis.get_label(), 'Density')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
s = Series(np.random.uniform(size=50))
s[0] = np.nan
ax = _check_plot_works(s.plot.kde)
    @slow
    def test_hist_kwargs(self):
        """hist forwards kwargs: bins, orientation (mpl >= 1.3.1), align/stacked."""
        ax = self.ts.plot.hist(bins=5)
        # one patch (bar) per bin
        self.assertEqual(len(ax.patches), 5)
        self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
        tm.close()
        if self.mpl_ge_1_3_1:
            ax = self.ts.plot.hist(orientation='horizontal')
            # horizontal hist puts the frequency label on the x-axis
            self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
            tm.close()
            ax = self.ts.plot.hist(align='left', stacked=True)
            tm.close()
    @slow
    def test_hist_kde_color(self):
        """Explicit single-color kwarg is applied to hist patches and kde lines."""
        ax = self.ts.plot.hist(logy=True, bins=10, color='b')
        self._check_ax_scales(ax, yaxis='log')
        self.assertEqual(len(ax.patches), 10)
        self._check_colors(ax.patches, facecolors=['b'] * 10)
        tm._skip_if_no_scipy()
        _skip_if_no_scipy_gaussian_kde()
        ax = self.ts.plot.kde(logy=True, color='r')
        self._check_ax_scales(ax, yaxis='log')
        lines = ax.get_lines()
        self.assertEqual(len(lines), 1)
        self._check_colors(lines, ['r'])
    @slow
    def test_boxplot_series(self):
        """Series.plot.box labels the x tick with the series name, supports logy."""
        ax = self.ts.plot.box(logy=True)
        self._check_ax_scales(ax, yaxis='log')
        xlabels = ax.get_xticklabels()
        self._check_text_labels(xlabels, [self.ts.name])
        ylabels = ax.get_yticklabels()
        self._check_text_labels(ylabels, [''] * len(ylabels))
    @slow
    def test_kind_both_ways(self):
        """Every plot kind works both as s.plot(kind=...) and s.plot.<kind>()."""
        s = Series(range(3))
        for kind in plotting._common_kinds + plotting._series_kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            s.plot(kind=kind)
            getattr(s.plot, kind)()
    @slow
    def test_invalid_plot_data(self):
        """Plotting a purely non-numeric Series raises TypeError for every kind."""
        s = Series(list('abcd'))
        for kind in plotting._common_kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            with tm.assertRaises(TypeError):
                s.plot(kind=kind)
    @slow
    def test_valid_object_plot(self):
        """Numeric data stored with object dtype is still plottable for every kind."""
        s = Series(lrange(10), dtype=object)
        for kind in plotting._common_kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            _check_plot_works(s.plot, kind=kind)
    def test_partially_invalid_plot_data(self):
        """A Series mixing strings and numbers raises TypeError for every kind."""
        s = Series(['a', 'b', 1.0, 2])
        for kind in plotting._common_kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            with tm.assertRaises(TypeError):
                s.plot(kind=kind)
    def test_invalid_kind(self):
        """An unrecognized plot kind string raises ValueError."""
        s = Series([1, 2])
        with tm.assertRaises(ValueError):
            s.plot(kind='aasdf')
    @slow
    def test_dup_datetime_index_plot(self):
        """A Series with duplicate datetime index entries still plots."""
        dr1 = date_range('1/1/2009', periods=4)
        dr2 = date_range('1/2/2009', periods=4)
        # overlapping ranges -> duplicated timestamps in the index
        index = dr1.append(dr2)
        values = randn(index.size)
        s = Series(values, index=index)
        _check_plot_works(s.plot)
    @slow
    def test_errorbar_plot(self):
        """Error bars accepted as Series/array/list/DataFrame/scalar, plus bad input checks."""
        s = Series(np.arange(10), name='x')
        s_err = np.random.randn(10)
        d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y'])
        # test line and bar plots
        kinds = ['line', 'bar']
        for kind in kinds:
            ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
            self._check_has_errorbars(ax, xerr=1, yerr=1)
        ax = _check_plot_works(s.plot, xerr=s_err)
        self._check_has_errorbars(ax, xerr=1, yerr=0)
        # test time series plotting
        ix = date_range('1/1/2000', '1/1/2001', freq='M')
        ts = Series(np.arange(12), index=ix, name='x')
        ts_err = Series(np.random.randn(12), index=ix)
        td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y'])
        ax = _check_plot_works(ts.plot, yerr=ts_err)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(ts.plot, yerr=td_err)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
        # check incorrect lengths and types
        with tm.assertRaises(ValueError):
            s.plot(yerr=np.arange(11))
        s_err = ['zzz']*10
        # in mpl 1.5+ this is a TypeError
        with tm.assertRaises((ValueError, TypeError)):
            s.plot(yerr=s_err)
    def test_table(self):
        """table kwarg accepts both True and an explicit data object."""
        _check_plot_works(self.series.plot, table=True)
        _check_plot_works(self.series.plot, table=self.series)
    @slow
    def test_series_grid_settings(self):
        """Series plots default their grid to rcParams['axes.grid']."""
        # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
        self._check_grid_settings(Series([1,2,3]),
            plotting._series_kinds + plotting._common_kinds)
    @slow
    def test_standard_colors(self):
        """_get_standard_colors repeats a single color (str or 1-list) to num_colors."""
        for c in ['r', 'red', 'green', '#FF0000']:
            result = plotting._get_standard_colors(1, color=c)
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(1, color=[c])
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(3, color=c)
            self.assertEqual(result, [c] * 3)
            result = plotting._get_standard_colors(3, color=[c])
            self.assertEqual(result, [c] * 3)
    @slow
    def test_standard_colors_all(self):
        """Same repetition contract for every named and single-letter mpl color."""
        import matplotlib.colors as colors
        # multiple colors like mediumaquamarine
        for c in colors.cnames:
            result = plotting._get_standard_colors(num_colors=1, color=c)
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(num_colors=1, color=[c])
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(num_colors=3, color=c)
            self.assertEqual(result, [c] * 3)
            result = plotting._get_standard_colors(num_colors=3, color=[c])
            self.assertEqual(result, [c] * 3)
        # single letter colors like k
        for c in colors.ColorConverter.colors:
            result = plotting._get_standard_colors(num_colors=1, color=c)
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(num_colors=1, color=[c])
            self.assertEqual(result, [c])
            result = plotting._get_standard_colors(num_colors=3, color=c)
            self.assertEqual(result, [c] * 3)
            result = plotting._get_standard_colors(num_colors=3, color=[c])
            self.assertEqual(result, [c] * 3)
    def test_series_plot_color_kwargs(self):
        """color kwarg sets the line color of a plain Series plot."""
        # GH1890
        ax = Series(np.arange(12) + 1).plot(color='green')
        self._check_colors(ax.get_lines(), linecolors=['green'])
    def test_time_series_plot_color_kwargs(self):
        """color kwarg also applies on the datetime-indexed (tsplot) path."""
        # #1890
        ax = Series(np.arange(12) + 1, index=date_range(
            '1/1/2000', periods=12)).plot(color='green')
        self._check_colors(ax.get_lines(), linecolors=['green'])
    def test_time_series_plot_color_with_empty_kwargs(self):
        """Repeated plots on one axes cycle through the default color cycle."""
        import matplotlib as mpl
        # mpl >= 1.5 stores the cycle as a cycler; older versions as a list
        if self.mpl_ge_1_5_0:
            def_colors = self._maybe_unpack_cycler(mpl.rcParams)
        else:
            def_colors = mpl.rcParams['axes.color_cycle']
        index = date_range('1/1/2000', periods=12)
        s = Series(np.arange(1, 13), index=index)
        ncolors = 3
        # no tm.close() between iterations: lines accumulate on one axes,
        # so the axes ends up with ncolors lines in cycle order
        for i in range(ncolors):
            ax = s.plot()
        self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
    def test_xticklabels(self):
        """Explicit xticks select the matching index labels as tick labels."""
        # GH11529
        s = Series(np.arange(10), index=['P%02d' % i for i in range(10)])
        ax = s.plot(xticks=[0,3,5,9])
        exp = ['P%02d' % i for i in [0,3,5,9]]
        self._check_text_labels(ax.get_xticklabels(), exp)
@tm.mplskip
class TestDataFramePlots(TestPlotBase):
    def setUp(self):
        """Reset mpl defaults and build the shared fixtures (tdf, hexbin_df, iris)."""
        TestPlotBase.setUp(self)
        import matplotlib as mpl
        # start every test from pristine rcParams
        mpl.rcdefaults()
        self.tdf = tm.makeTimeDataFrame()
        self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
                                    "B": np.random.uniform(size=20),
                                    "C": np.arange(20) + np.random.uniform(size=20)})
        from pandas import read_csv
        path = os.path.join(curpath(), 'data', 'iris.csv')
        self.iris = read_csv(path)
    @slow
    def test_plot(self):
        """Broad smoke test of DataFrame.plot: subplots/layout, kwargs, MultiIndex,
        unicode labels, single-column bar subplots, and reuse of a supplied ax."""
        df = self.tdf
        _check_plot_works(df.plot, filterwarnings='ignore', grid=False)
        axes = _check_plot_works(df.plot, filterwarnings='ignore', subplots=True)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
        axes = _check_plot_works(df.plot, filterwarnings='ignore',
                                 subplots=True, layout=(-1, 2))
        self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
        axes = _check_plot_works(df.plot, filterwarnings='ignore',
                                 subplots=True, use_index=False)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
        df = DataFrame({'x': [1, 2], 'y': [3, 4]})
        # unknown keyword must raise
        with tm.assertRaises(TypeError):
            df.plot.line(blarg=True)
        df = DataFrame(np.random.rand(10, 3),
                       index=list(string.ascii_letters[:10]))
        _check_plot_works(df.plot, use_index=True)
        _check_plot_works(df.plot, sort_columns=False)
        _check_plot_works(df.plot, yticks=[1, 5, 10])
        _check_plot_works(df.plot, xticks=[1, 5, 10])
        _check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
        _check_plot_works(df.plot, filterwarnings='ignore', subplots=True, title='blah')
        # We have to redo it here because _check_plot_works does two plots, once without an ax
        # kwarg and once with an ax kwarg and the new sharex behaviour does not remove the
        # visibility of the latter axis (as ax is present).
        # see: https://github.com/pydata/pandas/issues/9737
        axes = df.plot(subplots=True, title='blah')
        self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
        #axes[0].figure.savefig("test.png")
        for ax in axes[:2]:
            self._check_visible(ax.xaxis)  # xaxis must be visible for grid
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible(ax.get_xticklabels(minor=True), visible=False)
            self._check_visible([ax.xaxis.get_label()], visible=False)
        for ax in [axes[2]]:
            self._check_visible(ax.xaxis)
            self._check_visible(ax.get_xticklabels())
            self._check_visible([ax.xaxis.get_label()])
            self._check_ticks_props(ax, xrot=0)
        _check_plot_works(df.plot, title='blah')
        tuples = lzip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3),
                       index=MultiIndex.from_tuples(tuples))
        _check_plot_works(df.plot, use_index=True)
        # unicode
        index = MultiIndex.from_tuples([(u('\u03b1'), 0),
                                        (u('\u03b1'), 1),
                                        (u('\u03b2'), 2),
                                        (u('\u03b2'), 3),
                                        (u('\u03b3'), 4),
                                        (u('\u03b3'), 5),
                                        (u('\u03b4'), 6),
                                        (u('\u03b4'), 7)], names=['i0', 'i1'])
        columns = MultiIndex.from_tuples([('bar', u('\u0394')),
                                          ('bar', u('\u0395'))], names=['c0',
                                                                        'c1'])
        df = DataFrame(np.random.randint(0, 10, (8, 2)),
                       columns=columns,
                       index=index)
        _check_plot_works(df.plot, title=u('\u03A3'))
        # GH 6951
        # Test with single column
        df = DataFrame({'x': np.random.rand(10)})
        axes = _check_plot_works(df.plot.bar, subplots=True)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
        axes = _check_plot_works(df.plot.bar, subplots=True,
                                 layout=(-1, 1))
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
        # When ax is supplied and required number of axes is 1,
        # passed ax should be used:
        fig, ax = self.plt.subplots()
        axes = df.plot.bar(subplots=True, ax=ax)
        self.assertEqual(len(axes), 1)
        if self.mpl_ge_1_5_0:
            result = ax.axes
        else:
            result = ax.get_axes()  # deprecated
        self.assertIs(result, axes[0])
    def test_color_and_style_arguments(self):
        """color + style coexist only while the style string carries no color symbol."""
        df = DataFrame({'x': [1, 2], 'y': [3, 4]})
        # passing both 'color' and 'style' arguments should be allowed
        # if there is no color symbol in the style strings:
        ax = df.plot(color = ['red', 'black'], style = ['-', '--'])
        # check that the linestyles are correctly set:
        linestyle = [line.get_linestyle() for line in ax.lines]
        self.assertEqual(linestyle, ['-', '--'])
        # check that the colors are correctly set:
        color = [line.get_color() for line in ax.lines]
        self.assertEqual(color, ['red', 'black'])
        # passing both 'color' and 'style' arguments should not be allowed
        # if there is a color symbol in the style strings:
        with tm.assertRaises(ValueError):
            df.plot(color = ['red', 'black'], style = ['k-', 'r--'])
    def test_nonnumeric_exclude(self):
        """Non-numeric columns are silently dropped from the plot."""
        df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
        ax = df.plot()
        self.assertEqual(len(ax.get_lines()), 1)  # B was plotted
    @slow
    def test_implicit_label(self):
        """x column's name becomes the x-axis label when plotting x vs y."""
        df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
        ax = df.plot(x='a', y='b')
        self._check_text_labels(ax.xaxis.get_label(), 'a')
    @slow
    def test_donot_overwrite_index_name(self):
        """Plotting with a label kwarg must not clobber df.index.name."""
        # GH 8494
        df = DataFrame(randn(2, 2), columns=['a', 'b'])
        df.index.name = 'NAME'
        df.plot(y='b', label='LABEL')
        self.assertEqual(df.index.name, 'NAME')
    @slow
    def test_plot_xy(self):
        """x/y accept positions or labels; result matches set_index-based plots."""
        # columns.inferred_type == 'string'
        df = self.tdf
        self._check_data(df.plot(x=0, y=1),
                         df.set_index('A')['B'].plot())
        self._check_data(df.plot(x=0), df.set_index('A').plot())
        self._check_data(df.plot(y=0), df.B.plot())
        self._check_data(df.plot(x='A', y='B'),
                         df.set_index('A').B.plot())
        self._check_data(df.plot(x='A'), df.set_index('A').plot())
        self._check_data(df.plot(y='B'), df.B.plot())
        # columns.inferred_type == 'integer'
        df.columns = lrange(1, len(df.columns) + 1)
        self._check_data(df.plot(x=1, y=2),
                         df.set_index(1)[2].plot())
        self._check_data(df.plot(x=1), df.set_index(1).plot())
        self._check_data(df.plot(y=1), df[1].plot())
        # figsize and title
        ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
        self._check_text_labels(ax.title, 'Test')
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16., 8.))
        # columns.inferred_type == 'mixed'
        # TODO add MultiIndex test
    @slow
    def test_logscales(self):
        """logy/logx/loglog switch the corresponding axes to log scale."""
        df = DataFrame({'a': np.arange(100)},
                       index=np.arange(100))
        ax = df.plot(logy=True)
        self._check_ax_scales(ax, yaxis='log')
        ax = df.plot(logx=True)
        self._check_ax_scales(ax, xaxis='log')
        ax = df.plot(loglog=True)
        self._check_ax_scales(ax, xaxis='log', yaxis='log')
    @slow
    def test_xcompat(self):
        """x_compat (kwarg, plot_params key, and context manager) suppresses PeriodIndex xdata."""
        import pandas as pd
        df = self.tdf
        ax = df.plot(x_compat=True)
        lines = ax.get_lines()
        self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
        tm.close()
        pd.plot_params['xaxis.compat'] = True
        ax = df.plot()
        lines = ax.get_lines()
        self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
        tm.close()
        pd.plot_params['x_compat'] = False
        ax = df.plot()
        lines = ax.get_lines()
        # xdata is not literally a PeriodIndex, but is convertible to one
        self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
        self.assertIsInstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
        tm.close()
        # useful if you're plotting a bunch together
        with pd.plot_params.use('x_compat', True):
            ax = df.plot()
            lines = ax.get_lines()
            self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
        tm.close()
        ax = df.plot()
        lines = ax.get_lines()
        self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
        self.assertIsInstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
    def test_period_compat(self):
        """Mixing a bdate-indexed plot with axhline must not break period conversion."""
        # GH 9012
        # period-array conversions
        df = DataFrame(
            np.random.rand(21, 2),
            index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
            columns=['a', 'b'])
        df.plot()
        self.plt.axhline(y=0)
        tm.close()
    def test_unsorted_index(self):
        """A descending index plots with x/y pairs kept intact (int and float index)."""
        df = DataFrame({'y': np.arange(100)},
                       index=np.arange(99, -1, -1), dtype=np.int64)
        ax = df.plot()
        l = ax.get_lines()[0]
        rs = l.get_xydata()
        # rebuild a Series from the drawn (x, y) pairs and compare to input
        rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
        tm.assert_series_equal(rs, df.y, check_index_type=False)
        tm.close()
        df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)
        ax = df.plot()
        l = ax.get_lines()[0]
        rs = l.get_xydata()
        rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
        tm.assert_series_equal(rs, df.y)
    @slow
    def test_subplots(self):
        """subplots=True: per-column axes, legend labels, sharex tick visibility."""
        df = DataFrame(np.random.rand(10, 3),
                       index=list(string.ascii_letters[:10]))
        for kind in ['bar', 'barh', 'line', 'area']:
            axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
            self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
            self.assertEqual(axes.shape, (3, ))
            for ax, column in zip(axes, df.columns):
                self._check_legend_labels(ax, labels=[com.pprint_thing(column)])
            # with sharex, only the bottom axes keeps visible x tick labels
            for ax in axes[:-2]:
                self._check_visible(ax.xaxis)  # xaxis must be visible for grid
                self._check_visible(ax.get_xticklabels(), visible=False)
                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
                self._check_visible(ax.xaxis.get_label(), visible=False)
                self._check_visible(ax.get_yticklabels())
            self._check_visible(axes[-1].xaxis)
            self._check_visible(axes[-1].get_xticklabels())
            self._check_visible(axes[-1].get_xticklabels(minor=True))
            self._check_visible(axes[-1].xaxis.get_label())
            self._check_visible(axes[-1].get_yticklabels())
            axes = df.plot(kind=kind, subplots=True, sharex=False)
            for ax in axes:
                self._check_visible(ax.xaxis)
                self._check_visible(ax.get_xticklabels())
                self._check_visible(ax.get_xticklabels(minor=True))
                self._check_visible(ax.xaxis.get_label())
                self._check_visible(ax.get_yticklabels())
            axes = df.plot(kind=kind, subplots=True, legend=False)
            for ax in axes:
                self.assertTrue(ax.get_legend() is None)
    @slow
    def test_subplots_timeseries(self):
        """Same sharex visibility rules on a datetime index, plus rot/fontsize kwargs."""
        idx = date_range(start='2014-07-01', freq='M', periods=10)
        df = DataFrame(np.random.rand(10, 3), index=idx)
        for kind in ['line', 'area']:
            axes = df.plot(kind=kind, subplots=True, sharex=True)
            self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
            for ax in axes[:-2]:
                # GH 7801
                self._check_visible(ax.xaxis)  # xaxis must be visible for grid
                self._check_visible(ax.get_xticklabels(), visible=False)
                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
                self._check_visible(ax.xaxis.get_label(), visible=False)
                self._check_visible(ax.get_yticklabels())
            self._check_visible(axes[-1].xaxis)
            self._check_visible(axes[-1].get_xticklabels())
            self._check_visible(axes[-1].get_xticklabels(minor=True))
            self._check_visible(axes[-1].xaxis.get_label())
            self._check_visible(axes[-1].get_yticklabels())
            self._check_ticks_props(axes, xrot=0)
            axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
            for ax in axes:
                self._check_visible(ax.xaxis)
                self._check_visible(ax.get_xticklabels())
                self._check_visible(ax.get_xticklabels(minor=True))
                self._check_visible(ax.xaxis.get_label())
                self._check_visible(ax.get_yticklabels())
                self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
    @slow
    def test_subplots_layout(self):
        """layout tuples (including -1 wildcards) shape the subplot grid; bad layouts raise."""
        # GH 6667
        df = DataFrame(np.random.rand(10, 3),
                       index=list(string.ascii_letters[:10]))
        axes = df.plot(subplots=True, layout=(2, 2))
        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
        self.assertEqual(axes.shape, (2, 2))
        # -1 infers the missing dimension from the number of columns
        axes = df.plot(subplots=True, layout=(-1, 2))
        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
        self.assertEqual(axes.shape, (2, 2))
        axes = df.plot(subplots=True, layout=(2, -1))
        self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
        self.assertEqual(axes.shape, (2, 2))
        axes = df.plot(subplots=True, layout=(1, 4))
        self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
        self.assertEqual(axes.shape, (1, 4))
        axes = df.plot(subplots=True, layout=(-1, 4))
        self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
        self.assertEqual(axes.shape, (1, 4))
        axes = df.plot(subplots=True, layout=(4, -1))
        self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
        self.assertEqual(axes.shape, (4, 1))
        # too small or doubly-wildcarded layouts are rejected
        with tm.assertRaises(ValueError):
            axes = df.plot(subplots=True, layout=(1, 1))
        with tm.assertRaises(ValueError):
            axes = df.plot(subplots=True, layout=(-1, -1))
        # single column
        df = DataFrame(np.random.rand(10, 1),
                       index=list(string.ascii_letters[:10]))
        axes = df.plot(subplots=True)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
        self.assertEqual(axes.shape, (1, ))
        axes = df.plot(subplots=True, layout=(3, 3))
        self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
        self.assertEqual(axes.shape, (3, 3))
@slow
def test_subplots_warnings(self):
# GH 9464
warnings.simplefilter('error')
try:
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(np.random.randn(100, 4),
index=date_range('1/1/2000', periods=100))
df.plot(subplots=True, layout=(3, 2))
except Warning as w:
self.fail(w)
warnings.simplefilter('default')
    @slow
    def test_subplots_multiple_axes(self):
        """Pre-created axes passed via ax= are reused by subplots; mismatched counts raise."""
        # GH 5353, 6970, GH 7069
        fig, axes = self.plt.subplots(2, 3)
        df = DataFrame(np.random.rand(10, 3),
                       index=list(string.ascii_letters[:10]))
        returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        self.assertEqual(returned.shape, (3, ))
        self.assertIs(returned[0].figure, fig)
        # draw on second row
        returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        self.assertEqual(returned.shape, (3, ))
        self.assertIs(returned[0].figure, fig)
        self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
        tm.close()
        with tm.assertRaises(ValueError):
            fig, axes = self.plt.subplots(2, 3)
            # pass different number of axes from required
            df.plot(subplots=True, ax=axes)
        # pass 2-dim axes and invalid layout
        # invalid layout should not affect to input and return value
        # (show warning is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes
        fig, axes = self.plt.subplots(2, 2)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            df = DataFrame(np.random.rand(10, 4),
                           index=list(string.ascii_letters[:10]))
            returned = df.plot(subplots=True, ax=axes, layout=(2, 1),
                               sharex=False, sharey=False)
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            self.assertEqual(returned.shape, (4, ))
            returned = df.plot(subplots=True, ax=axes, layout=(2, -1),
                               sharex=False, sharey=False)
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            self.assertEqual(returned.shape, (4, ))
            returned = df.plot(subplots=True, ax=axes, layout=(-1, 2),
                               sharex=False, sharey=False)
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            self.assertEqual(returned.shape, (4, ))
        # single column
        fig, axes = self.plt.subplots(1, 1)
        df = DataFrame(np.random.rand(10, 1),
                       index=list(string.ascii_letters[:10]))
        axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
        self.assertEqual(axes.shape, (1, ))
    def test_subplots_ts_share_axes(self):
        """On a shared 3x3 grid only bottom-row x labels and first-column y labels show."""
        # GH 3964
        fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
        self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
        df = DataFrame(np.random.randn(10, 9), index=date_range(start='2014-07-01', freq='M', periods=10))
        for i, ax in enumerate(axes.ravel()):
            df[i].plot(ax=ax, fontsize=5)
        # Rows other than bottom should not be visible
        for ax in axes[0:-1].ravel():
            self._check_visible(ax.get_xticklabels(), visible=False)
        # Bottom row should be visible
        for ax in axes[-1].ravel():
            self._check_visible(ax.get_xticklabels(), visible=True)
        # First column should be visible
        for ax in axes[[0, 1, 2], [0]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=True)
        # Other columns should not be visible
        for ax in axes[[0, 1, 2], [1]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=False)
        for ax in axes[[0, 1, 2], [2]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=False)
    def test_subplots_sharex_axes_existing_axes(self):
        """Adding a secondary_y plot on an existing subplot keeps sharex visibility."""
        # GH 9158
        d = {'A': [1., 2., 3., 4.], 'B': [4., 3., 2., 1.], 'C': [5, 1, 3, 4]}
        df = DataFrame(d, index=date_range('2014 10 11', '2014 10 14'))
        axes = df[['A', 'B']].plot(subplots=True)
        df['C'].plot(ax=axes[0], secondary_y=True)
        # only the bottom subplot keeps its x tick labels
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        for ax in axes.ravel():
            self._check_visible(ax.get_yticklabels(), visible=True)
    @slow
    def test_subplots_dup_columns(self):
        """Duplicate column names: per-axes labels in subplots, '(right)' only without."""
        # GH 10962
        df = DataFrame(np.random.rand(5, 5), columns=list('aaaaa'))
        axes = df.plot(subplots=True)
        for ax in axes:
            self._check_legend_labels(ax, labels=['a'])
            self.assertEqual(len(ax.lines), 1)
        tm.close()
        axes = df.plot(subplots=True, secondary_y='a')
        for ax in axes:
            # (right) is only attached when subplots=False
            self._check_legend_labels(ax, labels=['a'])
            self.assertEqual(len(ax.lines), 1)
        tm.close()
        ax = df.plot(secondary_y='a')
        self._check_legend_labels(ax, labels=['a (right)'] * 5)
        # all lines live on the right (secondary) axes
        self.assertEqual(len(ax.lines), 0)
        self.assertEqual(len(ax.right_ax.lines), 5)
    def test_negative_log(self):
        """Area plots of negative data on a log scale must raise ValueError."""
        df = - DataFrame(rand(6, 4),
                         index=list(string.ascii_letters[:6]),
                         columns=['x', 'y', 'z', 'four'])
        with tm.assertRaises(ValueError):
            df.plot.area(logy=True)
        with tm.assertRaises(ValueError):
            df.plot.area(loglog=True)
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
base += nl.get_data()[1] # get y coodinates
sy = sl.get_data()[1]
self.assert_numpy_array_equal(base, sy)
    def test_line_area_stacked(self):
        """stacked=True accumulates y values; mixed-sign columns must raise."""
        with tm.RNGContext(42):
            df = DataFrame(rand(6, 4),
                           columns=['w', 'x', 'y', 'z'])
            neg_df = - df
            # each column has either positive or negative value
            sep_df = DataFrame({'w': rand(6), 'x': rand(6),
                                'y': - rand(6), 'z': - rand(6)})
            # each column has positive-negative mixed value
            mixed_df = DataFrame(randn(6, 4), index=list(string.ascii_letters[:6]),
                                 columns=['w', 'x', 'y', 'z'])
            for kind in ['line', 'area']:
                ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
                self._compare_stacked_y_cood(ax1.lines, ax2.lines)
                ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
                self._compare_stacked_y_cood(ax1.lines, ax2.lines)
                ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
                # positive and negative columns are stacked separately
                self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
                self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
                _check_plot_works(mixed_df.plot, stacked=False)
                with tm.assertRaises(ValueError):
                    mixed_df.plot(stacked=True)
                _check_plot_works(df.plot, kind=kind, logx=True, stacked=True)
    def test_line_area_nan_df(self):
        """NaNs are masked in line plots and treated as 0 when stacking/area plotting."""
        values1 = [1, 2, np.nan, 3]
        values2 = [3, np.nan, 2, 1]
        df = DataFrame({'a': values1, 'b': values2})
        tdf = DataFrame({'a': values1, 'b': values2}, index=tm.makeDateIndex(k=4))
        for d in [df, tdf]:
            ax = _check_plot_works(d.plot)
            masked1 = ax.lines[0].get_ydata()
            masked2 = ax.lines[1].get_ydata()
            # remove nan for comparison purpose
            self.assert_numpy_array_equal(np.delete(masked1.data, 2), np.array([1, 2, 3]))
            self.assert_numpy_array_equal(np.delete(masked2.data, 1), np.array([3, 2, 1]))
            self.assert_numpy_array_equal(masked1.mask, np.array([False, False, True, False]))
            self.assert_numpy_array_equal(masked2.mask, np.array([False, True, False, False]))
            # when stacking, NaN contributes 0 to the running total
            expected1 = np.array([1, 2, 0, 3])
            expected2 = np.array([3, 0, 2, 1])
            ax = _check_plot_works(d.plot, stacked=True)
            self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
            ax = _check_plot_works(d.plot.area)
            self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
            ax = _check_plot_works(d.plot.area, stacked=False)
            self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
    def test_line_lim(self):
        """x-axis limits hug the data, also for secondary_y and subplots."""
        df = DataFrame(rand(6, 3), columns=['x', 'y', 'z'])
        ax = df.plot()
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        self.assertEqual(xmin, lines[0].get_data()[0][0])
        self.assertEqual(xmax, lines[0].get_data()[0][-1])
        ax = df.plot(secondary_y=True)
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        self.assertEqual(xmin, lines[0].get_data()[0][0])
        self.assertEqual(xmax, lines[0].get_data()[0][-1])
        axes = df.plot(secondary_y=True, subplots=True)
        self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
        for ax in axes:
            # each subplot axes is the secondary one: has left_ax, no right_ax
            self.assertTrue(hasattr(ax, 'left_ax'))
            self.assertFalse(hasattr(ax, 'right_ax'))
            xmin, xmax = ax.get_xlim()
            lines = ax.get_lines()
            self.assertEqual(xmin, lines[0].get_data()[0][0])
            self.assertEqual(xmax, lines[0].get_data()[0][-1])
    def test_area_lim(self):
        """Area plots clamp the y limit at zero on the signed side of the data."""
        df = DataFrame(rand(6, 4),
                       columns=['x', 'y', 'z', 'four'])
        neg_df = - df
        for stacked in [True, False]:
            ax = _check_plot_works(df.plot.area, stacked=stacked)
            xmin, xmax = ax.get_xlim()
            ymin, ymax = ax.get_ylim()
            lines = ax.get_lines()
            self.assertEqual(xmin, lines[0].get_data()[0][0])
            self.assertEqual(xmax, lines[0].get_data()[0][-1])
            # positive data: baseline at y=0
            self.assertEqual(ymin, 0)
            ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
            ymin, ymax = ax.get_ylim()
            # negative data: ceiling at y=0
            self.assertEqual(ymax, 0)
    @slow
    def test_bar_colors(self):
        """Bar facecolors follow defaults, custom lists, colormaps, and single colors."""
        import matplotlib.pyplot as plt
        default_colors = self._maybe_unpack_cycler(plt.rcParams)
        df = DataFrame(randn(5, 5))
        ax = df.plot.bar()
        # patches are laid out column-major: every 5th patch starts a new column
        self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
        tm.close()
        custom_colors = 'rgcby'
        ax = df.plot.bar(color=custom_colors)
        self._check_colors(ax.patches[::5], facecolors=custom_colors)
        tm.close()
        from matplotlib import cm
        # Test str -> colormap functionality
        ax = df.plot.bar(colormap='jet')
        rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
        self._check_colors(ax.patches[::5], facecolors=rgba_colors)
        tm.close()
        # Test colormap functionality
        ax = df.plot.bar(colormap=cm.jet)
        rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
        self._check_colors(ax.patches[::5], facecolors=rgba_colors)
        tm.close()
        ax = df.ix[:, [0]].plot.bar(color='DodgerBlue')
        self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
        tm.close()
        ax = df.plot(kind='bar', color='green')
        self._check_colors(ax.patches[::5], facecolors=['green'] * 5)
        tm.close()
    @slow
    def test_bar_linewidth(self):
        """linewidth kwarg reaches every bar patch (regular, stacked, subplots)."""
        df = DataFrame(randn(5, 5))
        # regular
        ax = df.plot.bar(linewidth=2)
        for r in ax.patches:
            self.assertEqual(r.get_linewidth(), 2)
        # stacked
        ax = df.plot.bar(stacked=True, linewidth=2)
        for r in ax.patches:
            self.assertEqual(r.get_linewidth(), 2)
        # subplots
        axes = df.plot.bar(linewidth=2, subplots=True)
        self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
        for ax in axes:
            for r in ax.patches:
                self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_barwidth(self):
df = DataFrame(randn(5, 5))
width = 0.9
# regular
ax = df.plot.bar(width=width)
for r in ax.patches:
self.assertEqual(r.get_width(), width / len(df.columns))
# stacked
ax = df.plot.bar(stacked=True, width=width)
for r in ax.patches:
self.assertEqual(r.get_width(), width)
# horizontal regular
ax = df.plot.barh(width=width)
for r in ax.patches:
self.assertEqual(r.get_height(), width / len(df.columns))
# horizontal stacked
ax = df.plot.barh(stacked=True, width=width)
for r in ax.patches:
self.assertEqual(r.get_height(), width)
# subplots
axes = df.plot.bar(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_width(), width)
# horizontal subplots
axes = df.plot.barh(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_height(), width)
@slow
def test_bar_barwidth_position(self):
df = DataFrame(randn(5, 5))
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9, position=0.2)
    @slow
    def test_bar_bottom_left(self):
        """bottom= (bar) and left= (barh) offset the bars' origin; for
        stacked plots the offset applies to the base series only."""
        df = DataFrame(rand(5, 5))
        ax = df.plot.bar(stacked=False, bottom=1)
        result = [p.get_y() for p in ax.patches]
        self.assertEqual(result, [1] * 25)
        ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
        # only the first 5 patches (base series) start at the given offsets
        result = [p.get_y() for p in ax.patches[:5]]
        self.assertEqual(result, [-1, -2, -3, -4, -5])
        ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
        result = [p.get_x() for p in ax.patches]
        self.assertEqual(result, [1] * 25)
        ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
        result = [p.get_x() for p in ax.patches[:5]]
        self.assertEqual(result, [1, 2, 3, 4, 5])
        # a scalar offset is broadcast across all subplots
        axes = df.plot.bar(subplots=True, bottom=-1)
        for ax in axes:
            result = [p.get_y() for p in ax.patches]
            self.assertEqual(result, [-1] * 5)
        axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
        for ax in axes:
            result = [p.get_x() for p in ax.patches]
            self.assertEqual(result, [1] * 5)
    @slow
    def test_bar_nan(self):
        """NaN values plot as zero-height bars and contribute nothing to
        the cumulative base of a stacked bar plot."""
        df = DataFrame({'A': [10, np.nan, 20], 'B': [5, 10, 20],
                        'C': [1, 2, 3]})
        ax = df.plot.bar()
        expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
        result = [p.get_height() for p in ax.patches]
        self.assertEqual(result, expected)
        ax = df.plot.bar(stacked=True)
        result = [p.get_height() for p in ax.patches]
        self.assertEqual(result, expected)
        # stacked bases: the NaN bar leaves the running total unchanged
        result = [p.get_y() for p in ax.patches]
        expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
        self.assertEqual(result, expected)
@slow
def test_plot_scatter(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot.scatter, x='x', y='y')
_check_plot_works(df.plot.scatter, x=1, y=2)
with tm.assertRaises(TypeError):
df.plot.scatter(x='x')
with tm.assertRaises(TypeError):
df.plot.scatter(y='y')
# GH 6951
axes = df.plot(x='x', y='y', kind='scatter', subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
    @slow
    def test_plot_scatter_with_c(self):
        """Scatter c= handling: column (by label or position) with default
        'Greys' colormap and colorbar label, explicit colormap, disabling
        the colorbar, solid colors, and raw RGBA / float arrays."""
        df = DataFrame(randn(6, 4),
                       index=list(string.ascii_letters[:6]),
                       columns=['x', 'y', 'z', 'four'])
        axes = [df.plot.scatter(x='x', y='y', c='z'),
                df.plot.scatter(x=0, y=1, c=2)]
        for ax in axes:
            # default to Greys
            self.assertEqual(ax.collections[0].cmap.name, 'Greys')
            if self.mpl_ge_1_3_1:
                # n.b. there appears to be no public method to get the colorbar
                # label
                self.assertEqual(ax.collections[0].colorbar._label, 'z')
        cm = 'cubehelix'
        ax = df.plot.scatter(x='x', y='y', c='z', colormap=cm)
        self.assertEqual(ax.collections[0].cmap.name, cm)
        # verify turning off colorbar works
        ax = df.plot.scatter(x='x', y='y', c='z', colorbar=False)
        self.assertIs(ax.collections[0].colorbar, None)
        # verify that we can still plot a solid color
        ax = df.plot.scatter(x=0, y=1, c='red')
        self.assertIs(ax.collections[0].colorbar, None)
        self._check_colors(ax.collections, facecolors=['r'])
        # Ensure that we can pass an np.array straight through to matplotlib,
        # this functionality was accidentally removed previously.
        # See https://github.com/pydata/pandas/issues/8852 for bug report
        #
        # Exercise colormap path and non-colormap path as they are independent
        #
        df = DataFrame({'A': [1, 2], 'B': [3, 4]})
        red_rgba = [1.0, 0.0, 0.0, 1.0]
        green_rgba = [0.0, 1.0, 0.0, 1.0]
        rgba_array = np.array([red_rgba, green_rgba])
        ax = df.plot.scatter(x='A', y='B', c=rgba_array)
        # expect the face colors of the points in the non-colormap path to be
        # identical to the values we supplied, normally we'd be on shaky ground
        # comparing floats for equality but here we expect them to be
        # identical.
        self.assertTrue(
            np.array_equal(
                ax.collections[0].get_facecolor(),
                rgba_array))
        # we don't test the colors of the faces in this next plot because they
        # are dependent on the spring colormap, which may change its colors
        # later.
        float_array = np.array([0.0, 1.0])
        df.plot.scatter(x='A', y='B', c=float_array, cmap='spring')
def test_scatter_colors(self):
df = DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3], 'c': [1, 2, 3]})
with tm.assertRaises(TypeError):
df.plot.scatter(x='a', y='b', c='c', color='green')
ax = df.plot.scatter(x='a', y='b', c='c')
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0],
(0, 0, 1, 1))
ax = df.plot.scatter(x='a', y='b', color='white')
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0],
(1, 1, 1, 1))
    @slow
    def test_plot_bar(self):
        """Basic bar/barh plotting: legend, subplots, stacking, wide
        frames, and tick-label rotation/fontsize defaults."""
        df = DataFrame(randn(6, 4),
                       index=list(string.ascii_letters[:6]),
                       columns=['one', 'two', 'three', 'four'])
        _check_plot_works(df.plot.bar)
        _check_plot_works(df.plot.bar, legend=False)
        _check_plot_works(df.plot.bar, filterwarnings='ignore', subplots=True)
        _check_plot_works(df.plot.bar, stacked=True)
        # wide frame: more columns than rows
        df = DataFrame(randn(10, 15),
                       index=list(string.ascii_letters[:10]),
                       columns=lrange(15))
        _check_plot_works(df.plot.bar)
        df = DataFrame({'a': [0, 1], 'b': [1, 0]})
        ax = _check_plot_works(df.plot.bar)
        # bar defaults to xrot=90; barh defaults to yrot=0
        self._check_ticks_props(ax, xrot=90)
        ax = df.plot.bar(rot=35, fontsize=10)
        self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
        ax = _check_plot_works(df.plot.barh)
        self._check_ticks_props(ax, yrot=0)
        ax = df.plot.barh(rot=55, fontsize=11)
        self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
    def _check_bar_alignment(self, df, kind='bar', stacked=False,
                             subplots=False, align='center',
                             width=0.5, position=0.5):
        """Plot ``df`` as bar/barh and verify axis margins, integer tick
        locations, and bar center/edge placement for the given options.

        Returns the flattened, visible axes for further inspection.
        """
        axes = df.plot(kind=kind, stacked=stacked, subplots=subplots,
                       align=align, width=width, position=position,
                       grid=True)
        axes = self._flatten_visible(axes)
        for ax in axes:
            if kind == 'bar':
                axis = ax.xaxis
                ax_min, ax_max = ax.get_xlim()
                min_edge = min([p.get_x() for p in ax.patches])
                max_edge = max([p.get_x() + p.get_width() for p in ax.patches])
            elif kind == 'barh':
                axis = ax.yaxis
                ax_min, ax_max = ax.get_ylim()
                min_edge = min([p.get_y() for p in ax.patches])
                max_edge = max([p.get_y() + p.get_height() for p in ax.patches])
            else:
                raise ValueError
            # GH 7498
            # compare margins between lim and bar edges
            self.assertAlmostEqual(ax_min, min_edge - 0.25)
            self.assertAlmostEqual(ax_max, max_edge + 0.25)
            p = ax.patches[0]
            # grouped (unstacked, single-axes) bars span width * ncols, so
            # the group center scales with the number of columns; stacked
            # and subplotted bars occupy the full width on their own
            if kind == 'bar' and (stacked is True or subplots is True):
                edge = p.get_x()
                center = edge + p.get_width() * position
            elif kind == 'bar' and stacked is False:
                center = p.get_x() + p.get_width() * len(df.columns) * position
                edge = p.get_x()
            elif kind == 'barh' and (stacked is True or subplots is True):
                center = p.get_y() + p.get_height() * position
                edge = p.get_y()
            elif kind == 'barh' and stacked is False:
                center = p.get_y() + p.get_height() * len(df.columns) * position
                edge = p.get_y()
            else:
                raise ValueError
            # Check the ticks locates on integer
            self.assertTrue((axis.get_ticklocs() == np.arange(len(df))).all())
            if align == 'center':
                # Check whether the bar locates on center
                self.assertAlmostEqual(axis.get_ticklocs()[0], center)
            elif align == 'edge':
                # Check whether the bar's edge starts from the tick
                self.assertAlmostEqual(axis.get_ticklocs()[0], edge)
            else:
                raise ValueError
        return axes
@slow
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9)
@slow
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9)
@slow
def test_bar_subplots_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9)
self._check_bar_alignment(df, kind='barh', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9)
@slow
def test_bar_align_single_column(self):
df = DataFrame(randn(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True)
@slow
def test_bar_edge(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True,
width=0.9, align='edge')
    @slow
    def test_bar_log_no_subplots(self):
        """Log-scale bar plot produces the expected y-axis tick locations."""
        # GH3254, GH3298 matplotlib/matplotlib#1882, #1892
        # regressions in 1.2.1
        expected = np.array([1., 10.])
        if not self.mpl_le_1_2_1:
            # later matplotlib versions pad the log axis with an extra decade
            expected = np.hstack((.1, expected, 100))
        # no subplots
        df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
        ax = df.plot.bar(grid=True, log=True)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
    @slow
    def test_bar_log_subplots(self):
        """Log-scale bar subplots each get the expected y-axis ticks."""
        expected = np.array([1., 10., 100., 1000.])
        if not self.mpl_le_1_2_1:
            # later matplotlib versions pad the log axis with an extra decade
            expected = np.hstack((.1, expected, 1e4))
        ax = DataFrame([Series([200, 300]),
                        Series([300, 500])]).plot.bar(log=True, subplots=True)
        tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
        tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
    @slow
    def test_boxplot(self):
        """df.plot.box: tick labels/locations, artist counts, log-scaled
        subplots, Series boxplots, and explicit positions."""
        df = self.hist_df
        series = df['height']
        numeric_cols = df._get_numeric_data().columns
        labels = [com.pprint_thing(c) for c in numeric_cols]
        ax = _check_plot_works(df.plot.box)
        self._check_text_labels(ax.get_xticklabels(), labels)
        # boxes are placed at ticks 1..n
        tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(),
                                    np.arange(1, len(numeric_cols) + 1))
        self.assertEqual(len(ax.lines),
                         self.bp_n_objects * len(numeric_cols))
        # different warning on py3
        if not PY3:
            axes = _check_plot_works(df.plot.box,
                                     subplots=True, logy=True)
            self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
            self._check_ax_scales(axes, yaxis='log')
            for ax, label in zip(axes, labels):
                self._check_text_labels(ax.get_xticklabels(), [label])
                self.assertEqual(len(ax.lines), self.bp_n_objects)
        axes = series.plot.box(rot=40)
        self._check_ticks_props(axes, xrot=40, yrot=0)
        tm.close()
        ax = _check_plot_works(series.plot.box)
        # positions= moves the boxes onto explicit tick locations
        positions = np.array([1, 6, 7])
        ax = df.plot.box(positions=positions)
        numeric_cols = df._get_numeric_data().columns
        labels = [com.pprint_thing(c) for c in numeric_cols]
        self._check_text_labels(ax.get_xticklabels(), labels)
        tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
        self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
    @slow
    def test_boxplot_vertical(self):
        """Horizontal boxplots (vert=False): labels move to the y axis,
        rotation applies to y tick labels, and log/positions still work."""
        df = self.hist_df
        numeric_cols = df._get_numeric_data().columns
        labels = [com.pprint_thing(c) for c in numeric_cols]
        # if horizontal, yticklabels are rotated
        ax = df.plot.box(rot=50, fontsize=8, vert=False)
        self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
        self._check_text_labels(ax.get_yticklabels(), labels)
        self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
        axes = _check_plot_works(df.plot.box, filterwarnings='ignore', subplots=True,
                                 vert=False, logx=True)
        self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
        self._check_ax_scales(axes, xaxis='log')
        for ax, label in zip(axes, labels):
            self._check_text_labels(ax.get_yticklabels(), [label])
            self.assertEqual(len(ax.lines), self.bp_n_objects)
        # explicit positions land on the given y ticks
        positions = np.array([3, 2, 8])
        ax = df.plot.box(positions=positions, vert=False)
        self._check_text_labels(ax.get_yticklabels(), labels)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
        self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
@slow
def test_boxplot_return_type(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with tm.assertRaises(ValueError):
df.plot.box(return_type='NOTATYPE')
result = df.plot.box(return_type='dict')
self._check_box_return_type(result, 'dict')
result = df.plot.box(return_type='axes')
self._check_box_return_type(result, 'axes')
result = df.plot.box(return_type='both')
self._check_box_return_type(result, 'both')
    @slow
    def test_boxplot_subplots_return_type(self):
        """With subplots=True the default return is an ndarray of axes;
        explicit return_type values keep their documented shapes."""
        df = self.hist_df
        # normal style: return_type=None
        result = df.plot.box(subplots=True)
        self.assertIsInstance(result, np.ndarray)
        self._check_box_return_type(result, None,
                                    expected_keys=['height', 'weight', 'category'])
        for t in ['dict', 'axes', 'both']:
            returned = df.plot.box(return_type=t, subplots=True)
            self._check_box_return_type(returned, t,
                                        expected_keys=['height', 'weight', 'category'],
                                        check_ax_title=False)
    @slow
    def test_kde_df(self):
        """DataFrame KDE plots: legend labels, tick rotation/fontsize,
        subplot layout, and log-scaled y axes."""
        tm._skip_if_no_scipy()
        _skip_if_no_scipy_gaussian_kde()
        df = DataFrame(randn(100, 4))
        ax = _check_plot_works(df.plot, kind='kde')
        expected = [com.pprint_thing(c) for c in df.columns]
        self._check_legend_labels(ax, labels=expected)
        self._check_ticks_props(ax, xrot=0)
        ax = df.plot(kind='kde', rot=20, fontsize=5)
        self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
        axes = _check_plot_works(df.plot, filterwarnings='ignore', kind='kde', subplots=True)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
        axes = df.plot(kind='kde', logy=True, subplots=True)
        self._check_ax_scales(axes, yaxis='log')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
ax = _check_plot_works(df.plot, kind='kde')
    @slow
    def test_hist_df(self):
        """DataFrame/Series histograms: legend labels, subplot layout,
        log scale, normed/cumulative bins, horizontal orientation."""
        from matplotlib.patches import Rectangle
        if self.mpl_le_1_2_1:
            raise nose.SkipTest("not supported in matplotlib <= 1.2.x")
        df = DataFrame(randn(100, 4))
        series = df[0]
        ax = _check_plot_works(df.plot.hist)
        expected = [com.pprint_thing(c) for c in df.columns]
        self._check_legend_labels(ax, labels=expected)
        axes = _check_plot_works(df.plot.hist, filterwarnings='ignore', subplots=True, logy=True)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
        self._check_ax_scales(axes, yaxis='log')
        axes = series.plot.hist(rot=40)
        self._check_ticks_props(axes, xrot=40, yrot=0)
        tm.close()
        ax = series.plot.hist(normed=True, cumulative=True, bins=4)
        # height of last bin must be 1.0 for a normed cumulative histogram
        # NOTE(review): rects[-1] may actually be the axes background
        # Rectangle (also height 1.0) rather than the last bin — confirm
        rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
        self.assertAlmostEqual(rects[-1].get_height(), 1.0)
        tm.close()
        ax = series.plot.hist(cumulative=True, bins=4)
        # rects[-2]: last histogram bin; cumulative count equals all 100 points
        rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
        self.assertAlmostEqual(rects[-2].get_height(), 100.0)
        tm.close()
        # if horizontal, yticklabels are rotated
        axes = df.plot.hist(rot=50, fontsize=8, orientation='horizontal')
        self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
def _check_box_coord(self, patches, expected_y=None, expected_h=None,
expected_x=None, expected_w=None):
result_y = np.array([p.get_y() for p in patches])
result_height = np.array([p.get_height() for p in patches])
result_x = np.array([p.get_x() for p in patches])
result_width = np.array([p.get_width() for p in patches])
if expected_y is not None:
self.assert_numpy_array_equal(result_y, expected_y)
if expected_h is not None:
self.assert_numpy_array_equal(result_height, expected_h)
if expected_x is not None:
self.assert_numpy_array_equal(result_x, expected_x)
if expected_w is not None:
self.assert_numpy_array_equal(result_width, expected_w)
    @slow
    def test_hist_df_coord(self):
        """Histogram patch geometry: bar bases and sizes for normal and
        NaN-containing data, stacked or not, vertical and horizontal."""
        normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]),
                                              np.array([10, 9, 8, 7, 6])),
                               'B': np.repeat(np.array([1, 2, 3, 4, 5]),
                                              np.array([8, 8, 8, 8, 8])),
                               'C': np.repeat(np.array([1, 2, 3, 4, 5]),
                                              np.array([6, 7, 8, 9, 10]))},
                              columns=['A', 'B', 'C'])
        # NaNs must be dropped without shifting the remaining bins
        nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]),
                                           np.array([3, 10, 9, 8, 7, 6])),
                            'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]),
                                           np.array([8, 3, 8, 8, 8, 8])),
                            'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]),
                                           np.array([6, 7, 8, 3, 9, 10]))},
                           columns=['A', 'B', 'C'])
        for df in [normal_df, nan_df]:
            ax = df.plot.hist(bins=5)
            self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]),
                                  expected_h=np.array([10, 9, 8, 7, 6]))
            self._check_box_coord(ax.patches[5:10], expected_y=np.array([0, 0, 0, 0, 0]),
                                  expected_h=np.array([8, 8, 8, 8, 8]))
            self._check_box_coord(ax.patches[10:], expected_y=np.array([0, 0, 0, 0, 0]),
                                  expected_h=np.array([6, 7, 8, 9, 10]))
            # stacked: each series starts where the previous one ended
            ax = df.plot.hist(bins=5, stacked=True)
            self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]),
                                  expected_h=np.array([10, 9, 8, 7, 6]))
            self._check_box_coord(ax.patches[5:10], expected_y=np.array([10, 9, 8, 7, 6]),
                                  expected_h=np.array([8, 8, 8, 8, 8]))
            self._check_box_coord(ax.patches[10:], expected_y=np.array([18, 17, 16, 15, 14]),
                                  expected_h=np.array([6, 7, 8, 9, 10]))
            # stacked + subplots: stacking is per-axes, so bases reset to 0
            axes = df.plot.hist(bins=5, stacked=True, subplots=True)
            self._check_box_coord(axes[0].patches, expected_y=np.array([0, 0, 0, 0, 0]),
                                  expected_h=np.array([10, 9, 8, 7, 6]))
            self._check_box_coord(axes[1].patches, expected_y=np.array([0, 0, 0, 0, 0]),
                                  expected_h=np.array([8, 8, 8, 8, 8]))
            self._check_box_coord(axes[2].patches, expected_y=np.array([0, 0, 0, 0, 0]),
                                  expected_h=np.array([6, 7, 8, 9, 10]))
            if self.mpl_ge_1_3_1:
                # horizontal
                ax = df.plot.hist(bins=5, orientation='horizontal')
                self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]),
                                      expected_w=np.array([10, 9, 8, 7, 6]))
                self._check_box_coord(ax.patches[5:10], expected_x=np.array([0, 0, 0, 0, 0]),
                                      expected_w=np.array([8, 8, 8, 8, 8]))
                self._check_box_coord(ax.patches[10:], expected_x=np.array([0, 0, 0, 0, 0]),
                                      expected_w=np.array([6, 7, 8, 9, 10]))
                ax = df.plot.hist(bins=5, stacked=True, orientation='horizontal')
                self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]),
                                      expected_w=np.array([10, 9, 8, 7, 6]))
                self._check_box_coord(ax.patches[5:10], expected_x=np.array([10, 9, 8, 7, 6]),
                                      expected_w=np.array([8, 8, 8, 8, 8]))
                self._check_box_coord(ax.patches[10:], expected_x=np.array([18, 17, 16, 15, 14]),
                                      expected_w=np.array([6, 7, 8, 9, 10]))
                axes = df.plot.hist(bins=5, stacked=True,
                                    subplots=True, orientation='horizontal')
                self._check_box_coord(axes[0].patches, expected_x=np.array([0, 0, 0, 0, 0]),
                                      expected_w=np.array([10, 9, 8, 7, 6]))
                self._check_box_coord(axes[1].patches, expected_x=np.array([0, 0, 0, 0, 0]),
                                      expected_w=np.array([8, 8, 8, 8, 8]))
                self._check_box_coord(axes[2].patches, expected_x=np.array([0, 0, 0, 0, 0]),
                                      expected_w=np.array([6, 7, 8, 9, 10]))
@slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
    @slow
    def test_df_legend_labels(self):
        """Legend labels accumulate across plots sharing an axes, honour
        legend=False / 'reverse', add '(right)' for secondary_y columns,
        accept scatter label=, and never mutate the source frame."""
        kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
        df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
        df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f'])
        df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i'])
        df4 = DataFrame(rand(3, 3), columns=['j', 'k', 'l'])
        for kind in kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            ax = df.plot(kind=kind, legend=True)
            self._check_legend_labels(ax, labels=df.columns)
            # legend=False leaves the existing legend untouched
            ax = df2.plot(kind=kind, legend=False, ax=ax)
            self._check_legend_labels(ax, labels=df.columns)
            ax = df3.plot(kind=kind, legend=True, ax=ax)
            self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
            # legend='reverse' reverses only the newly added labels
            ax = df4.plot(kind=kind, legend='reverse', ax=ax)
            expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))
            self._check_legend_labels(ax, labels=expected)
        # Secondary Y
        ax = df.plot(legend=True, secondary_y='b')
        self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
        ax = df2.plot(legend=False, ax=ax)
        self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
        ax = df3.plot(kind='bar', legend=True, secondary_y='h', ax=ax)
        self._check_legend_labels(ax, labels=['a', 'b (right)', 'c', 'g', 'h (right)', 'i'])
        # Time Series
        ind = date_range('1/1/2014', periods=3)
        df = DataFrame(randn(3, 3), columns=['a', 'b', 'c'], index=ind)
        df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f'], index=ind)
        df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i'], index=ind)
        ax = df.plot(legend=True, secondary_y='b')
        self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
        ax = df2.plot(legend=False, ax=ax)
        self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
        ax = df3.plot(legend=True, ax=ax)
        self._check_legend_labels(ax, labels=['a', 'b (right)', 'c', 'g', 'h', 'i'])
        # scatter
        ax = df.plot.scatter(x='a', y='b', label='data1')
        self._check_legend_labels(ax, labels=['data1'])
        ax = df2.plot.scatter(x='d', y='e', legend=False,
                              label='data2', ax=ax)
        self._check_legend_labels(ax, labels=['data1'])
        ax = df3.plot.scatter(x='g', y='h', label='data3', ax=ax)
        self._check_legend_labels(ax, labels=['data1', 'data3'])
        # ensure label args pass through and
        # index name does not mutate
        # column names don't mutate
        df5 = df.set_index('a')
        ax = df5.plot(y='b')
        self._check_legend_labels(ax, labels=['b'])
        ax = df5.plot(y='b', label='LABEL_b')
        self._check_legend_labels(ax, labels=['LABEL_b'])
        self._check_text_labels(ax.xaxis.get_label(), 'a')
        ax = df5.plot(y='c', label='LABEL_c', ax=ax)
        self._check_legend_labels(ax, labels=['LABEL_b','LABEL_c'])
        self.assertTrue(df5.columns.tolist() == ['b','c'])
    def test_legend_name(self):
        """The legend title tracks the columns' (Multi)Index name(s) and is
        only refreshed when a plot actually draws a legend."""
        multi = DataFrame(randn(4, 4),
                          columns=[np.array(['a', 'a', 'b', 'b']),
                                   np.array(['x', 'y', 'x', 'y'])])
        multi.columns.names = ['group', 'individual']
        ax = multi.plot()
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, 'group,individual')
        df = DataFrame(randn(5, 5))
        ax = df.plot(legend=True, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, 'group,individual')
        df.columns.name = 'new'
        # legend=False keeps the previous title
        ax = df.plot(legend=False, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, 'group,individual')
        # legend=True picks up the new columns name
        ax = df.plot(legend=True, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, 'new')
@slow
def test_no_legend(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
@slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
df = DataFrame(randn(100, 3))
for markers in [{0: '^', 1: '+', 2: 'o'},
{0: '^', 1: '+'},
['^', '+', 'o'],
['^', '+']]:
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
for i, l in enumerate(ax.get_lines()[:len(markers)]):
self.assertEqual(l.get_marker(), markers[i])
@slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
self.assertEqual(ax.get_legend(), None)
ax = s.plot(legend=True)
self.assertEqual(ax.get_legend().get_texts()[0].get_text(),
'None')
    @slow
    def test_line_colors(self):
        """Line colors: explicit color string/list, the deprecated colors=
        alias, colormaps, single-column frames, full hex values, and the
        ValueError for shorthand hex like '#F00' (GH 10299)."""
        import sys
        from matplotlib import cm
        custom_colors = 'rgcby'
        df = DataFrame(randn(5, 5))
        ax = df.plot(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        # the deprecated colors= alias prints to stderr; capture and restore
        tmp = sys.stderr
        sys.stderr = StringIO()
        try:
            tm.close()
            ax2 = df.plot(colors=custom_colors)
            lines2 = ax2.get_lines()
            for l1, l2 in zip(ax.get_lines(), lines2):
                self.assertEqual(l1.get_color(), l2.get_color())
        finally:
            sys.stderr = tmp
        tm.close()
        ax = df.plot(colormap='jet')
        rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
        tm.close()
        ax = df.plot(colormap=cm.jet)
        rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
        tm.close()
        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        ax = df.ix[:, [0]].plot(color='DodgerBlue')
        self._check_colors(ax.lines, linecolors=['DodgerBlue'])
        ax = df.plot(color='red')
        self._check_colors(ax.get_lines(), linecolors=['red'] * 5)
        tm.close()
        # GH 10299
        custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
        ax = df.plot(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        tm.close()
        with tm.assertRaises(ValueError):
            # Color contains shorthand hex value results in ValueError
            custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
            # Forced show plot
            _check_plot_works(df.plot, color=custom_colors)
    @slow
    def test_line_colors_and_styles_subplots(self):
        """GH 9894: with subplots=True, colors and styles (chars, names,
        lists, colormaps) are distributed one per axes."""
        from matplotlib import cm
        default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        axes = df.plot(subplots=True)
        for ax, c in zip(axes, list(default_colors)):
            self._check_colors(ax.get_lines(), linecolors=c)
        tm.close()
        # single color char
        axes = df.plot(subplots=True, color='k')
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=['k'])
        tm.close()
        # single color str
        axes = df.plot(subplots=True, color='green')
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=['green'])
        tm.close()
        custom_colors = 'rgcby'
        axes = df.plot(color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        axes = df.plot(color=list(custom_colors), subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # GH 10299
        custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
        axes = df.plot(color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        with tm.assertRaises(ValueError):
            # Color contains shorthand hex value results in ValueError
            custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
            # Forced show plot
            _check_plot_works(df.plot, color=custom_colors, subplots=True,
                              filterwarnings='ignore')
        # colormap given by name or by object gives the same sampled colors
        rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
        for cmap in ['jet', cm.jet]:
            axes = df.plot(colormap=cmap, subplots=True)
            for ax, c in zip(axes, rgba_colors):
                self._check_colors(ax.get_lines(), linecolors=[c])
            tm.close()
        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        axes = df.ix[:, [0]].plot(color='DodgerBlue', subplots=True)
        self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
        # single character style
        axes = df.plot(style='r', subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=['r'])
        tm.close()
        # list of styles
        styles = list('rgcby')
        axes = df.plot(style=styles, subplots=True)
        for ax, c in zip(axes, styles):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
    @slow
    def test_area_colors(self):
        """Area plots color lines, fill polygons, and legend handles
        alike; stacked=False additionally applies alpha=0.5 to fills."""
        from matplotlib import cm
        from matplotlib.collections import PolyCollection
        custom_colors = 'rgcby'
        df = DataFrame(rand(5, 5))
        ax = df.plot.area(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        self._check_colors(poly, facecolors=custom_colors)
        handles, labels = ax.get_legend_handles_labels()
        # legend is stored as Line2D, thus check linecolors
        linehandles = [x for x in handles if not isinstance(x, PolyCollection)]
        self._check_colors(linehandles, linecolors=custom_colors)
        for h in handles:
            self.assertTrue(h.get_alpha() is None)
        tm.close()
        ax = df.plot.area(colormap='jet')
        jet_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
        self._check_colors(ax.get_lines(), linecolors=jet_colors)
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        self._check_colors(poly, facecolors=jet_colors)
        handles, labels = ax.get_legend_handles_labels()
        linehandles = [x for x in handles if not isinstance(x, PolyCollection)]
        self._check_colors(linehandles, linecolors=jet_colors)
        for h in handles:
            self.assertTrue(h.get_alpha() is None)
        tm.close()
        # When stacked=False, alpha is set to 0.5
        ax = df.plot.area(colormap=cm.jet, stacked=False)
        self._check_colors(ax.get_lines(), linecolors=jet_colors)
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
        self._check_colors(poly, facecolors=jet_with_alpha)
        handles, labels = ax.get_legend_handles_labels()
        # Line2D can't have alpha in its linecolor
        self._check_colors(handles[:len(jet_colors)], linecolors=jet_colors)
        for h in handles:
            self.assertEqual(h.get_alpha(), 0.5)
@slow
def test_hist_colors(self):
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.hist()
self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.hist( color=custom_colors)
self._check_colors(ax.patches[::10], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.hist( colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.hist( colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
ax = df.ix[:, [0]].plot.hist(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
ax = df.plot(kind='hist', color='green')
self._check_colors(ax.patches[::10], facecolors=['green'] * 5)
tm.close()
@slow
def test_kde_colors(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.kde(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax = df.plot.kde(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot.kde(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
    @slow
    def test_kde_colors_and_styles_subplots(self):
        """KDE subplots distribute colors and styles one per axes,
        mirroring test_line_colors_and_styles_subplots."""
        tm._skip_if_no_scipy()
        _skip_if_no_scipy_gaussian_kde()
        from matplotlib import cm
        default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        axes = df.plot(kind='kde', subplots=True)
        for ax, c in zip(axes, list(default_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # single color char
        axes = df.plot(kind='kde', color='k', subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=['k'])
        tm.close()
        # single color str
        axes = df.plot(kind='kde', color='red', subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=['red'])
        tm.close()
        custom_colors = 'rgcby'
        axes = df.plot(kind='kde', color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # colormap by name or by object gives the same sampled colors
        rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
        for cmap in ['jet', cm.jet]:
            axes = df.plot(kind='kde', colormap=cmap, subplots=True)
            for ax, c in zip(axes, rgba_colors):
                self._check_colors(ax.get_lines(), linecolors=[c])
            tm.close()
        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        axes = df.ix[:, [0]].plot(kind='kde', color='DodgerBlue', subplots=True)
        self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
        # single character style
        axes = df.plot(kind='kde', style='r', subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=['r'])
        tm.close()
        # list of styles
        styles = list('rgcby')
        axes = df.plot(kind='kde', style=styles, subplots=True)
        for ax, c in zip(axes, styles):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
    @slow
    def test_boxplot_colors(self):
        """Boxplot artist groups (boxes/whiskers/medians/caps/fliers) honor color options."""
        def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k', fliers_c='b'):
            # validate every artist group of the boxplot dict at once
            self._check_colors(bp['boxes'], linecolors=[box_c] * len(bp['boxes']))
            self._check_colors(bp['whiskers'], linecolors=[whiskers_c] * len(bp['whiskers']))
            self._check_colors(bp['medians'], linecolors=[medians_c] * len(bp['medians']))
            self._check_colors(bp['fliers'], linecolors=[fliers_c] * len(bp['fliers']))
            self._check_colors(bp['caps'], linecolors=[caps_c] * len(bp['caps']))
        default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        bp = df.plot.box(return_type='dict')
        _check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
        tm.close()
        # full dict of per-artist colors plus flier symbol color via ``sym``
        dict_colors = dict(boxes='#572923', whiskers='#982042',
                           medians='#804823', caps='#123456')
        bp = df.plot.box(color=dict_colors, sym='r+', return_type='dict')
        _check_colors(bp, dict_colors['boxes'], dict_colors['whiskers'],
                      dict_colors['medians'], dict_colors['caps'], 'r')
        tm.close()
        # partial colors
        dict_colors = dict(whiskers='c', medians='m')
        bp = df.plot.box(color=dict_colors, return_type='dict')
        _check_colors(bp, default_colors[0], 'c', 'm')
        tm.close()
        from matplotlib import cm
        # Test str -> colormap functionality
        bp = df.plot.box(colormap='jet', return_type='dict')
        jet_colors = lmap(cm.jet, np.linspace(0, 1, 3))
        _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
        tm.close()
        # Test colormap functionality
        bp = df.plot.box(colormap=cm.jet, return_type='dict')
        _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
        tm.close()
        # string color is applied to all artists except fliers
        bp = df.plot.box(color='DodgerBlue', return_type='dict')
        _check_colors(bp, 'DodgerBlue', 'DodgerBlue', 'DodgerBlue',
                      'DodgerBlue')
        # tuple is also applied to all artists except fliers
        bp = df.plot.box(color=(0, 1, 0), sym='#123456', return_type='dict')
        _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), '#123456')
        with tm.assertRaises(ValueError):
            # Color contains invalid key results in ValueError
            df.plot.box(color=dict(boxes='red', xxxx='blue'))
    def test_default_color_cycle(self):
        """DataFrame.plot follows the matplotlib color cycle set via rcParams."""
        import matplotlib.pyplot as plt
        colors = list('rgbk')
        if self.mpl_ge_1_5_0:
            # mpl >= 1.5 replaced 'axes.color_cycle' with 'axes.prop_cycle'
            import cycler
            plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors)
        else:
            plt.rcParams['axes.color_cycle'] = colors
        df = DataFrame(randn(5, 3))
        ax = df.plot()
        expected = self._maybe_unpack_cycler(plt.rcParams)[:3]
        self._check_colors(ax.get_lines(), linecolors=expected)
    def test_unordered_ts(self):
        """Date-indexed data is sorted by index before plotting."""
        df = DataFrame(np.array([3.0, 2.0, 1.0]),
                       index=[date(2012, 10, 1),
                              date(2012, 9, 1),
                              date(2012, 8, 1)],
                       columns=['test'])
        ax = df.plot()
        xticks = ax.lines[0].get_xdata()
        # x data must come out in ascending date order
        self.assertTrue(xticks[0] < xticks[1])
        ydata = ax.lines[0].get_ydata()
        # y values are reordered to match the sorted index
        tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
    def test_kind_both_ways(self):
        """df.plot(kind=...) and the df.plot.<kind>() accessor are both supported."""
        df = DataFrame({'x': [1, 2, 3]})
        for kind in plotting._common_kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            df.plot(kind=kind)
            getattr(df.plot, kind)()
        for kind in ['scatter', 'hexbin']:
            # these kinds require x and y arguments
            df.plot('x', 'x', kind=kind)
            getattr(df.plot, kind)('x', 'x')
    def test_all_invalid_plot_data(self):
        """Plotting a frame with no numeric data raises TypeError for every kind."""
        df = DataFrame(list('abcd'))
        for kind in plotting._common_kinds:
            if not _ok_for_gaussian_kde(kind):
                continue
            with tm.assertRaises(TypeError):
                df.plot(kind=kind)
    @slow
    def test_partially_invalid_plot_data(self):
        """Frames mixing numeric and non-numeric object data raise TypeError."""
        with tm.RNGContext(42):
            df = DataFrame(randn(10, 2), dtype=object)
            # randomly replace roughly half the rows with strings
            df[np.random.rand(df.shape[0]) > 0.5] = 'a'
            for kind in plotting._common_kinds:
                if not _ok_for_gaussian_kde(kind):
                    continue
                with tm.assertRaises(TypeError):
                    df.plot(kind=kind)
        with tm.RNGContext(42):
            # area plot doesn't support positive/negative mixed data
            kinds = ['area']
            df = DataFrame(rand(10, 2), dtype=object)
            df[np.random.rand(df.shape[0]) > 0.5] = 'a'
            for kind in kinds:
                with tm.assertRaises(TypeError):
                    df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with tm.assertRaises(ValueError):
df.plot(kind='aasdf')
    @slow
    def test_hexbin_basic(self):
        """hexbin creates one collection; subplots=True returns one axes plus a colorbar axes."""
        df = self.hexbin_df
        ax = df.plot.hexbin(x='A', y='B', gridsize=10)
        # TODO: need better way to test. This just does existence.
        self.assertEqual(len(ax.collections), 1)
        # GH 6951
        axes = df.plot.hexbin(x='A', y='B', subplots=True)
        # hexbin should have 2 axes in the figure, 1 for plotting and another is colorbar
        self.assertEqual(len(axes[0].figure.axes), 2)
        # return value is single axes
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
    @slow
    def test_hexbin_with_c(self):
        """hexbin accepts a C column and a custom reduce_C_function."""
        df = self.hexbin_df
        ax = df.plot.hexbin(x='A', y='B', C='C')
        self.assertEqual(len(ax.collections), 1)
        ax = df.plot.hexbin(x='A', y='B', C='C', reduce_C_function=np.std)
        self.assertEqual(len(ax.collections), 1)
    @slow
    def test_hexbin_cmap(self):
        """hexbin defaults to the BuGn colormap and honors ``colormap``."""
        df = self.hexbin_df
        # Default to BuGn
        ax = df.plot.hexbin(x='A', y='B')
        self.assertEqual(ax.collections[0].cmap.name, 'BuGn')
        cm = 'cubehelix'
        ax = df.plot.hexbin(x='A', y='B', colormap=cm)
        self.assertEqual(ax.collections[0].cmap.name, cm)
    @slow
    def test_no_color_bar(self):
        """colorbar=None suppresses the hexbin colorbar."""
        df = self.hexbin_df
        ax = df.plot.hexbin(x='A', y='B', colorbar=None)
        self.assertIs(ax.collections[0].colorbar, None)
    @slow
    def test_allow_cmap(self):
        """hexbin accepts ``cmap`` but rejects passing both ``cmap`` and ``colormap``."""
        df = self.hexbin_df
        ax = df.plot.hexbin(x='A', y='B', cmap='YlGn')
        self.assertEqual(ax.collections[0].cmap.name, 'YlGn')
        with tm.assertRaises(TypeError):
            # both aliases at once is ambiguous
            df.plot.hexbin(x='A', y='B', cmap='YlGn',
                           colormap='BuGn')
    @slow
    def test_pie_df(self):
        """Pie plots require y (or subplots=True) and honor labels/colors arguments."""
        df = DataFrame(np.random.rand(5, 3), columns=['X', 'Y', 'Z'],
                       index=['a', 'b', 'c', 'd', 'e'])
        with tm.assertRaises(ValueError):
            # y is mandatory for a single pie plot
            df.plot.pie()
        ax = _check_plot_works(df.plot.pie, y='Y')
        self._check_text_labels(ax.texts, df.index)
        ax = _check_plot_works(df.plot.pie, y=2)
        self._check_text_labels(ax.texts, df.index)
        axes = _check_plot_works(df.plot.pie, filterwarnings='ignore', subplots=True)
        self.assertEqual(len(axes), len(df.columns))
        for ax in axes:
            self._check_text_labels(ax.texts, df.index)
        for ax, ylabel in zip(axes, df.columns):
            self.assertEqual(ax.get_ylabel(), ylabel)
        labels = ['A', 'B', 'C', 'D', 'E']
        color_args = ['r', 'g', 'b', 'c', 'm']
        axes = _check_plot_works(df.plot.pie, filterwarnings='ignore', subplots=True,
                                 labels=labels, colors=color_args)
        self.assertEqual(len(axes), len(df.columns))
        for ax in axes:
            self._check_text_labels(ax.texts, labels)
            self._check_colors(ax.patches, facecolors=color_args)
    def test_pie_df_nan(self):
        """NaN slices get an empty label and are dropped from the legend."""
        df = DataFrame(np.random.rand(4, 4))
        for i in range(4):
            # put one NaN on the diagonal of each column
            df.iloc[i, i] = np.nan
        fig, axes = self.plt.subplots(ncols=4)
        df.plot.pie(subplots=True, ax=axes, legend=True)
        base_expected = ['0', '1', '2', '3']
        for i, ax in enumerate(axes):
            expected = list(base_expected)  # force copy
            expected[i] = ''
            result = [x.get_text() for x in ax.texts]
            self.assertEqual(result, expected)
            # legend labels
            # NaN's not included in legend with subplots
            # see https://github.com/pydata/pandas/issues/8390
            self.assertEqual([x.get_text() for x in
                              ax.get_legend().get_texts()],
                             base_expected[:i] + base_expected[i+1:])
    @slow
    def test_errorbar_plot(self):
        """yerr/xerr accept DataFrame, dict, Series, ndarray, scalar, iterator and column name."""
        d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
        df = DataFrame(d)
        d_err = {'x': np.ones(12)*0.2, 'y': np.ones(12)*0.4}
        df_err = DataFrame(d_err)
        # check line plots
        ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
        self._check_has_errorbars(ax, xerr=0, yerr=2)
        ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
        self._check_has_errorbars(ax, xerr=0, yerr=2)
        ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
        self._check_has_errorbars(ax, xerr=0, yerr=2)
        kinds = ['line', 'bar', 'barh']
        for kind in kinds:
            ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)
            self._check_has_errorbars(ax, xerr=2, yerr=2)
            ax = _check_plot_works(df.plot, yerr=df_err['x'], xerr=df_err['x'], kind=kind)
            self._check_has_errorbars(ax, xerr=2, yerr=2)
            ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
            self._check_has_errorbars(ax, xerr=2, yerr=2)
            axes = _check_plot_works(df.plot, filterwarnings='ignore', yerr=df_err,
                                     xerr=df_err, subplots=True, kind=kind)
            self._check_has_errorbars(axes, xerr=1, yerr=1)
        ax = _check_plot_works((df+1).plot, yerr=df_err, xerr=df_err, kind='bar', log=True)
        self._check_has_errorbars(ax, xerr=2, yerr=2)
        # yerr is raw error values
        ax = _check_plot_works(df['y'].plot, yerr=np.ones(12)*0.4)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(df.plot, yerr=np.ones((2, 12))*0.4)
        self._check_has_errorbars(ax, xerr=0, yerr=2)
        # yerr is iterator
        import itertools
        ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
        self._check_has_errorbars(ax, xerr=0, yerr=2)
        # yerr is column name
        for yerr in ['yerr', u('誤差')]:
            s_df = df.copy()
            s_df[yerr] = np.ones(12)*0.2
            ax = _check_plot_works(s_df.plot, yerr=yerr)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
        with tm.assertRaises(ValueError):
            # wrong-length error values
            df.plot(yerr=np.random.randn(11))
        df_err = DataFrame({'x': ['zzz']*12, 'y': ['zzz']*12})
        with tm.assertRaises((ValueError, TypeError)):
            # non-numeric error values
            df.plot(yerr=df_err)
    @slow
    def test_errorbar_with_integer_column_names(self):
        """Errorbars work when columns are integers (yerr as frame or as column label)."""
        # test with integer column names
        df = DataFrame(np.random.randn(10, 2))
        df_err = DataFrame(np.random.randn(10, 2))
        ax = _check_plot_works(df.plot, yerr=df_err)
        self._check_has_errorbars(ax, xerr=0, yerr=2)
        # here ``yerr=1`` names column 1, not a scalar error value
        ax = _check_plot_works(df.plot, y=0, yerr=1)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
    @slow
    def test_errorbar_with_partial_columns(self):
        """Error data covering a subset of columns attaches bars to those columns only."""
        df = DataFrame(np.random.randn(10, 3))
        df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
        kinds = ['line', 'bar']
        for kind in kinds:
            ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
        # same behavior with a DatetimeIndex
        ix = date_range('1/1/2000', periods=10, freq='M')
        df.set_index(ix, inplace=True)
        df_err.set_index(ix, inplace=True)
        ax = _check_plot_works(df.plot, yerr=df_err, kind='line')
        self._check_has_errorbars(ax, xerr=0, yerr=2)
        # error keys that don't match any data column ('z') are ignored
        d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
        df = DataFrame(d)
        d_err = {'x': np.ones(12)*0.2, 'z': np.ones(12)*0.4}
        df_err = DataFrame(d_err)
        for err in [d_err, df_err]:
            ax = _check_plot_works(df.plot, yerr=err)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
    @slow
    def test_errorbar_timeseries(self):
        """Errorbar plotting works with a DatetimeIndex for line/bar/barh kinds."""
        d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
        d_err = {'x': np.ones(12)*0.2, 'y': np.ones(12)*0.4}
        # check time-series plots
        ix = date_range('1/1/2000', '1/1/2001', freq='M')
        tdf = DataFrame(d, index=ix)
        tdf_err = DataFrame(d_err, index=ix)
        kinds = ['line', 'bar', 'barh']
        for kind in kinds:
            ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'], kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            # yerr given as a column name of the plotted frame
            ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            axes = _check_plot_works(tdf.plot, filterwarnings='ignore', kind=kind,
                                     yerr=tdf_err, subplots=True)
            self._check_has_errorbars(axes, xerr=0, yerr=1)
    def test_errorbar_asymmetrical(self):
        """A (ncols, 2, npoints) error array gives asymmetric low/high bars; bad shape raises."""
        np.random.seed(0)
        err = np.random.rand(3, 2, 5)
        data = np.random.randn(5, 3)
        df = DataFrame(data)
        ax = df.plot(yerr=err, xerr=err/2)
        # caps for the second column: data value -/+ its low/high errors
        self.assertEqual(ax.lines[7].get_ydata()[0], data[0,1]-err[1,0,0])
        self.assertEqual(ax.lines[8].get_ydata()[0], data[0,1]+err[1,1,0])
        self.assertEqual(ax.lines[5].get_xdata()[0], -err[1,0,0]/2)
        self.assertEqual(ax.lines[6].get_xdata()[0], err[1,1,0]/2)
        with tm.assertRaises(ValueError):
            # transposed error array has an invalid shape
            df.plot(yerr=err.T)
        tm.close()
    def test_table(self):
        """table=True/table=df draws a table; plotting.table adds one to an existing axes."""
        df = DataFrame(np.random.rand(10, 3),
                       index=list(string.ascii_letters[:10]))
        _check_plot_works(df.plot, table=True)
        _check_plot_works(df.plot, table=df)
        # a plain plot has no tables until one is added explicitly
        ax = df.plot()
        self.assertTrue(len(ax.tables) == 0)
        plotting.table(ax, df.T)
        self.assertTrue(len(ax.tables) == 1)
def test_errorbar_scatter(self):
df = DataFrame(np.random.randn(5, 2), index=range(5), columns=['x', 'y'])
df_err = DataFrame(np.random.randn(5, 2) / 5,
index=range(5), columns=['x', 'y'])
ax = _check_plot_works(df.plot.scatter, x='x', y='y')
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot.scatter, x='x', y='y',
xerr=df_err, yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
def _check_errorbar_color(containers, expected, has_err='has_xerr'):
errs = [c.lines[1][0] for c in ax.containers if getattr(c, has_err, False)]
self._check_colors(errs, linecolors=[expected] * len(errs))
# GH 8081
df = DataFrame(np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
ax = df.plot.scatter(x='a', y='b', xerr='d', yerr='e', c='red')
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, 'red', has_err='has_xerr')
_check_errorbar_color(ax.containers, 'red', has_err='has_yerr')
ax = df.plot.scatter(x='a', y='b', yerr='e', color='green')
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, 'green', has_err='has_yerr')
    @slow
    def test_sharex_and_ax(self):
        """sharex with user-supplied gridspec axes hides only the correct x tick labels."""
        # https://github.com/pydata/pandas/issues/9737
        # using gridspec, the axis in fig.get_axis() are sorted differently than pandas expected
        # them, so make sure that only the right ones are removed
        import matplotlib.pyplot as plt
        plt.close('all')
        gs, axes = _generate_4_axes_via_gridspec()
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
                        "b": [1, 2, 3, 4, 5, 6],
                        "c": [1, 2, 3, 4, 5, 6],
                        "d": [1, 2, 3, 4, 5, 6]})
        def _check(axes):
            # axes order is [top-left, lower-left, top-right, lower-right]:
            # top-row axes (0, 2) hide x labels, bottom-row axes (1, 3) show them
            for ax in axes:
                self.assertEqual(len(ax.lines), 1)
                self._check_visible(ax.get_yticklabels(), visible=True)
            for ax in [axes[0], axes[2]]:
                self._check_visible(ax.get_xticklabels(), visible=False)
                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
            for ax in [axes[1], axes[3]]:
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()
        gs, axes = _generate_4_axes_via_gridspec()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharex=True)
        _check(axes)
        tm.close()
        gs, axes = _generate_4_axes_via_gridspec()
        # without sharex, no labels should be touched!
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax)
        gs.tight_layout(plt.gcf())
        for ax in axes:
            self.assertEqual(len(ax.lines), 1)
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
    @slow
    def test_sharey_and_ax(self):
        """sharey with user-supplied gridspec axes hides only the correct y tick labels."""
        # https://github.com/pydata/pandas/issues/9737
        # using gridspec, the axis in fig.get_axis() are sorted differently than pandas expected
        # them, so make sure that only the right ones are removed
        import matplotlib.pyplot as plt
        gs, axes = _generate_4_axes_via_gridspec()
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
                        "b": [1, 2, 3, 4, 5, 6],
                        "c": [1, 2, 3, 4, 5, 6],
                        "d": [1, 2, 3, 4, 5, 6]})
        def _check(axes):
            # axes order is [top-left, lower-left, top-right, lower-right]:
            # left-column axes (0, 1) show y labels, right-column axes (2, 3) hide them
            for ax in axes:
                self.assertEqual(len(ax.lines), 1)
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
            for ax in [axes[0], axes[1]]:
                self._check_visible(ax.get_yticklabels(), visible=True)
            for ax in [axes[2], axes[3]]:
                self._check_visible(ax.get_yticklabels(), visible=False)
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()
        gs, axes = _generate_4_axes_via_gridspec()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharey=True)
        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()
        gs, axes = _generate_4_axes_via_gridspec()
        # without sharex, no labels should be touched!
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax)
        gs.tight_layout(plt.gcf())
        for ax in axes:
            self.assertEqual(len(ax.lines), 1)
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
    def test_memory_leak(self):
        """ Check that every plot type gets properly collected. """
        import weakref
        import gc
        results = {}
        for kind in plotting._plot_klass.keys():
            if not _ok_for_gaussian_kde(kind):
                continue
            args = {}
            if kind in ['hexbin', 'scatter', 'pie']:
                # these kinds need x/y column arguments
                df = self.hexbin_df
                args = {'x': 'A', 'y': 'B'}
            elif kind == 'area':
                # area plots need all-positive data
                df = self.tdf.abs()
            else:
                df = self.tdf
            # Use a weakref so we can see if the object gets collected without
            # also preventing it from being collected
            results[kind] = weakref.proxy(df.plot(kind=kind, **args))
        # have matplotlib delete all the figures
        tm.close()
        # force a garbage collection
        gc.collect()
        for key in results:
            # check that every plot was collected
            with tm.assertRaises(ReferenceError):
                # need to actually access something to get an error
                results[key].lines
    @slow
    def test_df_subplots_patterns_minorticks(self):
        """Shared-x subplots hide the upper axes' x tick labels, incl. minor ticks (GH 10657)."""
        # GH 10657
        import matplotlib.pyplot as plt
        df = DataFrame(np.random.randn(10, 2),
                       index=date_range('1/1/2000', periods=10),
                       columns=list('AB'))
        # shared subplots
        fig, axes = plt.subplots(2, 1, sharex=True)
        axes = df.plot(subplots=True, ax=axes)
        for ax in axes:
            self.assertEqual(len(ax.lines), 1)
            self._check_visible(ax.get_yticklabels(), visible=True)
        # xaxis of 1st ax must be hidden
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
        tm.close()
        # sharing requested via the plot call instead of plt.subplots
        fig, axes = plt.subplots(2, 1)
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharex=True)
        for ax in axes:
            self.assertEqual(len(ax.lines), 1)
            self._check_visible(ax.get_yticklabels(), visible=True)
        # xaxis of 1st ax must be hidden
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
        tm.close()
        # not shared
        fig, axes = plt.subplots(2, 1)
        axes = df.plot(subplots=True, ax=axes)
        for ax in axes:
            self.assertEqual(len(ax.lines), 1)
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
    @slow
    def test_df_gridspec_patterns(self):
        """Tick-label visibility is correct for various user-made gridspec layouts (GH 10819)."""
        # GH 10819
        import matplotlib.pyplot as plt
        import matplotlib.gridspec as gridspec
        ts = Series(np.random.randn(10),
                    index=date_range('1/1/2000', periods=10))
        df = DataFrame(np.random.randn(10, 2), index=ts.index,
                       columns=list('AB'))
        def _get_vertical_grid():
            # a tall axes stacked above a short one
            gs = gridspec.GridSpec(3, 1)
            fig = plt.figure()
            ax1 = fig.add_subplot(gs[:2, :])
            ax2 = fig.add_subplot(gs[2, :])
            return ax1, ax2
        def _get_horizontal_grid():
            # a wide axes next to a narrow one
            gs = gridspec.GridSpec(1, 3)
            fig = plt.figure()
            ax1 = fig.add_subplot(gs[:, :2])
            ax2 = fig.add_subplot(gs[:, 2])
            return ax1, ax2
        for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
            ax1 = ts.plot(ax=ax1)
            self.assertEqual(len(ax1.lines), 1)
            ax2 = df.plot(ax=ax2)
            self.assertEqual(len(ax2.lines), 2)
            # without sharing, all labels stay visible
            for ax in [ax1, ax2]:
                self._check_visible(ax.get_yticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
            tm.close()
        # subplots=True
        for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
            axes = df.plot(subplots=True, ax=[ax1, ax2])
            self.assertEqual(len(ax1.lines), 1)
            self.assertEqual(len(ax2.lines), 1)
            for ax in axes:
                self._check_visible(ax.get_yticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
            tm.close()
        # vertical / subplots / sharex=True / sharey=True
        ax1, ax2 = _get_vertical_grid()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=[ax1, ax2],
                           sharex=True, sharey=True)
        self.assertEqual(len(axes[0].lines), 1)
        self.assertEqual(len(axes[1].lines), 1)
        for ax in [ax1, ax2]:
            # yaxis are visible because there is only one column
            self._check_visible(ax.get_yticklabels(), visible=True)
        # xaxis of axes0 (top) are hidden
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
        tm.close()
        # horizontal / subplots / sharex=True / sharey=True
        ax1, ax2 = _get_horizontal_grid()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=[ax1, ax2],
                           sharex=True, sharey=True)
        self.assertEqual(len(axes[0].lines), 1)
        self.assertEqual(len(axes[1].lines), 1)
        self._check_visible(axes[0].get_yticklabels(), visible=True)
        # yaxis of axes1 (right) are hidden
        self._check_visible(axes[1].get_yticklabels(), visible=False)
        for ax in [ax1, ax2]:
            # xaxis are visible because there is only one column
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
        # boxed
        def _get_boxed_grid():
            # 2x2 layout of unevenly-sized axes carved from a 3x3 gridspec
            gs = gridspec.GridSpec(3,3)
            fig = plt.figure()
            ax1 = fig.add_subplot(gs[:2, :2])
            ax2 = fig.add_subplot(gs[:2, 2])
            ax3 = fig.add_subplot(gs[2, :2])
            ax4 = fig.add_subplot(gs[2, 2])
            return ax1, ax2, ax3, ax4
        axes = _get_boxed_grid()
        df = DataFrame(np.random.randn(10, 4),
                       index=ts.index, columns=list('ABCD'))
        axes = df.plot(subplots=True, ax=axes)
        for ax in axes:
            self.assertEqual(len(ax.lines), 1)
            # axis are visible because these are not shared
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
        # subplots / sharex=True / sharey=True
        axes = _get_boxed_grid()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
        for ax in axes:
            self.assertEqual(len(ax.lines), 1)
        for ax in [axes[0], axes[2]]:  # left column
            self._check_visible(ax.get_yticklabels(), visible=True)
        for ax in [axes[1], axes[3]]:  # right column
            self._check_visible(ax.get_yticklabels(), visible=False)
        for ax in [axes[0], axes[1]]:  # top row
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible(ax.get_xticklabels(minor=True), visible=False)
        for ax in [axes[2], axes[3]]:  # bottom row
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
    @slow
    def test_df_grid_settings(self):
        """Plots default to the rcParams['axes.grid'] setting (GH 9792)."""
        # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
        self._check_grid_settings(DataFrame({'a':[1,2,3],'b':[2,3,4]}),
                                  plotting._dataframe_kinds, kws={'x':'a','y':'b'})
    def test_option_mpl_style(self):
        """display.mpl_style accepts 'default'/None/False and rejects other strings."""
        set_option('display.mpl_style', 'default')
        set_option('display.mpl_style', None)
        set_option('display.mpl_style', False)
        with tm.assertRaises(ValueError):
            set_option('display.mpl_style', 'default2')
def test_invalid_colormap(self):
df = DataFrame(randn(3, 2), columns=['A', 'B'])
with tm.assertRaises(ValueError):
df.plot(colormap='invalid_colormap')
    def test_plain_axes(self):
        """Plotting works when the figure mixes SubplotAxes with plain Axes (GH11556/GH11520)."""
        # supplied ax itself is a SubplotAxes, but figure contains also
        # a plain Axes object (GH11556)
        fig, ax = self.plt.subplots()
        fig.add_axes([0.2, 0.2, 0.2, 0.2])
        Series(rand(10)).plot(ax=ax)
        # supplied ax itself is a plain Axes, but because the cmap keyword
        # a new ax is created for the colorbar -> also multiples axes (GH11520)
        df = DataFrame({'a': randn(8), 'b': randn(8)})
        fig = self.plt.figure()
        ax = fig.add_axes((0,0,1,1))
        df.plot(kind='scatter', ax=ax, x='a', y='b', c='a', cmap='hsv')
        # other examples: axes created by axes_grid1 / inset locator helpers
        fig, ax = self.plt.subplots()
        from mpl_toolkits.axes_grid1 import make_axes_locatable
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        Series(rand(10)).plot(ax=ax)
        Series(rand(10)).plot(ax=cax)
        fig, ax = self.plt.subplots()
        from mpl_toolkits.axes_grid.inset_locator import inset_axes
        iax = inset_axes(ax, width="30%", height=1., loc=3)
        Series(rand(10)).plot(ax=ax)
        Series(rand(10)).plot(ax=iax)
@tm.mplskip
class TestDataFrameGroupByPlots(TestPlotBase):
    """Smoke tests for plotting on GroupBy objects."""
    def test_series_groupby_plotting_nominally_works(self):
        """Series.groupby(...).plot()/hist() run without error."""
        n = 10
        weight = Series(np.random.normal(166, 20, size=n))
        height = Series(np.random.normal(60, 10, size=n))
        with tm.RNGContext(42):
            gender = tm.choice(['male', 'female'], size=n)
        weight.groupby(gender).plot()
        tm.close()
        height.groupby(gender).hist()
        tm.close()
        # Regression test for GH8733
        height.groupby(gender).plot(alpha=0.5)
        tm.close()
    def test_plotting_with_float_index_works(self):
        """Groupby plotting works with a float (non-unique) index (GH 7025)."""
        # GH 7025
        df = DataFrame({'def': [1,1,1,2,2,2,3,3,3],
                        'val': np.random.randn(9)},
                       index=[1.0,2.0,3.0,1.0,2.0,3.0,1.0,2.0,3.0])
        df.groupby('def')['val'].plot()
        tm.close()
        df.groupby('def')['val'].apply(lambda x: x.plot())
        tm.close()
    def test_hist_single_row(self):
        """hist(by=...) handles frames with very few rows (GH10214)."""
        # GH10214
        bins = np.arange(80, 100 + 2, 1)
        df = DataFrame({"Name": ["AAA", "BBB"], "ByCol": [1, 2], "Mark": [85, 89]})
        df["Mark"].hist(by=df["ByCol"], bins=bins)
        df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
        df["Mark"].hist(by=df["ByCol"], bins=bins)
    def test_plot_submethod_works(self):
        """GroupBy ``.plot.<kind>`` accessors work for frames and series."""
        df = DataFrame({'x': [1, 2, 3, 4, 5],
                        'y': [1, 2, 3, 2, 1],
                        'z': list('ababa')})
        df.groupby('z').plot.scatter('x', 'y')
        tm.close()
        df.groupby('z')['x'].plot.line()
        tm.close()
def assert_is_valid_plot_return_object(objs):
    """Fail unless *objs* is a matplotlib Artist, tuple, dict, or ndarray of Axes."""
    import matplotlib.pyplot as plt
    if isinstance(objs, np.ndarray):
        # every element of the array must be an Axes instance
        for element in objs.flat:
            msg = ('one of \'objs\' is not a '
                   'matplotlib Axes instance, '
                   'type encountered {0!r}'
                   ''.format(element.__class__.__name__))
            assert isinstance(element, plt.Axes), msg
    else:
        msg = ('objs is neither an ndarray of Artist instances nor a '
               'single Artist instance, tuple, or dict, "objs" is a {0!r} '
               ''.format(objs.__class__.__name__))
        assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def _check_plot_works(f, filterwarnings='always', **kwargs):
    """Call plotting function *f* twice (on a fresh figure, then on an extra
    subplot), validate each return value and round-trip through ``savefig``.

    Returns the last valid plot return object; the figure is always closed.
    """
    import matplotlib.pyplot as plt
    ret = None
    with warnings.catch_warnings():
        warnings.simplefilter(filterwarnings)
        try:
            try:
                fig = kwargs['figure']
            except KeyError:
                fig = plt.gcf()
            plt.clf()
            ax = kwargs.get('ax', fig.add_subplot(211))
            ret = f(**kwargs)
            assert_is_valid_plot_return_object(ret)
            # the second call may legitimately fail (e.g. a consumed iterator
            # argument); only validate the result when it succeeds
            try:
                kwargs['ax'] = fig.add_subplot(212)
                ret = f(**kwargs)
            except Exception:
                pass
            else:
                assert_is_valid_plot_return_object(ret)
            # saving catches errors that only surface at draw time
            with ensure_clean(return_filelike=True) as path:
                plt.savefig(path)
        finally:
            tm.close(fig)
    return ret
def _generate_4_axes_via_gridspec():
    """Build a 2x2 GridSpec figure; return (gridspec, [tl, ll, tr, lr] axes)."""
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    import matplotlib.gridspec

    grid = mpl.gridspec.GridSpec(2, 2)
    # column-major creation order: top-left, lower-left, top-right, lower-right
    axes = [plt.subplot(grid[row, col]) for col in (0, 1) for row in (0, 1)]
    return grid, axes
def curpath():
    """Return the absolute directory containing this module."""
    return os.path.dirname(os.path.abspath(__file__))
# Allow running this module standalone under nose, dropping into pdb on
# errors/failures and stopping at the first failure (-x).
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| 39.636435 | 106 | 0.573975 |
acf6f68cf2f3b7c298a26fd3925db7bb9ec73a6d | 8,425 | py | Python | docs/conf.py | mcgibbon/pintarray | 444171cbdc5c352d1ec159ce6333d2bc36b42c61 | [
"MIT"
] | null | null | null | docs/conf.py | mcgibbon/pintarray | 444171cbdc5c352d1ec159ce6333d2bc36b42c61 | [
"MIT"
] | null | null | null | docs/conf.py | mcgibbon/pintarray | 444171cbdc5c352d1ec159ce6333d2bc36b42c61 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pintarray documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# -- Sphinx build configuration for the PintArray documentation (conf.py) --
import sys
import os

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

import pintarray

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'PintArray'
copyright = u"2017, Jeremy McGibbon"

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = pintarray.__version__
# The full version, including alpha/beta/rc tags.
release = pintarray.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'pintarraydoc'

# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'pintarray.tex',
     u'PintArray Documentation',
     u'Jeremy McGibbon', 'manual'),
]

# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pintarray',
     u'PintArray Documentation',
     [u'Jeremy McGibbon'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'pintarray',
     u'PintArray Documentation',
     u'Jeremy McGibbon',
     'pintarray',
     'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.525362 | 76 | 0.71632 |
acf6f6ff0fb9872ce98b87e64e54afd161d1d3de | 168 | py | Python | tottle/http/session/__init__.py | muffleo/tottle | 69a5bdda879ab56d43505d517d3369a687c135a2 | [
"MIT"
] | 12 | 2020-09-06T15:31:34.000Z | 2021-02-27T20:30:34.000Z | tottle/http/session/__init__.py | cyanlabs-org/tottle | 6cf02022ed7b445c9b5af475c6e854b91780d792 | [
"MIT"
] | 2 | 2021-04-13T06:43:42.000Z | 2021-07-07T20:52:39.000Z | tottle/http/session/__init__.py | cyanlabs-org/tottle | 6cf02022ed7b445c9b5af475c6e854b91780d792 | [
"MIT"
] | 4 | 2020-09-12T03:09:25.000Z | 2021-03-22T08:52:04.000Z | from .abc import ABCSessionManager
# from .manager import SessionManager
from .many_session import ManySessionManager
from .single_session import SingleSessionManager
| 28 | 48 | 0.863095 |
acf6f76a2c4881a02657da7095bf8789199953a6 | 56,387 | py | Python | keylime/tpm2.py | cjustacoder/keylime | 351fa1d0f7b87da57ae2522974bb737356ed6352 | [
"BSD-2-Clause"
] | null | null | null | keylime/tpm2.py | cjustacoder/keylime | 351fa1d0f7b87da57ae2522974bb737356ed6352 | [
"BSD-2-Clause"
] | null | null | null | keylime/tpm2.py | cjustacoder/keylime | 351fa1d0f7b87da57ae2522974bb737356ed6352 | [
"BSD-2-Clause"
] | null | null | null | '''DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2015 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import base64
import binascii
import configparser
import distutils.spawn
import hashlib
import os
import re
import sys
import tempfile
import threading
import time
import zlib
import yaml
from distutils.version import LooseVersion, StrictVersion
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader as SafeLoader, SafeDumper as SafeDumper
try:
import simplejson as json
except ImportError:
raise("Simplejson is mandatory, please install")
import M2Crypto
from M2Crypto import m2
from keylime import cmd_exec
from keylime import common
from keylime import keylime_logging
from keylime import secure_mount
from keylime import tpm_abstract
from keylime import tpm_ek_ca
# Module-level logger and parsed configuration, shared by the tpm2 class below.
logger = keylime_logging.init_logging('tpm2')

# Read the config file
config = configparser.RawConfigParser()
config.read(common.CONFIG_FILE)
class tpm2(tpm_abstract.AbstractTPM):
def __init__(self, need_hw_tpm=False):
    """Initialize the TPM 2.0 wrapper.

    Detects the installed tpm2-tools generation, then (only when
    need_hw_tpm is True) starts the TPM, queries the algorithms it
    supports, and validates that the algorithms requested in the
    [cloud_agent] config section are among them.

    :param need_hw_tpm: when True, talk to a real/emulated TPM during
        construction; when False, the configured defaults are accepted
        unverified.
    :raises Exception: if a configured hash/encryption/signing algorithm
        is not supported by the TPM.
    """
    tpm_abstract.AbstractTPM.__init__(self, need_hw_tpm)

    # Shared lock to serialize access to tools
    self.tpmutilLock = threading.Lock()

    self.__get_tpm2_tools()

    # We don't know which algs the TPM supports yet
    self.supported['encrypt'] = set()
    self.supported['hash'] = set()
    self.supported['sign'] = set()

    # Grab which default algs the config requested
    defaultHash = config.get('cloud_agent', "tpm_hash_alg")
    defaultEncrypt = config.get('cloud_agent', "tpm_encryption_alg")
    defaultSign = config.get('cloud_agent', "tpm_signing_alg")

    if self.need_hw_tpm:
        # Start up the TPM
        self.__startup_tpm()

        # Figure out which algorithms the TPM supports
        self.__get_tpm_algorithms()

        # Ensure TPM supports the defaults requested
        if defaultHash not in self.supported['hash']:
            raise Exception('Unsupported hash algorithm specified: %s!'%(defaultHash))
        if defaultEncrypt not in self.supported['encrypt']:
            raise Exception('Unsupported encryption algorithm specified: %s!'%(defaultEncrypt))
        if defaultSign not in self.supported['sign']:
            raise Exception('Unsupported signing algorithm specified: %s!'%(defaultSign))
    else:
        # Assume their defaults are sane?
        pass

    self.defaults['hash'] = defaultHash
    self.defaults['encrypt'] = defaultEncrypt
    self.defaults['sign'] = defaultSign
def get_tpm_version(self):
    """Return the TPM specification major version this class implements (always 2)."""
    return 2
def __get_tpm2_tools(self):
    """Determine the installed tpm2-tools generation.

    Runs ``tpm2_startup --version`` and sets the module-global
    ``tools_version`` to "4.0" (tools >= 4.0.0) or "3.2" (tools >= 3.2.0),
    which the rest of this module branches on for command syntax.

    :raises Exception: if the tool cannot be run or its version string
        cannot be parsed.
    Exits the process via SystemExit for unsupported (< 3.2.0) versions.
    """
    global tools_version
    retDict = self.__run("tpm2_startup --version")

    code = retDict['code']
    output = ''.join(common.list_convert(retDict['retout']))
    errout = ''.join(common.list_convert(retDict['reterr']))
    if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
        # Fixed: the original concatenated onto a literal "%s" instead of
        # formatting the message.
        raise Exception("Error establishing tpm2-tools version using TPM2_Startup: %s: %s" % (str(code), str(errout)))

    # Extract the `version="x.x.x"` from tools output; guard against a
    # missing match instead of raising an opaque AttributeError.
    match = re.search(r'version="([^"]+)"', output)
    if match is None:
        raise Exception("Could not determine tpm2-tools version from output: %s" % output)
    version_str = match.group(1)

    # Extract the full semver release number (drop any "-rc"/git suffix).
    tools_version = version_str.split("-")
    if StrictVersion(tools_version[0]) >= StrictVersion("4.0.0"):
        logger.info(f"TPM2-TOOLS Version: {tools_version[0]}")
        tools_version = "4.0"
    elif StrictVersion(tools_version[0]) >= StrictVersion("3.2.0"):
        logger.info(f"TPM2-TOOLS Version: {tools_version[0]}")
        tools_version = "3.2"
    else:
        logger.error(f"TPM2-TOOLS Version {tools_version[0]} is not supported.")
        # sys.exit instead of the site-provided exit() builtin, which may
        # be absent when running without the site module.
        sys.exit(1)
def __get_tpm_algorithms(self):
    """Query the TPM for its supported algorithms and record them.

    Populates self.supported['encrypt'], ['hash'] and ['sign'] with the
    algorithm names recognized by keylime's tpm_abstract helpers, based on
    the output of ``tpm2_getcap``.

    :raises Exception: if tpm2_getcap exits non-zero.
    """
    # (Removed unused local `vendorStr` from the original.)
    if tools_version == "3.2":
        retDict = self.__run("tpm2_getcap -c algorithms")
    elif tools_version == "4.0":
        retDict = self.__run("tpm2_getcap algorithms")

    output = common.list_convert(retDict['retout'])
    errout = common.list_convert(retDict['reterr'])
    code = retDict['code']

    if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
        raise Exception("get_tpm_algorithms failed with code "+str(code)+": "+str(errout))

    if tools_version == "3.2":
        # tpm2-tools 3.2 prints human-readable output; rewrite it into
        # YAML-parseable form (alg name becomes a key, set/clear -> 1/0).
        output = "".join(output)
        output = re.sub(r'TPMA_ALGORITHM for ALG_ID: 0x[0-9a-f]+\s+-\s+([a-z0-9_]+)', r'\1:', output)
        output = output.replace("set", "1")
        output = output.replace("clear", "0")
        output = [output]

    retyaml = common.yaml_to_dict(output)
    for algorithm, details in retyaml.items():
        if details["asymmetric"] == 1 and details["object"] == 1 and tpm_abstract.Encrypt_Algorithms.is_recognized(algorithm):
            self.supported['encrypt'].add(algorithm)
        elif details["hash"] == 1 and tpm_abstract.Hash_Algorithms.is_recognized(algorithm):
            self.supported['hash'].add(algorithm)
        elif details["asymmetric"] == 1 and details["signing"] == 1 and tpm_abstract.Sign_Algorithms.is_recognized(algorithm):
            self.supported['sign'].add(algorithm)
#tpm_exec
@staticmethod
def __fingerprint(cmd):
    """Derive a unique-enough identifier for a tpm2-tools invocation.

    Most commands are identified by their executable name alone; the two
    ambiguous ones (tpm2_nvread and tpm2_getcap) get a suffix derived
    from their arguments so distinct uses can be told apart (used for
    canned-value stubbing and benchmarking).
    """
    tool = cmd.split()[0]
    suffix = ''
    if tool == 'tpm2_nvread':
        # Distinguish the EK-cert NVRAM read (index 0x1c00002) from key reads.
        suffix = '-ekcert' if '0x1c00002' in cmd else '-key'
    elif tool == 'tpm2_getcap':
        if 'handles-persistent' in cmd:
            suffix = '-handles'
        elif 'properties-fixed' in cmd:
            suffix = '-props'
    # Any other command name is already unique on its own.
    return tool + suffix
def __run(self, cmd, expectedcode=tpm_abstract.AbstractTPM.EXIT_SUCESS, raiseOnError=True, lock=True, outputpaths=None):
    """Execute a tpm2-tools command and collect its results.

    Central choke point for all TPM interaction.  Sets up the tool
    environment (TCTI, PATH, LD_LIBRARY_PATH), optionally serves the call
    from canned values when the TPM is stubbed out, serializes hardware
    access through self.tpmutilLock, retries quotes that hit a PCR race,
    and optionally records benchmarking/canned-value data.

    :param cmd: full command line for the tpm2-tools binary.
    :param expectedcode: exit code treated as success.
    :param raiseOnError: raise if the exit code differs from expectedcode.
    :param lock: serialize through tpmutilLock (False for non-TPM calls).
    :param outputpaths: file path or list of paths whose contents should be
        captured into the returned dict's 'fileouts'.
    :return: dict with 'retout', 'reterr', 'code', 'fileouts', 'timing'.
    """
    env = os.environ.copy()
    lib_path = ""
    if 'LD_LIBRARY_PATH' in env:
        lib_path = env['LD_LIBRARY_PATH']
    if 'TPM2TOOLS_TCTI' not in env:
        # Don't clobber existing setting (if present)
        env['TPM2TOOLS_TCTI'] = 'tabrmd:bus_name=com.intel.tss2.Tabrmd'
        # Other (not recommended) options are direct emulator and chardev communications:
        #env['TPM2TOOLS_TCTI'] = 'mssim:port=2321'
        #env['TPM2TOOLS_TCTI'] = 'device:/dev/tpm0'
    env['PATH'] = env['PATH']+":%s"%common.TPM_TOOLS_PATH
    env['LD_LIBRARY_PATH'] = lib_path+":%s"%common.TPM_LIBS_PATH

    # Convert single outputpath to list
    if isinstance(outputpaths, str):
        outputpaths = [outputpaths]

    # Handle stubbing the TPM out
    fprt = tpm2.__fingerprint(cmd)
    if common.STUB_TPM and common.TPM_CANNED_VALUES is not None:
        # Use canned values for stubbing
        jsonIn = common.TPM_CANNED_VALUES
        if fprt in jsonIn:
            # The value we're looking for has been canned!
            thisTiming = jsonIn[fprt]['timing']
            thisRetout = jsonIn[fprt]['retout']
            thisCode = jsonIn[fprt]['code']
            thisFileout = jsonIn[fprt]['fileout']
            fileoutEncoded = {}

            # Decode files that are supplied (and requested)
            if outputpaths is not None and len(outputpaths) > 0:
                if len(thisFileout) == 1 and len(outputpaths) == 1:
                    fileoutEncoded[outputpaths[0]] = zlib.decompress(base64.b64decode(next(iter(thisFileout.values()))))
                elif fprt == "tpm2_deluxequote":
                    # quotes need 3 outputs, so we need a consistent way to match them back up when reading
                    quote_msg = ""
                    match = re.search("-m ([^\s]+)", cmd)
                    if match:
                        quote_msg = match.group(1)
                        if "file://quoteMessage" in thisFileout:
                            fileoutEncoded[quote_msg] = zlib.decompress(base64.b64decode(thisFileout["file://quoteMessage"]))
                    quote_sig = ""
                    match = re.search("-s ([^\s]+)", cmd)
                    if match:
                        quote_sig = match.group(1)
                        if "file://quoteSignature" in thisFileout:
                            fileoutEncoded[quote_sig] = zlib.decompress(base64.b64decode(thisFileout["file://quoteSignature"]))
                    quote_pcr = ""
                    match = re.search("-p ([^\s]+)", cmd)
                    if match:
                        quote_pcr = match.group(1)
                        if "file://quotePCR" in thisFileout:
                            fileoutEncoded[quote_pcr] = zlib.decompress(base64.b64decode(thisFileout["file://quotePCR"]))
                else:
                    raise Exception("Command %s is using multiple files unexpectedly!"%(fprt))

            logger.debug("TPM call '%s' was stubbed out, with a simulated delay of %f sec"%(fprt, thisTiming))
            time.sleep(thisTiming)

            # Package for return
            returnDict = {
                'retout': thisRetout,
                'reterr': [],
                'code': thisCode,
                'fileouts': fileoutEncoded,
                'timing': thisTiming,
            }
            return returnDict
        elif not lock:
            # non-lock calls don't go to the TPM (just let it pass through)
            pass
        else:
            # Our command hasn't been canned!
            raise Exception("Command %s not found in canned YAML!"%(fprt))

    numtries = 0
    while True:
        if lock:
            with self.tpmutilLock:
                retDict = cmd_exec.run(cmd=cmd, expectedcode=expectedcode, raiseOnError=False, lock=lock, outputpaths=outputpaths, env=env)
        else:
            retDict = cmd_exec.run(cmd=cmd, expectedcode=expectedcode, raiseOnError=False, lock=lock, outputpaths=outputpaths, env=env)
        t0 = retDict['timing']['t0']
        t1 = retDict['timing']['t1']
        code = retDict['code']
        retout = retDict['retout']
        reterr = retDict['reterr']
        fileouts = retDict['fileouts']

        # keep trying to get quote if a PCR race condition occurred in deluxe quote
        if fprt == "tpm2_quote" and "Error validating calculated PCR composite with quote" in reterr:
            numtries += 1
            maxr = self.config.getint('cloud_agent', 'max_retries')
            if numtries >= maxr:
                logger.error("Agent did not return proper quote due to PCR race condition.")
                break
            retry = self.config.getfloat('cloud_agent', 'retry_interval')
            logger.info("Failed to get quote %d/%d times, trying again in %f seconds..."%(numtries, maxr, retry))
            time.sleep(retry)
            continue
        else:
            break

    # Don't bother continuing if TPM call failed and we're raising on error
    if code != expectedcode and raiseOnError:
        raise Exception("Command: %s returned %d, expected %d, output %s, stderr %s"%(cmd, code, expectedcode, retout, reterr))

    # Metric output
    if lock or self.tpmutilLock.locked():
        pad = ""
        if len(fprt) < 8:
            pad += "\t"
        if len(fprt) < 16:
            pad += "\t"
        if len(fprt) < 24:
            pad += "\t"

        filelen = 0
        if fileouts is not None:
            filelen = len(fileouts)

        # Print out benchmarking information for TPM (if requested)
        #print "\033[95mTIMING: %s%s\t:%f\toutlines:%d\tfilelines:%d\t%s\033[0m" % (fprt, pad, t1-t0, len(retout), filelen, cmd)
        if common.TPM_BENCHMARK_PATH is not None:
            with open(common.TPM_BENCHMARK_PATH, "ab") as f:
                f.write("TIMING: %s%s\t:%f\toutlines:%d\tfilelines:%d\t%s\n" % (fprt, pad, t1-t0, len(retout), filelen, cmd))

        # Print out YAML canned values (if requested)
        # NOTE: resulting file will be missing the surrounding braces! (must add '{' and '}' for reading)
        if common.TPM_CANNED_VALUES_PATH is not None:
            with open(common.TPM_CANNED_VALUES_PATH, "ab") as can:
                fileoutEncoded = {}

                # Process files
                if outputpaths is not None and len(outputpaths) > 0:
                    if len(fileouts) == 1 and len(outputpaths) == 1:
                        # NOTE(review): `.next()` is Python 2 syntax — this
                        # canning path would raise AttributeError on Python 3.
                        fileoutEncoded[outputpaths[0]] = zlib.compress(base64.b64decode(iter(fileouts.values()).next()))
                    elif fprt == "tpm2_deluxequote":
                        # quotes need 3 outputs, so we need a consistent way to match them back up when reading
                        quote_msg = ""
                        match = re.search("-m ([^\s]+)", cmd)
                        if match:
                            quote_msg = match.group(1)
                            if quote_msg in fileouts:
                                fileoutEncoded["file://quoteMessage"] = zlib.compress(base64.b64decode(fileouts[quote_msg]))
                        quote_sig = ""
                        match = re.search("-s ([^\s]+)", cmd)
                        if match:
                            quote_sig = match.group(1)
                            if quote_sig in fileouts:
                                fileoutEncoded["file://quoteSignature"] = zlib.compress(base64.b64decode(fileouts[quote_sig]))
                        quote_pcr = ""
                        match = re.search("-p ([^\s]+)", cmd)
                        if match:
                            quote_pcr = match.group(1)
                            if quote_pcr in fileouts:
                                fileoutEncoded["file://quotePCR"] = zlib.compress(base64.b64decode(fileouts[quote_pcr]))
                    else:
                        raise Exception("Command %s is using multiple files unexpectedly!"%(fprt))

                # tpm_cexec will need to know the nonce
                nonce = ""
                match = re.search("-q ([\w]+)", cmd)
                if match:
                    nonce = binascii.a2b_hex(match.group(1))

                jsonObj = {'type':fprt, 'retout':retout, 'fileout':fileoutEncoded, 'cmd':cmd, 'timing':t1-t0, 'code':code, 'nonce':nonce}
                can.write("\"%s\": %s,\n"%(fprt, json.dumps(jsonObj, indent=4, sort_keys=True)))

    return retDict
#tpm_initialize
def __startup_tpm(self):
    """Send TPM2_Startup(CLEAR) to (re)initialize the TPM.

    :raises Exception: if tpm2_startup exits non-zero.
    """
    retDict = self.__run("tpm2_startup -c")
    errout = common.list_convert(retDict['reterr'])
    code = retDict['code']
    if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
        # Fixed: the original concatenated onto a literal "%s" instead of
        # formatting the message; also dropped the unused `output` local.
        raise Exception("Error initializing emulated TPM with TPM2_Startup: %s: %s" % (str(code), str(errout)))
def __create_ek(self, asym_alg=None):
    """Create (or re-create) the endorsement key (EK).

    Idempotent: any previously persisted EK handle is evicted first, a
    fresh owner/EK password is generated as needed, and the new EK's
    handle, password and TPM-format public blob are stored in the TPM
    metadata ('ek_handle', 'ek_pw', 'ek_tpm').

    :param asym_alg: asymmetric algorithm for the EK; defaults to the
        configured encryption default.
    :raises Exception: if any tpm2-tools step fails.
    """
    # this function is intended to be idempotent
    if asym_alg is None:
        asym_alg = self.defaults['encrypt']

    current_handle = self.get_tpm_metadata("ek_handle")
    owner_pw = self.get_tpm_metadata("owner_pw")

    # clear out old handle before starting again (give idempotence)
    if current_handle is not None and owner_pw is not None:
        logger.info("Flushing old ek handle: %s"%hex(current_handle))
        if tools_version == "3.2":
            retDict = self.__run("tpm2_getcap -c handles-persistent", raiseOnError=False)
        elif tools_version == "4.0":
            retDict = self.__run("tpm2_getcap handles-persistent", raiseOnError=False)
        output = retDict['retout']
        reterr = retDict['reterr']
        code = retDict['code']
        if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
            raise Exception("tpm2_getcap failed with code "+str(code)+": "+str(reterr))

        outjson = common.yaml_to_dict(output)
        # Only evict if the handle is actually persisted.
        if outjson is not None and current_handle in outjson:
            if tools_version == "3.2":
                retDict = self.__run("tpm2_evictcontrol -A o -c %s -P %s"%(hex(current_handle), owner_pw), raiseOnError=False)
            else:
                retDict = self.__run("tpm2_evictcontrol -C o -c %s -P %s"%(hex(current_handle), owner_pw), raiseOnError=False)
            output = retDict['retout']
            reterr = retDict['reterr']
            code = retDict['code']
            if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
                # Eviction failure is logged but not fatal.
                logger.info("Failed to flush old ek handle: %s. Code %s"%(hex(current_handle), str(code)+": "+str(reterr)))

            self._set_tpm_metadata('ek_handle', None)
            self._set_tpm_metadata('ek_pw', None)

    # make sure an ownership pw is set
    if owner_pw is None:
        owner_pw = tpm_abstract.TPM_Utilities.random_password(20)
        self._set_tpm_metadata('owner_pw', owner_pw)

    ek_pw = tpm_abstract.TPM_Utilities.random_password(20)

    # create a new ek
    with tempfile.NamedTemporaryFile() as tmppath:
        cmdargs = {
            'asymalg': asym_alg,
            'ekpubfile': tmppath.name,
            'ekpw': ek_pw,
            'opw': owner_pw,
            'epw': owner_pw
        }
        if tools_version == "3.2":
            command = "tpm2_getpubek -H 0x81010007 -g {asymalg} -f {ekpubfile} -P {ekpw} -o {opw} -e {epw}".format(**cmdargs)
        elif tools_version == "4.0":
            command = "tpm2_createek -c - -G {asymalg} -u {ekpubfile} -p {ekpw} -w {opw} -P {epw}".format(**cmdargs)
        retDict = self.__run(command, raiseOnError=False, outputpaths=tmppath.name)
        output = retDict['retout']
        reterr = retDict['reterr']
        code = retDict['code']
        ek_tpm = retDict['fileouts'][tmppath.name]
        if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
            raise Exception("createek failed with code "+str(code)+": "+str(reterr))

        if tools_version == "3.2":
            # 3.2 persists at the fixed handle we passed on the command line.
            handle = int(0x81010007)
        else:
            # 4.0 reports the persistent handle in its YAML output.
            handle = None
            retyaml = common.yaml_to_dict(output)
            if "persistent-handle" in retyaml:
                handle = retyaml["persistent-handle"]

        self._set_tpm_metadata('ek_handle', handle)
        self._set_tpm_metadata('ek_pw', ek_pw)
        self._set_tpm_metadata('ek_tpm', base64.b64encode(ek_tpm))

    return
def __take_ownership(self, config_pw):
    """Take ownership of the TPM (owner and endorsement hierarchies).

    :param config_pw: owner password from config, or the literal string
        'generate' to create a random one.
    :raises Exception: if ownership cannot be taken and the TPM is not
        already owned with the given password (TPM reset required).
    """
    # if no ownerpassword
    if config_pw == 'generate':
        logger.info("Generating random TPM owner password")
        owner_pw = tpm_abstract.TPM_Utilities.random_password(20)
    else:
        logger.info("Taking ownership with config provided TPM owner password: %s"%config_pw)
        owner_pw = config_pw

    if tools_version == "3.2":
        retDict = self.__run("tpm2_takeownership -c", raiseOnError=False)
        retDict = self.__run("tpm2_takeownership -o %s -e %s"%(owner_pw, owner_pw), raiseOnError=False)
    elif tools_version == "4.0":
        retDict = self.__run("tpm2_changeauth -c o %s"%(owner_pw), raiseOnError=False)
        retDict = self.__run("tpm2_changeauth -c e %s"%(owner_pw), raiseOnError=False)

    output = retDict['retout']
    code = retDict['code']
    if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
        # if we fail, see if already owned with this pw
        if tools_version == "3.2":
            retDict = self.__run("tpm2_takeownership -o %s -e %s -O %s -E %s"%(owner_pw, owner_pw, owner_pw, owner_pw), raiseOnError=False)
        elif tools_version == "4.0":
            retDict = self.__run("tpm2_changeauth -c o -p %s %s"%(owner_pw, owner_pw), raiseOnError=False)
            retDict = self.__run("tpm2_changeauth -c e -p %s %s"%(owner_pw, owner_pw), raiseOnError=False)
        output = retDict['retout']
        reterr = retDict['reterr']
        code = retDict['code']
        if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
            # ut-oh, already owned but not with provided pw!
            # Fixed: the original concatenated onto a literal "%s".
            raise Exception("Owner password unknown, TPM reset required. Code %s: %s" % (str(code), str(reterr)))

    self._set_tpm_metadata('owner_pw', owner_pw)
    # NOTE(review): logging the owner password at info level leaks a secret
    # into the logs — consider removing/masking.
    logger.info("TPM Owner password confirmed: %s"%owner_pw)
def __get_pub_ek(self):
    """Read back the public EK in PEM form and store it as 'ek' metadata.

    Assumes the owner password is already correct and that __create_ek
    has persisted an 'ek_handle'.
    """
    handle = self.get_tpm_metadata('ek_handle')
    if handle is None:
        raise Exception("create_ek has not been run yet?")

    # Dump the public part into a temp file as PEM.
    with tempfile.NamedTemporaryFile() as pubek_file:
        # Only the handle flag differs between the tool generations.
        handle_flag = "-H" if tools_version == "3.2" else "-c"
        cmd = "tpm2_readpublic %s %s -o %s -f pem" % (handle_flag, hex(handle), pubek_file.name)
        retDict = self.__run(cmd, raiseOnError=False, outputpaths=pubek_file.name)

        rc = retDict['code']
        ek = retDict['fileouts'][pubek_file.name]
        if rc != tpm_abstract.AbstractTPM.EXIT_SUCESS:
            raise Exception("tpm2_readpublic failed with code "+str(rc)+": "+str(retDict['reterr']))

        self._set_tpm_metadata('ek', ek)
def __get_pub_aik(self):
    """Retrieves the PEM version of the public AIK.

    Helper function for '__create_aik', required for legacy (v3) of
    tpm2-tools since tpm2_getpubak does not support outputting public AIK
    in the required PEM format. Note that 'aik_handle' metadata must
    have been set before running this function. Function sets the
    'aik' metadata.
    """
    if not tools_version == "3.2":
        logger.error("The get_pub_aik method does not apply to modern tpm2-tools!")
        return

    handle = self.get_tpm_metadata('aik_handle')
    if handle is None:
        raise Exception("tpm2_getpubak has not been run yet?")

    # make a temp file for the output
    with tempfile.NamedTemporaryFile() as akpubfile:
        # generates pubak.pem
        retDict = self.__run("tpm2_readpublic -H %s -o %s -f pem"%(hex(handle), akpubfile.name), raiseOnError=False, outputpaths=akpubfile.name)
        output = retDict['retout']
        reterr = retDict['reterr']
        code = retDict['code']
        pem = retDict['fileouts'][akpubfile.name]
        if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
            raise Exception("tpm2_readpublic failed with code "+str(code)+": "+str(reterr))

        if pem == "":
            raise Exception("unable to read public aik from create identity. Is your tpm2-tools installation up to date?")

    self._set_tpm_metadata('aik', pem)
def __create_aik(self, activate, asym_alg=None, hash_alg=None, sign_alg=None):
    """Create (or re-create) the attestation identity key (AIK).

    Evicts any previously persisted AIK, then creates a new AIK under the
    EK and persists its handle/session path, PEM public key, TPM name and
    password into the TPM metadata ('aik_handle', 'aik', 'aik_name',
    'aik_pw').

    :param activate: unused here; kept for interface compatibility —
        presumably activation is driven elsewhere (TODO confirm).
    :param asym_alg, hash_alg, sign_alg: algorithm overrides; default to
        the configured defaults.
    :raises Exception: if the EK is missing or any tpm2-tools step fails.
    """
    if hash_alg is None:
        hash_alg = self.defaults['hash']
    if asym_alg is None:
        asym_alg = self.defaults['encrypt']
    if sign_alg is None:
        sign_alg = self.defaults['sign']

    owner_pw = self.get_tpm_metadata('owner_pw')

    # clear out old handle before starting again (give idempotence)
    if self.get_tpm_metadata('aik') is not None and self.get_tpm_metadata('aik_name') is not None:
        aik_handle = self.get_tpm_metadata('aik_handle')
        if tools_version == "3.2":
            logger.info("Flushing old ak handle: %s"%hex(aik_handle))
            retDict = self.__run("tpm2_getcap -c handles-persistent", raiseOnError=False)
        elif tools_version == "4.0":
            logger.info("Flushing old ak handle: %s"%aik_handle)
            retDict = self.__run("tpm2_getcap handles-persistent", raiseOnError=False)
        output = common.list_convert(retDict['retout'])
        errout = common.list_convert(retDict['reterr'])
        code = retDict['code']
        if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
            raise Exception("tpm2_getcap failed with code "+str(code)+": "+str(errout))

        if tools_version == "3.2":
            # output, human-readable -> json
            output = "".join(output)
            output = output.replace("0x", " - 0x")
            output = [output]

        outjson = common.yaml_to_dict(output)
        # Only evict when the handle is actually persisted in the TPM.
        if outjson is not None and aik_handle in outjson:
            if tools_version == "3.2":
                retDict = self.__run("tpm2_evictcontrol -A o -c %s -P %s"%(hex(aik_handle), owner_pw), raiseOnError=False)
            else:
                retDict = self.__run("tpm2_evictcontrol -C o -c %s -P %s"%(aik_handle, owner_pw), raiseOnError=False)
            output = retDict['retout']
            reterr = retDict['reterr']
            code = retDict['code']
            if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
                # Eviction failure is logged but not fatal.
                if tools_version == "3.2":
                    logger.info("Failed to flush old ak handle: %s. Code %s"%(hex(aik_handle), str(code)+": "+str(reterr)))
                elif tools_version == "4.0":
                    logger.info("Failed to flush old ak handle: %s. Code %s"%(aik_handle, str(code)+": "+str(reterr)))

        self._set_tpm_metadata('aik', None)
        self._set_tpm_metadata('aik_name', None)
        self._set_tpm_metadata('aik_pw', None)
        self._set_tpm_metadata('aik_handle', None)

    logger.debug("Creating a new AIK identity")

    # We need an ek handle to make an aik
    ek_handle = self.get_tpm_metadata("ek_handle")
    if ek_handle is None:
        raise Exception("Failed to create AIK, since EK has not yet been created!")

    aik_pw = tpm_abstract.TPM_Utilities.random_password(20)

    # make a temp file for the output
    with tempfile.NamedTemporaryFile() as akpubfile:
        secpath = ""
        if tools_version == "4.0":
            # ok lets write out the key now
            secdir = secure_mount.mount() # confirm that storage is still securely mounted
            secfd, secpath = tempfile.mkstemp(dir=secdir)

        cmdargs = {
            'ekhandle': hex(ek_handle),
            'aksession': secpath,
            'akpubfile': akpubfile.name,
            'asymalg': asym_alg,
            'hashalg': hash_alg,
            'signalg': sign_alg,
            'epw': owner_pw,
            'opw': owner_pw,
            'apw': aik_pw
        }
        if tools_version == "3.2":
            command = "tpm2_getpubak -E {ekhandle} -k 0x81010008 -g {asymalg} -D {hashalg} -s {signalg} -f {akpubfile} -e {epw} -P {apw} -o {opw}".format(**cmdargs)
        elif tools_version == "4.0":
            command = "tpm2_createak -C {ekhandle} -c {aksession} -G {asymalg} -g {hashalg} -s {signalg} -u {akpubfile} -f pem -p {apw} -P {epw}".format(**cmdargs)
        retDict = self.__run(command, outputpaths=akpubfile.name)
        retout = retDict['retout']
        reterr = retDict['reterr']
        code = retDict['code']
        if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
            raise Exception("tpm2_createak failed with code "+str(code)+": "+str(reterr))

        jsonout = common.yaml_to_dict(retout)
        # NOTE(review): this indexes 'loaded-key' BEFORE the presence checks
        # below — if the key is missing this raises KeyError rather than the
        # intended friendly Exception. Consider checking first.
        akname = jsonout['loaded-key']['name']
        if tools_version == "3.2":
            if 'loaded-key' not in jsonout or 'name' not in jsonout['loaded-key']:
                raise Exception("tpm2_createak failed to create aik: return "+str(reterr))
            handle = int(0x81010008)
            # get and persist the pem (not returned by tpm2_getpubak)
            self._set_tpm_metadata('aik_handle', handle)
            self.__get_pub_aik()
        else:
            if 'loaded-key' not in jsonout:
                raise Exception("tpm2_createak failed to create aik: return "+str(reterr))
            # For tools 4.0 the "handle" is the path to the AK session file.
            handle = secpath
            pem = retDict['fileouts'][akpubfile.name]
            if pem == "":
                raise Exception("unable to read public aik from create identity. Is your tpm2-tools installation up to date?")
            # persist the pem
            self._set_tpm_metadata('aik_handle', handle)
            self._set_tpm_metadata('aik', pem)

        # persist common results
        self._set_tpm_metadata('aik_name', akname)
        self._set_tpm_metadata('aik_pw', aik_pw)
def flush_keys(self):
    """Evict every persistent key handle from the TPM.

    Lists the persistent handles via tpm2_getcap and evicts each one with
    the stored owner password. Best-effort: eviction errors are ignored
    (raiseOnError=False) and a failed listing aborts quietly.
    """
    logger.debug("Flushing keys from TPM...")
    if tools_version == "3.2":
        retDict = self.__run("tpm2_getcap -c handles-persistent")
    elif tools_version == "4.0":
        retDict = self.__run("tpm2_getcap handles-persistent")

    retout = common.list_convert(retDict['retout'])
    errout = common.list_convert(retDict['reterr'])
    code = retDict['code']
    if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
        logger.debug("tpm2_getcap failed with code "+str(code)+": "+str(errout))
        # Fixed: the original fell through and fed the error output to the
        # YAML parser; there is nothing to flush if the listing failed.
        return

    if tools_version == "3.2":
        # output, human-readable -> json
        retout = "".join(retout)
        retout = retout.replace("0x", " - 0x")
        retout = [retout]

    owner_pw = self.get_tpm_metadata("owner_pw")
    jsonout = common.yaml_to_dict(retout)
    if jsonout is None:
        # Fixed: guard against an unparseable/empty handle list, which
        # previously crashed the iteration below.
        return
    for key in jsonout:
        logger.debug("Flushing key handle %s"%hex(key))
        if tools_version == "3.2":
            self.__run("tpm2_evictcontrol -A o -c %s -P %s"%(hex(key), owner_pw), raiseOnError=False)
        else:
            self.__run("tpm2_evictcontrol -C o -c %s -P %s"%(hex(key), owner_pw), raiseOnError=False)
    def encryptAIK(self, uuid, pubaik, pubek, ek_tpm, aik_name):
        """Create a credential challenge for AIK activation.

        Writes the EK public (base64 DER in ``ek_tpm``) and a random 32-byte
        challenge to temp files, runs tpm2_makecredential, and returns a tuple
        (base64 credential blob, base64 challenge key).  Returns None when
        required parameters are missing or the tool invocation fails.
        """
        pubaikFile = None
        pubekFile = None
        challengeFile = None
        keyblob = None
        blobpath = None
        if ek_tpm is None or aik_name is None:
            logger.error("Missing parameters for encryptAIK")
            return None
        try:
            # write out the public EK
            efd, etemp = tempfile.mkstemp()
            pubekFile = open(etemp, "wb")
            pubekFile.write(base64.b64decode(ek_tpm))
            pubekFile.close()
            os.close(efd)
            # write out the challenge
            challenge = tpm_abstract.TPM_Utilities.random_password(32)
            challenge = challenge.encode()
            keyfd, keypath = tempfile.mkstemp()
            challengeFile = open(keypath, "wb")
            challengeFile.write(challenge)
            challengeFile.close()
            os.close(keyfd)
            # create temp file for the blob
            blobfd, blobpath = tempfile.mkstemp()
            cmdargs = {
                'akname': aik_name,
                'ekpub': pubekFile.name,
                'blobout': blobpath,
                'challenge': challengeFile.name
            }
            # NOTE(review): both branches build the identical command string;
            # the split only mirrors the 3.2/4.0 pattern used elsewhere.
            if tools_version == "3.2":
                command = "tpm2_makecredential -T none -e {ekpub} -s {challenge} -n {akname} -o {blobout}".format(**cmdargs)
            else:
                command = "tpm2_makecredential -T none -e {ekpub} -s {challenge} -n {akname} -o {blobout}".format(**cmdargs)
            self.__run(command, lock=False)
            logger.info("Encrypting AIK for UUID %s"%uuid)
            # read in the blob
            f = open(blobpath, "rb")
            keyblob = base64.b64encode(f.read())
            f.close()
            os.close(blobfd)
            # read in the aes key
            key = base64.b64encode(challenge)
        except Exception as e:
            logger.error("Error encrypting AIK: "+str(e))
            logger.exception(e)
            return None
        finally:
            # temp files are removed on both the success and the error path
            if pubekFile is not None:
                os.remove(pubekFile.name)
            if challengeFile is not None:
                os.remove(challengeFile.name)
            if blobpath is not None:
                os.remove(blobpath)
        return (keyblob, key)
    def activate_identity(self, keyblob):
        """Decrypt a credential blob with the EK to prove AIK possession.

        Writes the base64 ``keyblob`` to a temp file, runs
        tpm2_activatecredential against the stored AIK/EK handles, and returns
        the recovered challenge key base64-encoded, or False on any error.
        The credential output lands in the secure mount so the plaintext
        never touches ordinary disk.
        """
        owner_pw = self.get_tpm_metadata('owner_pw')
        aik_keyhandle = self.get_tpm_metadata('aik_handle')
        ek_keyhandle = self.get_tpm_metadata('ek_handle')
        keyblobFile = None
        secpath = None
        sesspath = None
        try:
            # write out key blob
            kfd, ktemp = tempfile.mkstemp()
            keyblobFile = open(ktemp, "wb")
            # the below is a coroutine?
            keyblobFile.write(base64.b64decode(keyblob))
            keyblobFile.close()
            os.close(kfd)
            # ok lets write out the key now
            secdir = secure_mount.mount() # confirm that storage is still securely mounted
            secfd, secpath = tempfile.mkstemp(dir=secdir)
            # NOTE(review): sessfd is never closed — small fd leak per call.
            sessfd, sesspath = tempfile.mkstemp(dir=secdir)
            if tools_version == "3.2":
                cmdargs = {
                    'akhandle': hex(aik_keyhandle),
                    'ekhandle': hex(ek_keyhandle),
                    'keyblobfile': keyblobFile.name,
                    'credfile': secpath,
                    'apw': self.get_tpm_metadata('aik_pw'),
                    'epw': owner_pw
                }
                command = "tpm2_activatecredential -H {akhandle} -k {ekhandle} -f {keyblobfile} -o {credfile} -P {apw} -e {epw}".format(**cmdargs)
                retDict = self.__run(command, outputpaths=secpath)
            else:
                cmdargs = {
                    'akhandle': aik_keyhandle,
                    'ekhandle': hex(ek_keyhandle),
                    'keyblobfile': keyblobFile.name,
                    'sessfile': sesspath,
                    'credfile': secpath,
                    'apw': self.get_tpm_metadata('aik_pw'),
                    'epw': owner_pw
                }
                # 4.0 needs a policy session carrying the endorsement auth
                self.__run("tpm2_startauthsession --policy-session -S {sessfile}".format(**cmdargs))
                self.__run("tpm2_policysecret -S {sessfile} -c 0x4000000B {epw}".format(**cmdargs))
                command = "tpm2_activatecredential -c {akhandle} -C {ekhandle} -i {keyblobfile} -o {credfile} -p {apw} -P \"session:{sessfile}\"".format(**cmdargs)
                retDict = self.__run(command, outputpaths=secpath)
                self.__run("tpm2_flushcontext {sessfile}".format(**cmdargs))
            retout = retDict['retout']
            code = retDict['code']
            fileout = retDict['fileouts'][secpath]
            logger.info("AIK activated.")
            key = base64.b64encode(fileout)
            os.close(secfd)
            os.remove(secpath)
        except Exception as e:
            logger.error("Error decrypting AIK: "+str(e))
            logger.exception(e)
            return False
        finally:
            # clean up every temp artifact regardless of outcome
            if keyblobFile is not None:
                os.remove(keyblobFile.name)
            if secpath is not None and os.path.exists(secpath):
                os.remove(secpath)
            if sesspath is not None and os.path.exists(sesspath):
                os.remove(sesspath)
        return key
def verify_ek(self, ekcert, ekpem):
"""Verify that the provided EK certificate is signed by a trusted root
:param ekcert: The Endorsement Key certificate in DER format
:param ekpem: the endorsement public key in PEM format
:returns: True if the certificate can be verified, false otherwise
"""
#openssl x509 -inform der -in certificate.cer -out certificate.pem
try:
ek509 = M2Crypto.X509.load_cert_der_string(ekcert)
ekcertpem = ek509.get_pubkey().get_rsa().as_pem(cipher=None)
# Make sure given ekcert is for their ek
if str(ekpem) != str(ekcertpem):
logger.error("Public EK does not match EK certificate")
return False
for signer in tpm_ek_ca.trusted_certs:
signcert = M2Crypto.X509.load_cert_string(tpm_ek_ca.trusted_certs[signer])
signkey = signcert.get_pubkey()
if ek509.verify(signkey) == 1:
logger.debug("EK cert matched signer %s"%signer)
return True
except Exception as e:
# Log the exception so we don't lose the raw message
logger.exception(e)
raise Exception("Error processing ek/ekcert. Does this TPM have a valid EK?").with_traceback(sys.exc_info()[2])
logger.error("No Root CA matched EK Certificate")
return False
def get_tpm_manufacturer(self):
vendorStr = None
if tools_version == "3.2":
retDict = self.__run("tpm2_getcap -c properties-fixed")
elif tools_version == "4.0":
retDict = self.__run("tpm2_getcap properties-fixed")
output = retDict['retout']
reterr = retDict['reterr']
code = retDict['code']
if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
raise Exception("get_tpm_manufacturer failed with code "+str(code)+": "+str(reterr))
retyaml = common.yaml_to_dict(output)
if "TPM2_PT_VENDOR_STRING_1" in retyaml:
vendorStr = retyaml["TPM2_PT_VENDOR_STRING_1"]["value"]
elif "TPM_PT_VENDOR_STRING_1" in retyaml:
vendorStr = retyaml["TPM_PT_VENDOR_STRING_1"]["as string"].strip()
return vendorStr
def is_emulator(self):
return self.get_tpm_manufacturer() == 'SW'
    def is_vtpm(self):
        # Virtual TPMs are not supported on this TPM 2.0 code path.
        return False
    def tpm_init(self, self_activate=False, config_pw=None):
        """Fully initialize the TPM: ownership, EK, EK cert, and AIK.

        Returns the tuple (ek, ekcert, aik, ek_tpm, aik_name) read back from
        the metadata store.
        """
        # this was called tpm_initialize.init before
        self.warn_emulator()
        self.__take_ownership(config_pw)
        self.__create_ek()
        self.__get_pub_ek()
        ekcert = self.read_ekcert_nvram()
        self._set_tpm_metadata('ekcert', ekcert)
        # if no AIK created, then create one
        self.__create_aik(self_activate)
        return self.get_tpm_metadata('ek'), self.get_tpm_metadata('ekcert'), self.get_tpm_metadata('aik'), self.get_tpm_metadata('ek_tpm'), self.get_tpm_metadata('aik_name')
#tpm_quote
def __pcr_mask_to_list(self, mask, hash_alg):
pcr_list = []
ima_appended = ""
for pcr in range(24):
if tpm_abstract.TPM_Utilities.check_mask(mask, pcr):
if hash_alg != tpm_abstract.Hash_Algorithms.SHA1 and pcr == common.IMA_PCR:
# IMA is only in SHA1 format
ima_appended = "+sha1:"+str(pcr)
else:
pcr_list.append(str(pcr))
return ",".join(pcr_list)+ima_appended
    def create_deep_quote(self, nonce, data=None, vpcrmask=tpm_abstract.AbstractTPM.EMPTYMASK, pcrmask=tpm_abstract.AbstractTPM.EMPTYMASK):
        # Deep quotes are a vTPM feature; not available for TPM 2.0.
        raise Exception("vTPM support and deep quotes not yet implemented with TPM 2.0!")
    def create_quote(self, nonce, data=None, pcrmask=tpm_abstract.AbstractTPM.EMPTYMASK, hash_alg=None):
        """Produce a TPM quote over the masked PCRs bound to ``nonce``.

        When ``data`` is given, the data PCR is reset and extended with
        hash(data) first, under the TPM utility lock.  Returns the compound
        string 'r' + b64(zlib(quote)) + ':' + b64(zlib(sig)) + ':' +
        b64(zlib(pcrs)), the format consumed by check_quote.
        """
        if hash_alg is None:
            hash_alg = self.defaults['hash']
        quote = ""
        with tempfile.NamedTemporaryFile() as quotepath:
            with tempfile.NamedTemporaryFile() as sigpath:
                with tempfile.NamedTemporaryFile() as pcrpath:
                    keyhandle = self.get_tpm_metadata('aik_handle')
                    aik_pw = self.get_tpm_metadata('aik_pw')
                    if pcrmask is None:
                        pcrmask = tpm_abstract.AbstractTPM.EMPTYMASK
                    pcrlist = self.__pcr_mask_to_list(pcrmask, hash_alg)
                    with self.tpmutilLock:
                        if data is not None:
                            self.__run("tpm2_pcrreset %d"%common.TPM_DATA_PCR, lock=False)
                            self.extendPCR(pcrval=common.TPM_DATA_PCR, hashval=self.hashdigest(data), lock=False)
                        # The two branches differ only in flag spellings
                        # (and 3.2 wants the AIK handle in hex).
                        if tools_version == "3.2":
                            cmdargs = {
                                'aik_handle': hex(keyhandle),
                                'hashalg' : hash_alg,
                                'pcrlist': pcrlist,
                                'nonce': bytes(nonce, encoding="utf8").hex(),
                                'outquote': quotepath.name,
                                'outsig': sigpath.name,
                                'outpcr': pcrpath.name,
                                'akpw': aik_pw
                            }
                            command = "tpm2_quote -k {aik_handle} -L {hashalg}:{pcrlist} -q {nonce} -m {outquote} -s {outsig} -p {outpcr} -G {hashalg} -P {akpw}".format(**cmdargs)
                        else:
                            cmdargs = {
                                'aik_handle': keyhandle,
                                'hashalg' : hash_alg,
                                'pcrlist': pcrlist,
                                'nonce': bytes(nonce, encoding="utf8").hex(),
                                'outquote': quotepath.name,
                                'outsig': sigpath.name,
                                'outpcr': pcrpath.name,
                                'akpw': aik_pw
                            }
                            command = "tpm2_quote -c {aik_handle} -l {hashalg}:{pcrlist} -q {nonce} -m {outquote} -s {outsig} -o {outpcr} -g {hashalg} -p {akpw}".format(**cmdargs)
                        retDict = self.__run(command, lock=False, outputpaths=[quotepath.name, sigpath.name, pcrpath.name])
                        retout = retDict['retout']
                        code = retDict['code']
                        # compress + base64 each artifact, then join with ':'
                        quoteraw = retDict['fileouts'][quotepath.name]
                        quote_b64encode = base64.b64encode(zlib.compress(quoteraw))
                        sigraw = retDict['fileouts'][sigpath.name]
                        sigraw_b64encode = base64.b64encode(zlib.compress(sigraw))
                        pcrraw = retDict['fileouts'][pcrpath.name]
                        pcrraw_b64encode = base64.b64encode(zlib.compress(pcrraw))
                        quote = quote_b64encode.decode('utf-8')+":"+sigraw_b64encode.decode('utf-8')+":"+pcrraw_b64encode.decode('utf-8')
        return 'r'+quote
    def __checkdeepquote_c(self, hAIK, vAIK, deepquoteFile, nonce):
        # Deep-quote verification is a vTPM feature; not available for TPM 2.0.
        raise Exception("vTPM support and deep quotes not yet implemented with TPM 2.0!")
    def check_deep_quote(self, nonce, data, quote, vAIK, hAIK, vtpm_policy={}, tpm_policy={}, ima_measurement_list=None, ima_whitelist={}):
        # Deep-quote checking is a vTPM feature; not available for TPM 2.0.
        raise Exception("vTPM support and deep quotes not yet implemented with TPM 2.0!")
    def __check_quote_c(self, pubaik, nonce, quoteFile, sigFile, pcrFile, hash_alg):
        """Run tpm2_checkquote over the given quote/sig/pcr files and return
        the raw result dict from __run.

        Under STUB_TPM, the nonce is replaced with the canned value so the
        pre-recorded quote verifies.
        """
        if common.STUB_TPM and common.TPM_CANNED_VALUES is not None:
            jsonIn = common.TPM_CANNED_VALUES
            if 'tpm2_deluxequote' in jsonIn and 'nonce' in jsonIn['tpm2_deluxequote']:
                # YAML unicode-ifies strings, and C calls require byte strings (str)
                nonce = str(jsonIn['tpm2_deluxequote']['nonce'])
            else:
                raise Exception("Could not get quote nonce from canned JSON!")
        cmdargs = {
            'pubak': pubaik,
            'quotefile' : quoteFile,
            'sigfile': sigFile,
            'pcrfile': pcrFile,
            'hashalg': hash_alg,
            'nonce': bytes(nonce, encoding="utf8").hex()
        }
        # flag names changed between tpm2-tools 3.2 and 4.0
        if tools_version == "3.2":
            command = "tpm2_checkquote -c {pubak} -m {quotefile} -s {sigfile} -p {pcrfile} -G {hashalg} -q {nonce}"
        else:
            command = "tpm2_checkquote -u {pubak} -m {quotefile} -s {sigfile} -f {pcrfile} -g {hashalg} -q {nonce}"
        retDict = self.__run(command.format(**cmdargs), lock=False)
        return retDict
    def check_quote(self, nonce, data, quote, aikFromRegistrar, tpm_policy={}, ima_measurement_list=None, ima_whitelist={}, hash_alg=None):
        """Verify a compound quote produced by create_quote.

        Splits the 'r'-prefixed quote into its quote/sig/pcr blobs, validates
        the signature against the registrar's AIK via tpm2_checkquote, then
        rebuilds the PCR list and delegates policy evaluation to check_pcrs.
        Returns False on any verification failure.

        NOTE(review): the mutable default arguments ({}) are shared across
        calls; confirm nothing downstream mutates them.
        """
        if hash_alg is None:
            hash_alg = self.defaults['hash']
        quoteFile = None
        aikFile = None
        sigFile = None
        pcrFile = None
        # only the 'r'-prefixed (regular, non-deep) quote format is supported
        if quote[0] != 'r':
            raise Exception("Invalid quote type %s"%quote[0])
        quote = quote[1:]
        quote_tokens = quote.split(":")
        if len(quote_tokens) < 3:
            raise Exception("Quote is not compound! %s"%quote)
        quoteblob = zlib.decompress(base64.b64decode(quote_tokens[0]))
        sigblob = zlib.decompress(base64.b64decode(quote_tokens[1]))
        pcrblob = zlib.decompress(base64.b64decode(quote_tokens[2]))
        try:
            # write out quote
            qfd, qtemp = tempfile.mkstemp()
            quoteFile = open(qtemp, "wb")
            quoteFile.write(quoteblob)
            quoteFile.close()
            os.close(qfd)
            # write out sig
            sfd, stemp = tempfile.mkstemp()
            sigFile = open(stemp, "wb")
            sigFile.write(sigblob)
            sigFile.close()
            os.close(sfd)
            # write out pcr
            pfd, ptemp = tempfile.mkstemp()
            pcrFile = open(ptemp, "wb")
            pcrFile.write(pcrblob)
            pcrFile.close()
            os.close(pfd)
            # write out the registrar's AIK public key
            afd, atemp = tempfile.mkstemp()
            aikFile = open(atemp, "wb")
            aikFile.write(aikFromRegistrar.encode('utf-8'))
            aikFile.close()
            os.close(afd)
            retDict = self.__check_quote_c(aikFile.name, nonce, quoteFile.name, sigFile.name, pcrFile.name, hash_alg)
            retout = retDict['retout']
            reterr = retDict['reterr']
            code = retDict['code']
        except Exception as e:
            logger.error("Error verifying quote: "+str(e))
            logger.exception(e)
            return False
        finally:
            # temp files are always removed, whatever the outcome above
            if aikFile is not None:
                os.remove(aikFile.name)
            if quoteFile is not None:
                os.remove(quoteFile.name)
            if sigFile is not None:
                os.remove(sigFile.name)
            if pcrFile is not None:
                os.remove(pcrFile.name)
        if len(retout) < 1 or code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
            logger.error("Failed to validate signature, output: %s"%reterr)
            return False
        # rebuild "PCR <n> <hex digest>" lines from the checkquote output
        pcrs = []
        jsonout = common.yaml_to_dict(retout)
        if "pcrs" in jsonout:
            if hash_alg in jsonout["pcrs"]:
                alg_size = tpm_abstract.Hash_Algorithms.get_hash_size(hash_alg) // 4
                for pcrval, hashval in jsonout["pcrs"][hash_alg].items():
                    pcrs.append("PCR " + str(pcrval) + " " + '{0:0{1}x}'.format(hashval, alg_size))
            # IMA is always in SHA1 format, so don't leave it behind!
            if hash_alg != tpm_abstract.Hash_Algorithms.SHA1:
                if tpm_abstract.Hash_Algorithms.SHA1 in jsonout["pcrs"] and common.IMA_PCR in jsonout["pcrs"][tpm_abstract.Hash_Algorithms.SHA1]:
                    sha1_size = tpm_abstract.Hash_Algorithms.get_hash_size(tpm_abstract.Hash_Algorithms.SHA1) // 4
                    ima_val = jsonout["pcrs"][tpm_abstract.Hash_Algorithms.SHA1][common.IMA_PCR]
                    pcrs.append("PCR " + str(common.IMA_PCR) + " " + '{0:0{1}x}'.format(ima_val, sha1_size))
        if len(pcrs) == 0:
            pcrs = None
        return self.check_pcrs(tpm_policy, pcrs, data, False, ima_measurement_list, ima_whitelist)
def extendPCR(self, pcrval, hashval, hash_alg=None, lock=True):
if hash_alg is None:
hash_alg = self.defaults['hash']
self.__run("tpm2_pcrextend %d:%s=%s"%(pcrval, hash_alg, hashval), lock=lock)
def readPCR(self, pcrval, hash_alg=None):
if hash_alg is None:
hash_alg = self.defaults['hash']
if tools_version == "3.2":
output = common.list_convert(self.__run("tpm2_pcrlist")['retout'])
elif tools_version == "4.0":
output = common.list_convert(self.__run("tpm2_pcrread")['retout'])
jsonout = common.yaml_to_dict(output)
if hash_alg not in jsonout:
raise Exception("Invalid hashing algorithm '%s' for reading PCR number %d."%(hash_alg, pcrval))
# alg_size = Hash_Algorithms.get_hash_size(hash_alg)/4
alg_size = tpm_abstract.Hash_Algorithms.get_hash_size(hash_alg) // 4
return '{0:0{1}x}'.format(jsonout[hash_alg][pcrval], alg_size)
#tpm_random
    def _get_tpm_rand_block(self, size=32):
        """Fetch ``size`` random bytes from the TPM via tpm2_getrandom.

        Returns the raw bytes, or None (after a one-time warning) when TPM
        randomness is unavailable.
        """
        #make a temp file for the output
        rand = None
        with tempfile.NamedTemporaryFile() as randpath:
            try:
                command = "tpm2_getrandom -o %s %d" % (randpath.name, size)
                retDict = self.__run(command, outputpaths=randpath.name)
                retout = retDict['retout']
                code = retDict['code']
                rand = retDict['fileouts'][randpath.name]
            except Exception as e:
                # warn only once per instance to avoid log spam
                if not self.tpmrand_warned:
                    logger.warn("TPM randomness not available: %s"%e)
                    self.tpmrand_warned = True
                return None
        return rand
#tpm_nvram
    def write_key_nvram(self, key):
        """Persist ``key`` into TPM NVRAM index 0x1500018, owner-auth only.

        Defines the NV index (sized to BOOTSTRAP_KEY_SIZE) and writes the key
        from a temp file; both steps use raiseOnError=False so an
        already-defined index does not abort the write.
        """
        owner_pw = self.get_tpm_metadata('owner_pw')
        # write the key to a temp file so tpm2_nvwrite can read it
        with tempfile.NamedTemporaryFile() as keyFile:
            keyFile.write(key)
            keyFile.flush()
            attrs = "ownerread|ownerwrite"
            if tools_version == "3.2":
                self.__run("tpm2_nvdefine -x 0x1500018 -a 0x40000001 -s %s -t \"%s\" -I %s -P %s"%(common.BOOTSTRAP_KEY_SIZE, attrs, owner_pw, owner_pw), raiseOnError=False)
                self.__run("tpm2_nvwrite -x 0x1500018 -a 0x40000001 -P %s %s"%(owner_pw, keyFile.name), raiseOnError=False)
            else:
                self.__run("tpm2_nvdefine 0x1500018 -C 0x40000001 -s %s -a \"%s\" -p %s -P %s"%(common.BOOTSTRAP_KEY_SIZE, attrs, owner_pw, owner_pw), raiseOnError=False)
                self.__run("tpm2_nvwrite 0x1500018 -C 0x40000001 -P %s -i %s"%(owner_pw, keyFile.name), raiseOnError=False)
        return
    def read_ekcert_nvram(self):
        """Read the RSA EK certificate (DER) from NVRAM index 0x1c00002.

        Returns the certificate base64-encoded, or None when no certificate
        is present (common on software TPMs).
        """
        #make a temp file for the quote
        with tempfile.NamedTemporaryFile() as nvpath:
            # Check for RSA EK cert in NVRAM (and get length)
            if tools_version == "3.2":
                retDict = self.__run("tpm2_nvlist", raiseOnError=False)
            elif tools_version == "4.0":
                retDict = self.__run("tpm2_nvreadpublic", raiseOnError=False)
            output = retDict['retout']
            reterr = retDict['reterr']
            code = retDict['code']
            if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
                if tools_version == "3.2":
                    raise Exception("tpm2_nvlist for ekcert failed with code "+str(code)+": "+str(reterr))
                elif tools_version == "4.0":
                    raise Exception("tpm2_nvreadpublic for ekcert failed with code "+str(code)+": "+str(reterr))
            outjson = common.yaml_to_dict(output)
            # absent index (or missing size field) means no EK cert was provisioned
            if outjson is None or 0x1c00002 not in outjson or "size" not in outjson[0x1c00002]:
                logger.warn("No EK certificate found in TPM NVRAM")
                return None
            ekcert_size = outjson[0x1c00002]["size"]
            # Read the RSA EK cert from NVRAM (DER format)
            if tools_version == "3.2":
                retDict = self.__run("tpm2_nvread -x 0x1c00002 -s %s -f %s"%(ekcert_size, nvpath.name), raiseOnError=False, outputpaths=nvpath.name)
            elif tools_version == "4.0":
                retDict = self.__run("tpm2_nvread 0x1c00002 -s %s -o %s"%(ekcert_size, nvpath.name), raiseOnError=False, outputpaths=nvpath.name)
            output = common.list_convert(retDict['retout'])
            errout = common.list_convert(retDict['reterr'])
            code = retDict['code']
            ekcert = retDict['fileouts'][nvpath.name]
            if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
                raise Exception("tpm2_nvread for ekcert failed with code "+str(code)+": "+str(errout))
        return base64.b64encode(ekcert)
    def read_key_nvram(self):
        """Read the bootstrap key (U) back from NVRAM index 0x1500018.

        Returns the stored bytes, or None when the index does not exist or
        the stored value has the wrong length.
        """
        owner_pw = self.get_tpm_metadata('owner_pw')
        if tools_version == "3.2":
            retDict = self.__run("tpm2_nvread -x 0x1500018 -a 0x40000001 -s %s -P %s"%(common.BOOTSTRAP_KEY_SIZE, owner_pw), raiseOnError=False)
        else:
            retDict = self.__run("tpm2_nvread 0x1500018 -C 0x40000001 -s %s -P %s"%(common.BOOTSTRAP_KEY_SIZE, owner_pw), raiseOnError=False)
        output = common.list_convert(retDict['retout'])
        errout = common.list_convert(retDict['reterr'])
        code = retDict['code']
        if code != tpm_abstract.AbstractTPM.EXIT_SUCESS:
            # "index missing" messages (which differ per tools version) mean
            # no key was ever stored; anything else is a real failure
            if len(errout) > 0 and "handle does not exist" in "\n".join(errout):
                logger.debug("No stored U in TPM NVRAM")
                return None
            elif len(errout) > 0 and "ERROR: Failed to read NVRAM public area at index" in "\n".join(errout):
                logger.debug("No stored U in TPM NVRAM")
                return None
            else:
                raise Exception("nv_readvalue failed with code "+str(code)+": "+str(errout))
        if len(output) != common.BOOTSTRAP_KEY_SIZE:
            logger.debug("Invalid key length from NVRAM: %d"%(len(output)))
            return None
        return output
| 45.65749 | 179 | 0.570717 |
acf6f7c6c18ef2f6b8f58c8c6f8f97b04ba0f45f | 1,599 | py | Python | src/ssh/setup.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | null | null | null | src/ssh/setup.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | null | null | null | src/ssh/setup.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from setuptools import setup, find_packages
VERSION = "1.1.0"
# Trove classifiers describing the extension's support matrix.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'License :: OSI Approved :: MIT License',
]
# Runtime requirements installed alongside the extension.
DEPENDENCIES = [
    'cryptography',
    'oschmod==0.3.12'
]
setup(
    name='ssh',
    version=VERSION,
    description='SSH into Azure VMs using RBAC and AAD OpenSSH Certificates',
    long_description='SSH into Azure VMs using RBAC and AAD OpenSSH Certificates. The client generates (or uses existing) OpenSSH keys that are then signed by AAD into OpenSSH certificates for access to Azure VMs with the AAD Extension installed.',
    license='MIT',
    author='Microsoft Corporation',
    author_email='azpycli@microsoft.com',
    url='https://github.com/Azure/azure-cli-extensions/tree/main/src/ssh',
    classifiers=CLASSIFIERS,
    packages=find_packages(),
    install_requires=DEPENDENCIES,
    # ship the CLI extension metadata file inside the wheel
    package_data={'azext_ssh': ['azext_metadata.json']}
)
| 37.186047 | 249 | 0.621013 |
acf6f82bc781114f400db2eb0040124ea4ea8826 | 292 | py | Python | examples/scrapy/firstproject/firstproject/items.py | hektormisplon/web-scraping-tutorial | f577e06f1122caa14c490593aed5fcc11b33afc9 | [
"MIT"
] | null | null | null | examples/scrapy/firstproject/firstproject/items.py | hektormisplon/web-scraping-tutorial | f577e06f1122caa14c490593aed5fcc11b33afc9 | [
"MIT"
] | null | null | null | examples/scrapy/firstproject/firstproject/items.py | hektormisplon/web-scraping-tutorial | f577e06f1122caa14c490593aed5fcc11b33afc9 | [
"MIT"
] | 1 | 2020-04-20T07:48:07.000Z | 2020-04-20T07:48:07.000Z | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class FirstprojectItem(scrapy.Item):
    """Container for the fields scraped by this project's spiders.

    Currently empty — declare fields as shown below when needed.
    """
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| 19.466667 | 52 | 0.691781 |
acf6f8bbcab7fee1bf0a208d4a4c468a9798720f | 1,666 | py | Python | scripts/demo_rot_2d.py | jejjohnson/rbig_jax | 112e064d5b62631aa03b7563c9eb9f115ab23eb0 | [
"MIT"
] | null | null | null | scripts/demo_rot_2d.py | jejjohnson/rbig_jax | 112e064d5b62631aa03b7563c9eb9f115ab23eb0 | [
"MIT"
] | null | null | null | scripts/demo_rot_2d.py | jejjohnson/rbig_jax | 112e064d5b62631aa03b7563c9eb9f115ab23eb0 | [
"MIT"
] | null | null | null | # JAX SETTINGS
import jax
import jax.numpy as np
# Plot Functions
import matplotlib.pyplot as plt
import numpy as onp
import seaborn as sns
from scipy.stats import beta
from rbig_jax.data import get_classic
from rbig_jax.transforms.linear import init_pca_params
# seaborn styling shared by every figure below
sns.reset_defaults()
sns.set_context(context="talk", font_scale=0.7)
# =========================
# Original Data
# =========================
# 10k samples of the toy dataset, transposed so rows are the two coordinates
data = get_classic(10_000).T
# ========================
# PLOT
# ========================
fig = plt.figure(figsize=(5, 5))
color = "blue"
title = "Original Data"
g = sns.jointplot(x=data[0], y=data[1], kind="hex", color=color)
plt.xlabel("X")
plt.ylabel("Y")
plt.suptitle(title)
plt.tight_layout()
plt.savefig("scripts/demo2d_rot_x.png")
# ========================
# Forward Transformation
# ========================
# NOTE: 'np' is jax.numpy here (see imports); plain NumPy is aliased 'onp'
data = np.array(data)
# initialize parameters
params, forward_func, inverse_func = init_pca_params(data.T)
# forward transformation
X_transform, X_ldj = forward_func(data.T)
X_transform = X_transform.T
color = "Red"
title = "Transformed Data"
g = sns.jointplot(x=X_transform[0], y=X_transform[1], kind="hex", color=color)
plt.xlabel("X")
plt.ylabel("Y")
plt.suptitle(title)
plt.tight_layout()
plt.savefig("scripts/demo2d_rot_mg_xg.png")
# ===========================
# Inverse Transformation
# ===========================
# round-trip: apply the inverse transform and plot the reconstructed data
X_transform = X_transform.T
X_approx = inverse_func(X_transform).T
color = "Red"
title = "Approximate Original Data"
g = sns.jointplot(x=X_approx[0], y=X_approx[1], kind="hex", color=color)
plt.xlabel("X")
plt.ylabel("Y")
plt.suptitle(title)
plt.tight_layout()
plt.savefig("scripts/demo2d_rot_mg_x_approx.png")
| 23.138889 | 78 | 0.654862 |
acf6f977b3fb6317944d608b5452e00562a9ef14 | 522 | py | Python | process_times.py | RyanTorant/AGIO | 634e38cf5013821c78dad53d242da8202f632aa0 | [
"MIT"
] | null | null | null | process_times.py | RyanTorant/AGIO | 634e38cf5013821c78dad53d242da8202f632aa0 | [
"MIT"
] | null | null | null | process_times.py | RyanTorant/AGIO | 634e38cf5013821c78dad53d242da8202f632aa0 | [
"MIT"
] | 1 | 2019-11-27T00:57:27.000Z | 2019-11-27T00:57:27.000Z | import csv
import numpy as np
import scipy.stats as stats
def process(name):
    """Print the name, mean, and standard deviation of a timing series.

    Reads every cell of ``bin/<name>.csv``, keeps the cells that parse as
    floats (non-numeric cells such as headers are skipped), and prints the
    mean and population standard deviation on separate lines.
    """
    print(name)
    times = []
    with open('bin/' + name + '.csv') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            for column in row:
                try:
                    times.append(float(column))
                except ValueError:
                    # was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit; only non-numeric
                    # cells should be skipped
                    pass
    avg = np.mean(times)
    dev = np.std(times)
    print(avg)
    print(dev)
# Summarize each recorded timing series (each reads bin/<name>.csv).
process('decision_times')
process('total_times')
process('evo_time') | 20.88 | 49 | 0.544061 |
acf6f994bc82507f144cbede3c6f33baa0b3dfaa | 23 | py | Python | kfda/__init__.py | concavegit/kfda | f2634c4188d63274596c9d697594181f43833ab0 | [
"MIT"
] | 12 | 2020-05-22T09:00:01.000Z | 2022-03-13T02:00:15.000Z | kfda/__init__.py | concavegit/kfda | f2634c4188d63274596c9d697594181f43833ab0 | [
"MIT"
] | 1 | 2020-10-06T22:32:17.000Z | 2020-11-25T03:37:48.000Z | kfda/__init__.py | concavegit/kfda | f2634c4188d63274596c9d697594181f43833ab0 | [
"MIT"
] | 6 | 2020-04-26T06:42:00.000Z | 2022-01-23T15:07:03.000Z | from .kfda import Kfda
| 11.5 | 22 | 0.782609 |
acf6f9a20d30371f09e89b9f2a3cd13c9a163bb5 | 1,426 | py | Python | member/views/login.py | bmackley/ancientassyrian | baaf973a1162765d964f613e8bd839ef8cc3ea60 | [
"Apache-2.0"
] | null | null | null | member/views/login.py | bmackley/ancientassyrian | baaf973a1162765d964f613e8bd839ef8cc3ea60 | [
"Apache-2.0"
] | null | null | null | member/views/login.py | bmackley/ancientassyrian | baaf973a1162765d964f613e8bd839ef8cc3ea60 | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.contrib.auth import authenticate, login
from Manager import models as m
from . import templater
def process_request(request):
    '''Logs a user in'''
    # Unbound form for the initial GET render.
    form = LoginForm()
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            # NOTE(review): 'lusername' is not a standard authenticate() kwarg —
            # presumably consumed by a custom auth backend; confirm, since the
            # default ModelBackend expects 'username'.
            user = authenticate(lusername = form.cleaned_data['username'].lower(), password = form.cleaned_data['password'])
            if user is not None:
                # NOTE(review): both branches below are identical, so the
                # superuser check currently has no effect — confirm intent.
                if not request.user.is_superuser:
                    login(request, user)
                    redirect = request.META.get('HTTP_REFERER')
                    return HttpResponse('<script>window.location.href="'+ redirect +'"</script>')
                else:
                    login(request, user)
                    redirect = request.META.get('HTTP_REFERER')
                    return HttpResponse('<script>window.location.href="'+ redirect +'"</script>')
    # Fall through: GET request or invalid credentials — re-render the form.
    tvars = {
        'form' : form,
    }
    return templater.render_to_response(request, 'login.html', tvars)
class LoginForm(forms.Form):
    """Username/password login form that validates credentials in clean()."""
    username = forms.CharField()
    password = forms.CharField(widget=forms.PasswordInput)
    def clean(self):
        # Use .get(): a field that failed its own validation is absent from
        # cleaned_data, and indexing it would raise KeyError instead of
        # surfacing a form error. (Original also compared 'user == None';
        # identity test 'is None' is the correct idiom.)
        user = authenticate(username=self.cleaned_data.get('username'), password=self.cleaned_data.get('password'))
        if user is None:
            raise forms.ValidationError("Incorrect password or Username")
        return self.cleaned_data
acf6f9d2c36aeb1cbf2d8ab8a7c7b75d28eb31f3 | 3,547 | py | Python | chj/util/svgutil.py | aemcgraw/CodeHawk-Java | 8e43877d0357579f6509d3fc52c69c2d4568d288 | [
"MIT"
] | null | null | null | chj/util/svgutil.py | aemcgraw/CodeHawk-Java | 8e43877d0357579f6509d3fc52c69c2d4568d288 | [
"MIT"
] | null | null | null | chj/util/svgutil.py | aemcgraw/CodeHawk-Java | 8e43877d0357579f6509d3fc52c69c2d4568d288 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# CodeHawk Java Analyzer
# Author: Andrew McGraw
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import os
import string
import subprocess
import difflib
import xml.etree.ElementTree as ET
import chj.util.dotutil as UD
def svg_namespace():
    """Namespace map for querying Graphviz-generated SVG with ElementTree."""
    return dict(svg='http://www.w3.org/2000/svg')
def _get_graph_nodes(svg):
    """Return the <g class="node"> elements of a Graphviz-generated SVG tree.

    Graphviz wraps the whole graph in an outer <g> and emits one nested <g>
    per node/edge; only the second-level groups classed 'node' are returned.
    """
    ns = svg_namespace()
    nodes = []
    for elem in svg.findall('svg:g', namespaces=ns):
        for subelem in elem.findall('svg:g', namespaces=ns):
            # .get() skips groups without a 'class' attribute; the original
            # indexed attrib['class'] and raised KeyError on such groups
            if subelem.get('class') == 'node':
                nodes.append(subelem)
    return nodes
def append_loop_levels(svg, loop_levels):
    """Stamp each graph node with its loop depth as an 'ldepth' attribute,
    looked up by the node's 'pc' attribute."""
    for node in _get_graph_nodes(svg):
        node_pc = int(node.attrib['pc'])
        node.attrib['ldepth'] = str(loop_levels[node_pc])
def append_cmsixs(svg, cmsix_dict):
    """Tag nodes whose <title> text appears in cmsix_dict with a 'cmsix'
    attribute holding the mapped value."""
    ns = svg_namespace()
    for node in _get_graph_nodes(svg):
        node_title = node.findtext('svg:title', namespaces=ns)
        if node_title in cmsix_dict:
            node.attrib['cmsix'] = str(cmsix_dict[node_title])
def append_pcs(svg, node_pcs):
    """Tag nodes whose <title> text appears in node_pcs with a 'pc'
    attribute holding the mapped program counter."""
    ns = svg_namespace()
    for node in _get_graph_nodes(svg):
        node_title = node.findtext('svg:title', namespaces=ns)
        if node_title in node_pcs:
            node.attrib['pc'] = str(node_pcs[node_title])
def save_svg(path, g):
    """Render <path>/<g.name>.dot to <path>/<g.name>.svg with the 'dot' tool.

    Silently does nothing when the .dot file is missing.
    """
    dotfilename = os.path.join(path,g.name + '.dot')
    svgfilename = os.path.join(path,g.name + '.svg')
    if os.path.isfile(dotfilename):
        cmd = [ 'dot', '-Tsvg', '-o', svgfilename, dotfilename ]
        subprocess.call(cmd, stderr=subprocess.STDOUT)
def get_svg(path, g):
    """Write graph ``g`` as dot, render it to SVG, and return the parsed tree.

    Files are placed under <path>/graphs (created if needed).  Note the
    register_namespace call mutates ElementTree's process-global namespace
    registry.
    """
    graphsdir = os.path.join(path, 'graphs')
    if not os.path.isdir(graphsdir):
        os.mkdir(graphsdir)
    UD.save_dot(graphsdir, g)
    save_svg(graphsdir, g)
    svgfilename = os.path.join(graphsdir, g.name + '.svg')
    tree = ET.parse(svgfilename)
    #Removes the namespace prefixes from elements in the svg
    #This is necessary because HTML implicitly recognizes elements
    #from the svg namespace but does not handle namespace prefixes
    ET.register_namespace("","http://www.w3.org/2000/svg")
    return tree
| 35.828283 | 80 | 0.647308 |
acf6f9e2f1915d0e1c8a35428b8745e7ee28d471 | 245 | py | Python | scikit-learn/iris-imports.py | latika18/learning | a57c9aacc0157bf7c318f46c1e7c4971d1d55aea | [
"Unlicense"
] | null | null | null | scikit-learn/iris-imports.py | latika18/learning | a57c9aacc0157bf7c318f46c1e7c4971d1d55aea | [
"Unlicense"
] | null | null | null | scikit-learn/iris-imports.py | latika18/learning | a57c9aacc0157bf7c318f46c1e7c4971d1d55aea | [
"Unlicense"
] | null | null | null | %matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
# Load the Iris dataset (expects Iris.csv in the current working directory).
df = pd.read_csv('Iris.csv')
| 20.416667 | 52 | 0.82449 |
acf6fa8af226f8b5bd64e72f299345c4c2dde6e3 | 679 | py | Python | setup.py | affanv14/keras-unet | 84b1b735327d424ab6dcf242b6537cd47901f0b5 | [
"MIT"
] | null | null | null | setup.py | affanv14/keras-unet | 84b1b735327d424ab6dcf242b6537cd47901f0b5 | [
"MIT"
] | null | null | null | setup.py | affanv14/keras-unet | 84b1b735327d424ab6dcf242b6537cd47901f0b5 | [
"MIT"
] | null | null | null | from setuptools import setup
from setuptools import find_packages
# Use the README as the PyPI long description.
with open("README.md") as f:
    long_description = f.read()
setup(
    name="keras-unet",
    version="0.0.7",
    description="Helper package with multiple U-Net implementations in Keras as well as useful utility tools helpful when working with image segmentation tasks",
    long_description=long_description,
    long_description_content_type="text/markdown", # This is important!
    url="http://github.com/karolzak/keras-unet",
    author="Karol Zak",
    author_email="karol.zak@hotmail.com",
    license="MIT",
    packages=find_packages(),
    zip_safe=False,
    install_requires=["keras==2.3.0"],
)
| 32.333333 | 161 | 0.723122 |
acf6fadeefd97b20114b95d52ab4a4a3d1d5cffd | 766 | py | Python | hard/python3/c0012_52_n-queens-ii/00_leetcode_0012.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | null | null | null | hard/python3/c0012_52_n-queens-ii/00_leetcode_0012.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | null | null | null | hard/python3/c0012_52_n-queens-ii/00_leetcode_0012.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | 3 | 2018-02-09T02:46:48.000Z | 2021-02-20T08:32:03.000Z | # DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#52. N-Queens II
#The n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.
#Given an integer n, return the number of distinct solutions to the n-queens puzzle.
#Example:
#Input: 4
#Output: 2
#Explanation: There are two distinct solutions to the 4-queens puzzle as shown below.
#[
# [".Q..", // Solution 1
# "...Q",
# "Q...",
# "..Q."],
# ["..Q.", // Solution 2
# "Q...",
# "...Q",
# ".Q.."]
#]
#class Solution:
# def totalNQueens(self, n):
# """
# :type n: int
# :rtype: int
# """
# Time Is Money | 25.533333 | 119 | 0.633159 |
acf6fc17230249ba81a6fd956fde849b009a34a3 | 27,413 | py | Python | tests/wallet/did_wallet/test_did.py | inan0812/chia-blockchain | 8de40989f56fb64d6ff1690ae0c2169cc11ad18b | [
"Apache-2.0"
] | 1 | 2021-09-19T18:59:19.000Z | 2021-09-19T18:59:19.000Z | tests/wallet/did_wallet/test_did.py | inan0812/chia-blockchain | 8de40989f56fb64d6ff1690ae0c2169cc11ad18b | [
"Apache-2.0"
] | null | null | null | tests/wallet/did_wallet/test_did.py | inan0812/chia-blockchain | 8de40989f56fb64d6ff1690ae0c2169cc11ad18b | [
"Apache-2.0"
] | 1 | 2022-02-08T19:58:12.000Z | 2022-02-08T19:58:12.000Z | """
import asyncio
import time
import pytest
from inan.simulator.simulator_protocol import FarmNewBlockProtocol
from inan.types.peer_info import PeerInfo
from inan.util.ints import uint16, uint32, uint64
from tests.setup_nodes import setup_simulators_and_wallets
from inan.wallet.did_wallet.did_wallet import DIDWallet
from inan.wallet.did_wallet import did_wallet_puzzles
from clvm_tools import binutils
from inan.types.blockchain_format.program import Program
from inan.wallet.derivation_record import DerivationRecord
from inan.types.coin_solution import CoinSolution
from blspy import AugSchemeMPL
from inan.types.spend_bundle import SpendBundle
from inan.wallet.transaction_record import TransactionRecord
from inan.wallet.derive_keys import master_sk_to_wallet_sk
from inan.consensus.block_rewards import calculate_pool_reward, calculate_base_farmer_reward
from tests.time_out_assert import time_out_assert
from secrets import token_bytes
from inan.wallet.util.transaction_type import TransactionType
from inan.consensus.default_constants import DEFAULT_CONSTANTS
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
class XTestDIDWallet:
@pytest.fixture(scope="function")
async def wallet_node(self):
async for _ in setup_simulators_and_wallets(1, 1, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def two_wallet_nodes_five_freeze(self):
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
@pytest.fixture(scope="function")
async def three_sim_two_wallets(self):
async for _ in setup_simulators_and_wallets(3, 2, {}):
yield _
@pytest.mark.asyncio
async def test_creation_from_backup_file(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node_0, server_2 = wallets[0]
wallet_node_1, server_3 = wallets[1]
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
ph = await wallet_0.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(10, wallet_0.get_unconfirmed_balance, funds)
await time_out_assert(10, wallet_0.get_confirmed_balance, funds)
# Wallet1 sets up DIDWallet1 without any backup set
did_wallet_0: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_0.wallet_state_manager, wallet_0, uint64(101)
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_0.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet_0.get_unconfirmed_balance, 101)
await time_out_assert(15, did_wallet_0.get_pending_change_balance, 0)
# Wallet1 sets up DIDWallet_1 with DIDWallet_0 as backup
backup_ids = [bytes.fromhex(did_wallet_0.get_my_DID())]
did_wallet_1: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_0.wallet_state_manager, wallet_0, uint64(201), backup_ids
)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_1.get_confirmed_balance, 201)
await time_out_assert(15, did_wallet_1.get_unconfirmed_balance, 201)
await time_out_assert(15, did_wallet_1.get_pending_change_balance, 0)
filename = "test.backup"
did_wallet_1.create_backup(filename)
# Wallet2 recovers DIDWallet2 to a new set of keys
did_wallet_2 = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_node_1.wallet_state_manager, wallet_1, filename
)
coins = await did_wallet_1.select_coins(1)
coin = coins.copy().pop()
assert did_wallet_2.did_info.temp_coin == coin
newpuzhash = await did_wallet_2.get_new_inner_hash()
pubkey = bytes(
(await did_wallet_2.wallet_state_manager.get_unused_derivation_record(did_wallet_2.wallet_info.id)).pubkey
)
message_spend_bundle = await did_wallet_0.create_attestment(
did_wallet_2.did_info.temp_coin.name(), newpuzhash, pubkey, "test.attest"
)
print(f"pubkey: {pubkey}")
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
(
test_info_list,
test_message_spend_bundle,
) = await did_wallet_2.load_attest_files_for_recovery_spend(["test.attest"])
assert message_spend_bundle == test_message_spend_bundle
await did_wallet_2.recovery_spend(
did_wallet_2.did_info.temp_coin,
newpuzhash,
test_info_list,
pubkey,
test_message_spend_bundle,
)
print(f"pubkey: {did_wallet_2}")
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(45, did_wallet_2.get_confirmed_balance, 201)
await time_out_assert(45, did_wallet_2.get_unconfirmed_balance, 201)
# DIDWallet3 spends the money back to itself
ph2 = await wallet_1.get_new_puzzlehash()
await did_wallet_2.create_exit_spend(ph2)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, wallet_1.get_confirmed_balance, 200)
await time_out_assert(15, wallet_1.get_unconfirmed_balance, 200)
await time_out_assert(45, did_wallet_2.get_confirmed_balance, 0)
await time_out_assert(45, did_wallet_2.get_unconfirmed_balance, 0)
@pytest.mark.asyncio
async def test_did_recovery_with_multiple_backup_dids(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
server_1 = full_node_1.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node.wallet_state_manager, wallet, uint64(101)
)
ph = await wallet2.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
recovery_list = [bytes.fromhex(did_wallet.get_my_DID())]
did_wallet_2: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_2.wallet_state_manager, wallet2, uint64(101), recovery_list
)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_2.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet_2.get_unconfirmed_balance, 101)
assert did_wallet_2.did_info.backup_ids == recovery_list
recovery_list.append(bytes.fromhex(did_wallet_2.get_my_DID()))
did_wallet_3: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_2.wallet_state_manager, wallet2, uint64(201), recovery_list
)
ph2 = await wallet.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
assert did_wallet_3.did_info.backup_ids == recovery_list
await time_out_assert(15, did_wallet_3.get_confirmed_balance, 201)
await time_out_assert(15, did_wallet_3.get_unconfirmed_balance, 201)
coins = await did_wallet_3.select_coins(1)
coin = coins.pop()
filename = "test.backup"
did_wallet_3.create_backup(filename)
did_wallet_4 = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_node.wallet_state_manager,
wallet,
filename,
)
pubkey = (
await did_wallet_4.wallet_state_manager.get_unused_derivation_record(did_wallet_2.wallet_info.id)
).pubkey
new_ph = await did_wallet_4.get_new_inner_hash()
message_spend_bundle = await did_wallet.create_attestment(coin.name(), new_ph, pubkey, "test1.attest")
message_spend_bundle2 = await did_wallet_2.create_attestment(coin.name(), new_ph, pubkey, "test2.attest")
message_spend_bundle = message_spend_bundle.aggregate([message_spend_bundle, message_spend_bundle2])
(
test_info_list,
test_message_spend_bundle,
) = await did_wallet_4.load_attest_files_for_recovery_spend(["test1.attest", "test2.attest"])
assert message_spend_bundle == test_message_spend_bundle
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await did_wallet_4.recovery_spend(coin, new_ph, test_info_list, pubkey, message_spend_bundle)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet_4.get_confirmed_balance, 201)
await time_out_assert(15, did_wallet_4.get_unconfirmed_balance, 201)
await time_out_assert(15, did_wallet_3.get_confirmed_balance, 0)
await time_out_assert(15, did_wallet_3.get_unconfirmed_balance, 0)
@pytest.mark.asyncio
async def test_did_recovery_with_empty_set(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
server_1 = full_node_1.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node.wallet_state_manager, wallet, uint64(101)
)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
coins = await did_wallet.select_coins(1)
coin = coins.pop()
info = Program.to([])
pubkey = (await did_wallet.wallet_state_manager.get_unused_derivation_record(did_wallet.wallet_info.id)).pubkey
spend_bundle = await did_wallet.recovery_spend(
coin, ph, info, pubkey, SpendBundle([], AugSchemeMPL.aggregate([]))
)
additions = spend_bundle.additions()
assert additions == []
@pytest.mark.asyncio
async def test_did_attest_after_recovery(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
server_1 = full_node_1.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node.wallet_state_manager, wallet, uint64(101)
)
ph2 = await wallet2.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
recovery_list = [bytes.fromhex(did_wallet.get_my_DID())]
did_wallet_2: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_2.wallet_state_manager, wallet2, uint64(101), recovery_list
)
ph = await wallet.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_2.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet_2.get_unconfirmed_balance, 101)
assert did_wallet_2.did_info.backup_ids == recovery_list
# Update coin with new ID info
recovery_list = [bytes.fromhex(did_wallet_2.get_my_DID())]
await did_wallet.update_recovery_list(recovery_list, uint64(1))
assert did_wallet.did_info.backup_ids == recovery_list
await did_wallet.create_update_spend()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
# DID Wallet 2 recovers into itself with new innerpuz
new_ph = await did_wallet_2.get_new_inner_hash()
coins = await did_wallet_2.select_coins(1)
coin = coins.pop()
pubkey = (
await did_wallet_2.wallet_state_manager.get_unused_derivation_record(did_wallet_2.wallet_info.id)
).pubkey
message_spend_bundle = await did_wallet.create_attestment(coin.name(), new_ph, pubkey, "test.attest")
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
(
info,
message_spend_bundle,
) = await did_wallet_2.load_attest_files_for_recovery_spend(["test.attest"])
await did_wallet_2.recovery_spend(coin, new_ph, info, pubkey, message_spend_bundle)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_2.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet_2.get_unconfirmed_balance, 101)
# Recovery spend
coins = await did_wallet.select_coins(1)
coin = coins.pop()
pubkey = (await did_wallet.wallet_state_manager.get_unused_derivation_record(did_wallet.wallet_info.id)).pubkey
await did_wallet_2.create_attestment(coin.name(), ph, pubkey, "test.attest")
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
(
test_info_list,
test_message_spend_bundle,
) = await did_wallet.load_attest_files_for_recovery_spend(["test.attest"])
await did_wallet.recovery_spend(coin, ph, test_info_list, pubkey, test_message_spend_bundle)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, wallet.get_confirmed_balance, 30000000000000)
await time_out_assert(15, wallet.get_unconfirmed_balance, 30000000000000)
await time_out_assert(15, did_wallet.get_confirmed_balance, 0)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 0)
@pytest.mark.asyncio
async def test_make_double_output(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
server_1 = full_node_1.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node.wallet_state_manager, wallet, uint64(101)
)
ph2 = await wallet2.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
await time_out_assert(15, did_wallet.get_spendable_balance, 101)
# Lock up with non DID innerpuz so that we can create two outputs
# Innerpuz will output the innersol, so we just pass in ((51 0xMyPuz 49) (51 0xMyPuz 51))
innerpuz = Program.to(binutils.assemble("1"))
innerpuzhash = innerpuz.get_tree_hash()
puz = did_wallet_puzzles.create_fullpuz(
innerpuzhash,
did_wallet.did_info.origin_coin.puzzle_hash,
)
# Add the hacked puzzle to the puzzle store so that it is recognised as "our" puzzle
old_devrec = await did_wallet.wallet_state_manager.get_unused_derivation_record(did_wallet.wallet_info.id)
devrec = DerivationRecord(
old_devrec.index,
puz.get_tree_hash(),
old_devrec.pubkey,
old_devrec.wallet_type,
old_devrec.wallet_id,
)
await did_wallet.wallet_state_manager.puzzle_store.add_derivation_paths([devrec])
await did_wallet.create_spend(puz.get_tree_hash())
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
await time_out_assert(15, did_wallet.get_spendable_balance, 101)
# Create spend by hand so that we can use the weird innersol
coins = await did_wallet.select_coins(1)
coin = coins.pop()
# innerpuz is our desired output
innersol = Program.to([[51, coin.puzzle_hash, 45], [51, coin.puzzle_hash, 56]])
# full solution is (corehash parent_info my_amount innerpuz_reveal solution)
parent_info = await did_wallet.get_parent_for_coin(coin)
fullsol = Program.to(
[
[did_wallet.did_info.origin_coin.parent_coin_info, did_wallet.did_info.origin_coin.amount],
[
parent_info.parent_name,
parent_info.inner_puzzle_hash,
parent_info.amount,
],
coin.amount,
innersol,
]
)
try:
cost, result = puz.run_with_cost(DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM, fullsol)
except Exception as e:
assert e.args == ("path into atom",)
else:
assert False
@pytest.mark.asyncio
async def test_make_fake_coin(self, two_wallet_nodes):
num_blocks = 5
full_nodes, wallets = two_wallet_nodes
full_node_1 = full_nodes[0]
server_1 = full_node_1.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node.wallet_state_manager, wallet, uint64(101)
)
ph2 = await wallet2.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
await time_out_assert(15, did_wallet.get_spendable_balance, 101)
coins = await did_wallet.select_coins(1)
coin = coins.pop()
# copy info for later
parent_info = await did_wallet.get_parent_for_coin(coin)
id_puzhash = coin.puzzle_hash
await did_wallet.create_spend(ph)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet.get_confirmed_balance, 0)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 0)
tx_record = await wallet.generate_signed_transaction(101, id_puzhash)
await wallet.push_transaction(tx_record)
for i in range(1, num_blocks):
await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, wallet.get_confirmed_balance, 21999999999899)
await time_out_assert(15, wallet.get_unconfirmed_balance, 21999999999899)
coins = await did_wallet.select_coins(1)
assert len(coins) >= 1
coin = coins.pop()
# Write spend by hand
# innerpuz solution is (mode amount new_puz identity my_puz)
innersol = Program.to([0, coin.amount, ph, coin.name(), coin.puzzle_hash])
# full solution is (corehash parent_info my_amount innerpuz_reveal solution)
innerpuz = did_wallet.did_info.current_inner
full_puzzle: Program = did_wallet_puzzles.create_fullpuz(
innerpuz,
did_wallet.did_info.origin_coin.puzzle_hash,
)
fullsol = Program.to(
[
[did_wallet.did_info.origin_coin.parent_coin_info, did_wallet.did_info.origin_coin.amount],
[
parent_info.parent_name,
parent_info.inner_puzzle_hash,
parent_info.amount,
],
coin.amount,
innersol,
]
)
list_of_solutions = [CoinSolution(coin, full_puzzle, fullsol)]
# sign for AGG_SIG_ME
message = coin.puzzle_hash + coin.name() + did_wallet.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA
pubkey = did_wallet_puzzles.get_pubkey_from_innerpuz(innerpuz)
index = await did_wallet.wallet_state_manager.puzzle_store.index_for_pubkey(pubkey)
private = master_sk_to_wallet_sk(did_wallet.wallet_state_manager.private_key, index)
signature = AugSchemeMPL.sign(private, message)
sigs = [signature]
aggsig = AugSchemeMPL.aggregate(sigs)
spend_bundle = SpendBundle(list_of_solutions, aggsig)
did_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=ph,
amount=uint64(coin.amount),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=did_wallet.wallet_info.id,
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=token_bytes(),
)
await did_wallet.standard_wallet.push_transaction(did_record)
await time_out_assert(15, wallet.get_confirmed_balance, 21999999999899)
await time_out_assert(15, wallet.get_unconfirmed_balance, 21999999999899)
ph2 = Program.to(binutils.assemble("()")).get_tree_hash()
for i in range(1, num_blocks + 3):
await full_node_1.farm_new_block(FarmNewBlockProtocol(ph2))
# It ends in 900 so it's not gone through
# Assert coin ID is failing
await time_out_assert(15, wallet.get_confirmed_balance, 23999999999899)
await time_out_assert(15, wallet.get_unconfirmed_balance, 23999999999899)
"""
| 43.306477 | 119 | 0.690293 |
acf6fc3a842376560909cd763895ddaa9e17319a | 1,007 | py | Python | loss/build_loss.py | KaenChan/face-uncertainty-pytorch | e0641092a4e09b16d6d008d9bbb93913ac6d72df | [
"MIT"
] | 3 | 2022-01-12T16:07:02.000Z | 2022-01-24T07:02:37.000Z | loss/build_loss.py | KaenChan/face-uncertainty-pytorch | e0641092a4e09b16d6d008d9bbb93913ac6d72df | [
"MIT"
] | null | null | null | loss/build_loss.py | KaenChan/face-uncertainty-pytorch | e0641092a4e09b16d6d008d9bbb93913ac6d72df | [
"MIT"
] | null | null | null | from loss.idq_loss import IDQ_Loss
from loss.pc_loss import PC_Loss
from loss.fast_mls_loss import Fast_MLS_Loss
from loss.mls_loss import MLS_Loss
def build_uncertainty_loss(config):
    """Build the uncertainty loss criterion selected by ``config``.

    Args:
        config: configuration object. ``config.uncertainty_loss_type``
            selects the criterion ('idq_loss', 'pc_loss', 'fast_mls_loss';
            anything else — or a missing attribute — falls back to MLS).
            'idq_loss' additionally reads ``config.idq_s`` and
            ``config.idq_m``; 'fast_mls_loss' requires
            ``config.uncertainty_size == 1``.

    Returns:
        tuple: ``(criterion_uncertainty, is_confidence_prob)`` where
        ``is_confidence_prob`` is True for the losses whose output is a
        confidence probability ('pc_loss', 'loser_loss', 'idq_loss').
    """
    # Read the attribute once with a None default. The original code guarded
    # each branch with `'uncertainty_loss_type' in dir(config)` but then
    # accessed config.uncertainty_loss_type unguarded in the print and the
    # membership test below, which raised AttributeError when the attribute
    # was absent; getattr keeps the fallback-to-MLS behavior without crashing.
    loss_type = getattr(config, 'uncertainty_loss_type', None)
    if loss_type == 'idq_loss':
        criterion_uncertainty = IDQ_Loss(config.idq_s, config.idq_m)
    elif loss_type == 'pc_loss':
        criterion_uncertainty = PC_Loss()
    elif loss_type == 'fast_mls_loss':
        # Fast MLS only supports a scalar (size-1) uncertainty head.
        assert config.uncertainty_size == 1
        criterion_uncertainty = Fast_MLS_Loss()
    else:
        criterion_uncertainty = MLS_Loss()
    print('uncertainty_loss_type', loss_type)
    # These losses emit a confidence probability rather than a log-variance.
    is_confidence_prob = loss_type in ('pc_loss', 'loser_loss', 'idq_loss')
    return criterion_uncertainty, is_confidence_prob
| 43.782609 | 100 | 0.75571 |
acf6fc496c12ac6e3452fc9a153bef33b8aa35b8 | 1,947 | py | Python | discrete/test_benchmarking.py | Daffan/APPLR | ceb394cd337b4b4ccb3915d05d1f42fd317c5c8e | [
"MIT"
] | 2 | 2020-11-14T22:08:08.000Z | 2021-04-24T19:45:40.000Z | discrete/test_benchmarking.py | Daffan/APPLR | ceb394cd337b4b4ccb3915d05d1f42fd317c5c8e | [
"MIT"
] | null | null | null | discrete/test_benchmarking.py | Daffan/APPLR | ceb394cd337b4b4ccb3915d05d1f42fd317c5c8e | [
"MIT"
] | 2 | 2020-10-16T20:58:13.000Z | 2020-11-14T22:08:13.000Z | from os.path import exists, join
import json
import os
import numpy as np
import torch
import time
import pickle
# Root directory holding the benchmark output: per-actor subdirectories
# containing pickled trajectories (see main() below).
BASE_PATH = '/u/zifan/APPLR/buffer_test'

# World indices of the 50 benchmarking test environments. The numeric suffix
# of each 'actor_<i>' directory indexes into this list to recover the world id
# used as the report key.
benchmarking_test = [0, 8, 17, 19, 27, 32, 41, 47, 48, 57, 64, 69, 76, 78, 88, 93, 100, 104, 112, 118, 123, 129, 133, 138, 144, 150, 159, 163, 168, 175, 184, 189, 193, 201, 208, 214, 218, 226, 229, 237, 240, 246, 256, 258, 265, 270, 277, 284, 290, 294]
def main():
    """Aggregate benchmarking trajectories under BASE_PATH into a report.

    Walks BASE_PATH for 'actor_<i>' directories, loads every pickled
    trajectory found anywhere under BASE_PATH, accumulates per-world episode
    return, episode length and success flags, prints the averages, and writes
    the raw per-world lists to ``<BASE_PATH>/report.json``.
    """
    def get_world_name(dirname):
        # Directory names end in '_<actor index>'; map that index through
        # benchmarking_test to recover the world id used as the report key.
        idx = benchmarking_test[int(dirname.split('_')[-1])]
        return 'world_' + str(idx)

    # First pass: create an empty stats bucket for every actor_* directory,
    # so worlds with no pickles still appear in the report.
    result = {}
    for dirname, dirnames, filenames in os.walk(BASE_PATH):
        for d in dirnames:
            if d.startswith('actor'):
                idx = benchmarking_test[int(d.split('_')[-1])]
                result['world_' + str(idx)] = {
                    'ep_return': [],
                    'ep_length': [],
                    'succeed': []
                }
    # Second pass: read each pickled trajectory and accumulate its stats.
    for dirname, dirnames, filenames in os.walk(BASE_PATH):
        for filename in filenames:
            p = join(dirname, filename)
            if p.endswith('.pickle'):
                with open(p, 'rb') as f:
                    traj = pickle.load(f)
                world = get_world_name(dirname)
                # Each transition is a tuple: index 2 is summed as the
                # episode return (presumably the per-step reward), and the
                # last element of the final transition is a dict carrying
                # the 'succeed' flag — TODO confirm against the writer.
                result[world]['ep_return'].append(sum([t[2] for t in traj]))
                result[world]['ep_length'].append(len(traj))
                result[world]['succeed'].append(int(traj[-1][-1]['succeed']))
    print('>>>>>>>>>>>>>>>>>>>>>>>>>>> Report <<<<<<<<<<<<<<<<<<<<<<<<<<<<')
    print('supported samples: %f per world' %(np.mean([len(result[k]['ep_return']) for k in result.keys()])))
    # Mean-of-means across worlds for each metric (NaN if a world is empty).
    for k2 in ['ep_return', 'ep_length', 'succeed']:
        k1 = result.keys()
        avg = np.mean([np.mean(result[k][k2]) for k in k1])
        print('Avg %s: %f' %(k2, avg))
    # Persist the raw per-world lists alongside the benchmark output.
    with open(join(BASE_PATH, 'report.json'), 'w') as fp:
        json.dump(result, fp)


if __name__ == "__main__":
    main()
| 36.735849 | 252 | 0.53056 |
acf6fde5d84c736326669d3d00a7f96af74a621a | 24,279 | py | Python | opflexagent/test/test_gbp_ovs_agent.py | elastx/python-opflex-agent | 955f5fa66ee52c1fc58aded06eef1fe735b86bc6 | [
"Apache-2.0"
] | 7 | 2015-09-04T06:18:11.000Z | 2017-07-12T07:35:35.000Z | opflexagent/test/test_gbp_ovs_agent.py | elastx/python-opflex-agent | 955f5fa66ee52c1fc58aded06eef1fe735b86bc6 | [
"Apache-2.0"
] | 86 | 2015-04-10T15:53:47.000Z | 2021-08-18T10:31:09.000Z | opflexagent/test/test_gbp_ovs_agent.py | elastx/python-opflex-agent | 955f5fa66ee52c1fc58aded06eef1fe735b86bc6 | [
"Apache-2.0"
] | 17 | 2015-04-10T15:41:45.000Z | 2021-08-30T10:23:34.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shutil
import sys
from unittest import mock
sys.modules["apicapi"] = mock.Mock() # noqa
sys.modules["pyinotify"] = mock.Mock() # noqa
from opflexagent import gbp_agent
from opflexagent import snat_iptables_manager
from opflexagent.test import base
from opflexagent.utils.ep_managers import endpoint_file_manager
from neutron.agent.linux import ip_lib
from neutron.api.rpc.callbacks import events
from neutron.conf.agent import dhcp as dhcp_config
from neutron.objects import trunk as trunk_obj
from neutron.plugins.ml2.drivers.openvswitch.agent import (
ovs_neutron_agent as ovs)
from oslo_config import cfg
from oslo_utils import uuidutils
# Shorthand for generating UUIDs throughout the tests.
_uuid = uuidutils.generate_uuid

# Dotted path of the ML2 RPC notifier class patched out in setUp().
NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
# Template for the per-test endpoint-file directory; '%s' is filled with a
# UUID so concurrent test runs do not collide (removed by _purge_endpoint_dir).
EP_DIR = '.%s_endpoints/'
class TestGBPOpflexAgent(base.OpflexTestBase):
    def setUp(self):
        """Build a fully mocked GBPOpflexAgent and register test cleanups."""
        cfg.CONF.register_opts(dhcp_config.DHCP_OPTS)
        super(TestGBPOpflexAgent, self).setUp()
        mock.patch('neutron.agent.ovsdb.impl_idl.api_factory').start()
        # Replace the ML2 RPC notifier with a Mock so no RPC traffic happens.
        notifier_p = mock.patch(NOTIFIER)
        notifier_cls = notifier_p.start()
        self.notifier = mock.Mock()
        notifier_cls.return_value = self.notifier
        # No-op firewall so security groups never touch iptables in tests.
        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        cfg.CONF.set_default('quitting_rpc_timeout', 10, 'AGENT')
        # Unique endpoint-file directory per test run; removed in cleanup.
        self.ep_dir = EP_DIR % _uuid()
        self.agent = self._initialize_agent()
        self._mock_agent(self.agent)
        self.addCleanup(self._purge_endpoint_dir)
        self.addCleanup(self.agent.bridge_manager.int_br.reset_mock)
        self.addCleanup(self.agent.bridge_manager.fabric_br.reset_mock)
        # Stub out IPDevice globally; individual tests re-patch as needed.
        ip_lib.IPDevice = mock.Mock()
def _try_port_binding_args(self, net_type='net_type'):
port = mock.Mock()
port.trunk_details = None
port.vif_id = uuidutils.generate_uuid()
return {'port': port,
'net_uuid': 'net_id',
'network_type': net_type,
'physical_network': 'phys_net',
'fixed_ips': [{'subnet_id': 'id1',
'ip_address': '192.168.0.2'},
{'subnet_id': 'id2',
'ip_address': '192.168.1.2'}],
'device_owner': 'compute:',
'segmentation_id': ''}
def _purge_endpoint_dir(self):
try:
shutil.rmtree(self.ep_dir)
except OSError:
pass
    def _initialize_agent(self):
        """Construct a GBPOpflexAgent with all external I/O patched out.

        Patches OVS bridge setup, the report-state RPC, and the looping
        call (replaced by a run-once stand-in) so the agent can be built
        without a running OVS or message bus, then attaches basic mocks.
        """
        cfg.CONF.set_override('epg_mapping_dir', self.ep_dir, 'OPFLEX')
        kwargs = gbp_agent.create_agent_config_map(cfg.CONF)

        class MockFixedIntervalLoopingCall(object):
            # Stand-in for oslo's FixedIntervalLoopingCall: start() invokes
            # the wrapped function exactly once, synchronously.
            def __init__(self, f):
                self.f = f

            def start(self, interval=0):
                self.f()

        # Everything here must stay patched for the duration of agent
        # construction — the constructor touches OVS and reports state.
        resources = [
            mock.patch('opflexagent.utils.bridge_managers.ovs_manager.'
                       'OvsManager.setup_integration_bridge',
                       return_value=mock.Mock()),
            mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
                       'create'),
            mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
                       'set_secure_mode'),
            mock.patch('neutron.agent.common.ovs_lib.OVSBridge.'
                       'get_local_port_mac',
                       return_value='00:00:00:00:00:01'),
            mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),
            mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
                       new=MockFixedIntervalLoopingCall),
            mock.patch('opflexagent.gbp_agent.GBPOpflexAgent.'
                       '_report_state')]
        with base.nested_context_manager(*resources):
            agent = gbp_agent.GBPOpflexAgent(**kwargs)
            # set back to true because initial report state will succeed due
            # to mocked out RPC calls
            agent.use_call = True
            agent.tun_br = mock.Mock()
            agent.host = 'host1'
            agent.sg_agent = mock.Mock()
        return agent
    def _mock_agent(self, agent):
        """Replace the agent's file-writing, bridge and RPC surfaces with mocks.

        After this call the agent performs no real filesystem, OVS or RPC
        work, so tests can assert on the mocks instead.
        """
        # Mock EP manager methods
        agent.ep_manager._write_endpoint_file = mock.Mock(
            return_value=agent.ep_manager.epg_mapping_file)
        agent.ep_manager._write_vrf_file = mock.Mock()
        agent.ep_manager._delete_endpoint_file = mock.Mock()
        agent.ep_manager._delete_vrf_file = mock.Mock()
        agent.ep_manager.snat_iptables = mock.Mock()
        agent.ep_manager.snat_iptables.setup_snat_for_es = mock.Mock(
            return_value=tuple([None, None]))
        agent.ep_manager._release_int_fip = mock.Mock()
        agent.opflex_networks = ['phys_net']
        # Mock bridge
        agent.bridge_manager.int_br = mock.Mock()
        agent.bridge_manager.int_br.get_vif_port_set = mock.Mock(
            return_value=set())
        agent.bridge_manager.add_patch_ports = mock.Mock()
        agent.bridge_manager.delete_patch_ports = mock.Mock()
        agent.bridge_manager.fabric_br = mock.Mock()
        agent.bridge_manager.trunk_rpc = mock.Mock()
        agent.of_rpc.get_gbp_details = mock.Mock()
        agent.port_manager.of_rpc.request_endpoint_details_list = mock.Mock()
        # Stop the background notify worker so it cannot interfere with tests.
        agent.notify_worker.terminate()
    def test_port_unbound_snat_cleanup(self):
        """SNAT state is cleaned up only when the last sharing port unbinds.

        Two ports are bound against the same external segment; unbinding the
        first must not trigger SNAT cleanup, unbinding the second must.
        """
        self.agent.int_br = mock.Mock()
        with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as mock1:
            instance = mock1.return_value
            instance.link.address = 'foo-mac'
            mapping = self._get_gbp_details()
            self.agent.of_rpc.get_gbp_details.return_value = mapping
            setup_snat = self.agent.ep_manager.snat_iptables.setup_snat_for_es
            setup_snat.return_value = (
                tuple(['foo-if', 'foo-mac']))
            args_1 = self._try_port_binding_args('opflex')
            args_1['port'].gbp_details = mapping
            self.agent.try_port_binding(**args_1)
            instance.link.address = 'aa:bb:cc:00:11:44'
            args_2 = self._try_port_binding_args('opflex')
            args_2['port'].gbp_details = mapping
            setup_snat.return_value = (
                tuple(['foo-if', 'aa:bb:cc:00:11:44']))
            self.agent.try_port_binding(**args_2)
            # SNAT for the external segment is set up once, not per-port.
            self.assertEqual(
                1,
                self.agent.ep_manager.snat_iptables
                .setup_snat_for_es.call_count)
            self.agent.port_unbound(args_1['port'].vif_id)
            self.assertFalse(
                self.agent.ep_manager.snat_iptables.cleanup_snat_for_es.called)
            self.agent.port_unbound(args_2['port'].vif_id)
            (self.agent.ep_manager.
                snat_iptables.cleanup_snat_for_es.assert_called_with('EXT-1'))
            self.agent.ep_manager._delete_endpoint_file.assert_called_with(
                'EXT-1')
    def test_try_port_binding_no_mapping(self):
        """Without GBP details, binding must touch neither OVS nor EP files."""
        self.agent.int_br = mock.Mock()
        args = self._try_port_binding_args('opflex')
        args['port'].gbp_details = None
        self.agent.try_port_binding(**args)
        self.assertFalse(self.agent.int_br.set_db_attribute.called)
        self.assertFalse(self.agent.ep_manager._write_endpoint_file.called)
    def test_subnet_update(self):
        """A subnet update queues its tenant id in the agent's updated_vrf set."""
        fake_sub = {'tenant_id': 'tenant-id', 'id': 'someid'}
        self.agent.subnet_update(mock.Mock(), fake_sub)
        self.assertEqual(set(['tenant-id']), self.agent.updated_vrf)
    def test_subnet_has_updates(self):
        """_agent_has_updates turns true once a subnet update is queued."""
        fake_sub = {'tenant_id': 'tenant-id', 'id': 'someid'}
        polling_manager = mock.Mock()
        polling_manager.is_polling_required = False
        self.agent.sg_agent.firewall_refresh_needed = mock.Mock(
            return_value=False)
        # With polling off and no firewall refresh, there is nothing pending.
        self.assertFalse(self.agent._agent_has_updates(polling_manager))
        self.agent.subnet_update(mock.Mock(), fake_sub)
        self.assertTrue(self.agent._agent_has_updates(polling_manager))
    def test_process_network_ports(self):
        """VRF updates queued via subnet_update are flushed to the VRF file.

        Binds a port owning VRF 'tenant-id', queues a subnet update for the
        same tenant, then verifies process_network_ports re-fetches the VRF
        details and rewrites the VRF file with the updated subnet list.
        """
        self.agent.bridge_manager.add_patch_ports = mock.Mock()
        fake_sub = {'tenant_id': 'tenant-id', 'id': 'someid'}
        mapping = self._get_gbp_details(l3_policy_id='tenant-id')
        self.agent.of_rpc.get_vrf_details_list = mock.Mock(
            return_value=[{'l3_policy_id': 'tenant-id',
                           'vrf_tenant': mapping['vrf_tenant'],
                           'vrf_name': mapping['vrf_name'],
                           'vrf_subnets': mapping['vrf_subnets'] +
                           ['1.1.1.0/24']}])
        args = self._try_port_binding_args('opflex')
        args['port'].gbp_details = mapping
        self.agent.try_port_binding(**args)
        self.agent.bridge_manager.add_patch_ports.assert_called_once_with(
            [args['port'].vif_id])
        # Reset so the later assertion only sees the write caused by the
        # VRF update, not the one from the initial binding.
        self.agent.ep_manager._write_vrf_file.reset_mock()
        self.agent.subnet_update(mock.Mock(), fake_sub)
        port_info = self.agent.bridge_manager.scan_ports(set())
        port_info['vrf_updated'] = self.agent.updated_vrf
        port_info['added'] = set(['1', '2'])
        self.agent.process_network_ports(port_info, False)
        self.agent.of_rpc.get_vrf_details_list.assert_called_once_with(
            mock.ANY, mock.ANY, set(['tenant-id']), mock.ANY)
        self.agent.ep_manager._write_vrf_file.assert_called_once_with(
            'tenant-id', {
                "domain-policy-space": mapping['vrf_tenant'],
                "domain-name": mapping['vrf_name'],
                "internal-subnets": sorted(['192.168.0.0/16',
                                            '192.169.0.0/16',
                                            '1.1.1.0/24',
                                            '169.254.0.0/16'])})
    def test_stale_endpoints_in_process_network_ports(self):
        """Endpoints queued as stale are undeclared during port processing."""
        self.agent.ep_manager.undeclare_endpoint = mock.Mock()
        self.agent.plugin_rpc.update_device_down = mock.Mock()
        self.agent.ep_manager._stale_endpoints.add('EXT-2.ep')
        self.agent.process_network_ports({}, False)
        self.agent.ep_manager.undeclare_endpoint.assert_called_once_with(
            'EXT-2.ep')
    def test_dead_port(self):
        """An update for a port with no device details undeclares by vif_id."""
        port = mock.Mock(ofport=1)
        self.agent.bridge_manager.int_br.get_vif_port_by_id = mock.Mock(
            return_value=port)
        with mock.patch.object(gbp_agent.ep_manager.EndpointFileManager,
                               'undeclare_endpoint'):
            self.agent.treat_devices_added_or_updated(
                {'device': 'some_device'})
            self.agent.ep_manager.undeclare_endpoint.assert_called_once_with(
                port.vif_id)
    def test_missing_port(self):
        """A device absent from OVS is undeclared by its device id."""
        self.agent.bridge_manager.int_br.get_vif_port_by_id = mock.Mock(
            return_value=None)
        with mock.patch.object(gbp_agent.ep_manager.EndpointFileManager,
                               'undeclare_endpoint'):
            self.agent.treat_devices_added_or_updated(
                {'device': 'some_device'})
            self.agent.ep_manager.undeclare_endpoint.assert_called_once_with(
                'some_device')
    def test_admin_disabled_port(self):
        """Admin-down removes the mapping file; admin-up writes it again."""
        # Set port's admin_state_up to False => mapping file should be removed
        mapping = self._get_gbp_details(device='some_device')
        port_details = {'device': 'some_device',
                        'admin_state_up': False,
                        'port_id': mapping['port_id'],
                        'network_id': 'some-net',
                        'network_type': 'opflex',
                        'physical_network': 'phys_net',
                        'segmentation_id': '',
                        'fixed_ips': [],
                        'device_owner': 'some-vm'}
        self.agent.plugin_rpc.update_device_up = mock.Mock()
        self.agent.plugin_rpc.update_device_down = mock.Mock()
        port = mock.Mock(ofport=1, vif_id=mapping['port_id'])
        self.agent.bridge_manager.int_br.get_vif_port_by_id = mock.Mock(
            return_value=port)
        self.agent.ep_manager._mapping_cleanup = mock.Mock()
        self.agent.ep_manager._mapping_to_file = mock.Mock()
        self.agent.treat_devices_added_or_updated(
            {'device': 'some_device', 'neutron_details': port_details,
             'gbp_details': mapping, 'port_id': 'port_id'})
        self.agent.ep_manager._mapping_cleanup.assert_called_once_with(
            mapping['port_id'])
        # Re-enabling the port must write the endpoint mapping again.
        port_details['admin_state_up'] = True
        self.agent.treat_devices_added_or_updated(
            {'device': 'some_device', 'neutron_details': port_details,
             'gbp_details': mapping, 'port_id': 'port_id'})
        self.assertTrue(self.agent.ep_manager._mapping_to_file.called)
    def test_stale_endpoints(self):
        """On startup, EP files for ports no longer in OVS are undeclared.

        Writes endpoint files for two port uuids, then starts a fresh agent
        whose bridge only reports 'uuid1': the 'uuid2' endpoint must be
        undeclared as stale.
        """
        self.agent.ep_manager._write_file(
            'uuid1_AA', {}, self.agent.ep_manager.epg_mapping_file)
        self.agent.ep_manager._write_file(
            'uuid1_BB', {}, self.agent.ep_manager.epg_mapping_file)
        self.agent.ep_manager._write_file(
            'uuid1_CC', {}, self.agent.ep_manager.epg_mapping_file)
        self.agent.ep_manager._write_file(
            'uuid2_BB', {}, self.agent.ep_manager.epg_mapping_file)
        # NOTE(review): 'uuid2_BB' is written twice; the second write simply
        # overwrites the first — possibly a different suffix was intended.
        self.agent.ep_manager._write_file(
            'uuid2_BB', {}, self.agent.ep_manager.epg_mapping_file)
        resources = [
            mock.patch.object(
                snat_iptables_manager.SnatIptablesManager,
                'cleanup_snat_all'),
            mock.patch.object(
                snat_iptables_manager.SnatIptablesManager,
                'check_if_exists', return_value=False),
            mock.patch.object(
                endpoint_file_manager.EndpointFileManager,
                'undeclare_endpoint'),
            mock.patch.object(ovs.OVSPluginApi, 'update_device_down')]
        with base.nested_context_manager(*resources):
            port_stats = {'regular': {'added': 0,
                                      'updated': 0,
                                      'removed': 0},
                          'ancillary': {'added': 0,
                                        'removed': 0}}
            agent = self._initialize_agent()
            self._mock_agent(agent)
            agent.bridge_manager.int_br.get_vif_port_set = mock.Mock(
                return_value=set(['uuid1']))
            agent._main_loop(set(), True, 1, port_stats, mock.Mock(), True)
            agent.ep_manager.undeclare_endpoint.assert_called_once_with(
                'uuid2')
    def test_process_deleted_ports(self):
        """Removed and explicitly-deleted ports are each undeclared once.

        Ports '3' and '5' come from the bridge scan's 'removed' set, '4'
        only from the agent's deleted_ports queue ('3' appears in both);
        each must be undeclared and have its patch ports removed once.
        """
        self.agent.bridge_manager.delete_patch_ports = mock.Mock()
        resources = [
            mock.patch.object(
                snat_iptables_manager.SnatIptablesManager,
                'cleanup_snat_all'),
            mock.patch.object(
                endpoint_file_manager.EndpointFileManager,
                'undeclare_endpoint'),
            mock.patch.object(ovs.OVSPluginApi, 'update_device_down')]
        with base.nested_context_manager(*resources):
            agent = self._initialize_agent()
            self._mock_agent(agent)
            port_info = {'current': set(['1', '2']),
                         'removed': set(['3', '5'])}
            agent.bridge_manager.scan_ports = mock.Mock(return_value=port_info)
            agent.bridge_manager.delete_patch_ports = mock.Mock()
            agent.deleted_ports.add('3')
            agent.deleted_ports.add('4')
            port_stats = {'regular': {'added': 0,
                                      'updated': 0,
                                      'removed': 0},
                          'ancillary': {'added': 0,
                                        'removed': 0}}
            agent._main_loop(set(), True, 1, port_stats, mock.Mock(), True)
            # 3, 4 and 5 are undeclared once
            expected = [mock.call('3'), mock.call('4'), mock.call('5')]
            self._check_call_list(
                expected,
                agent.ep_manager.undeclare_endpoint.call_args_list)
            expected = [mock.call(['3']), mock.call(['4']), mock.call(['5'])]
            self._check_call_list(
                expected,
                agent.bridge_manager.delete_patch_ports.call_args_list)
    def test_process_vrf_update(self):
        """VRF files are only deleted for VRFs the agent actually owns.

        A VRF update for an unowned VRF must be a no-op; after a port binds
        into the VRF, the same update must delete the (now stale) VRF file.
        """
        # NOTE(review): process_vrf_update is called with 'tenant_id'
        # (underscore) while the mapping's l3_policy_id is 'tenant-id'
        # (hyphen) — confirm this mismatch is intentional.
        self.agent.ep_manager._delete_vrf_file = mock.Mock()
        self.agent.of_rpc.get_vrf_details_list = mock.Mock(
            return_value=[{'l3_policy_id': 'tenant-id',
                           'vrf_tenant': 'tn-tenant',
                           'vrf_name': 'ctx'}])
        self.agent.process_vrf_update(set(['tenant_id']))
        # not called because VRF is not owned
        self.assertFalse(self.agent.ep_manager._delete_vrf_file.called)
        # now create a port for this vrf
        mapping = self._get_gbp_details(l3_policy_id='tenant-id')
        self.agent.of_rpc.get_gbp_details.return_value = mapping
        args = self._try_port_binding_args('opflex')
        args['port'].gbp_details = mapping
        self.agent.ep_manager._write_vrf_file = mock.Mock()
        self.agent.try_port_binding(**args)
        self.agent.ep_manager._write_vrf_file.assert_called_once_with(
            'tenant-id', {
                "domain-policy-space": mapping['vrf_tenant'],
                "domain-name": mapping['vrf_name'],
                "internal-subnets": sorted(['192.168.0.0/16',
                                            '192.169.0.0/16',
                                            '169.254.0.0/16'])})
        self.assertFalse(self.agent.ep_manager._delete_vrf_file.called)
        # Now simulate a deletion
        self.agent.process_vrf_update(set(['tenant_id']))
        self.agent.ep_manager._delete_vrf_file.assert_called_once_with(
            'tenant-id')
    def test_apply_config_interval(self):
        """The initialized agent's config apply interval is 0.5 seconds."""
        self.assertEqual(0.5, self.agent.config_apply_interval)
    def test_trunk_handler(self):
        """Trunk subports are patched in only while the trunk is managed.

        handle_subports events for an unmanaged trunk must be ignored;
        manage_trunk must bind all subports (with their reported MACs) and
        unmanage_trunk must delete the corresponding patch ports.
        """
        port = mock.Mock()
        port.vif_id = uuidutils.generate_uuid()
        trunk_id = uuidutils.generate_uuid()
        subports = [
            trunk_obj.SubPort(
                port_id=uuidutils.generate_uuid(), trunk_id=trunk_id,
                segmentation_type='foo', segmentation_id=i)
            for i in range(2)]
        trunk_details = {}
        trunk_details['trunk_id'] = trunk_id
        trunk_details['master_port_id'] = port.vif_id
        trunk_details['subports'] = subports
        port.trunk_details = trunk_details
        # Events for a trunk that is not (yet) managed are no-ops.
        self.agent.bridge_manager.handle_subports(
            None, None, subports, events.CREATED)
        self.agent.bridge_manager.handle_subports(
            None, None, subports, events.DELETED)
        self.assertFalse(self.agent.bridge_manager.add_patch_ports.called)
        self.assertFalse(self.agent.bridge_manager.delete_patch_ports.called)
        # Fake RPC: assigns MAC '0', '1', ... to the subports in order.
        def binding_call(context, subports):
            return {trunk_id: [{'id': x.port_id, 'mac_address': '%s' % i}
                               for i, x in enumerate(subports)]}
        self.agent.bridge_manager.trunk_rpc.update_subport_bindings = (
            binding_call)
        self.agent.bridge_manager.manage_trunk(port)
        self.agent.bridge_manager.unmanage_trunk(port.vif_id)
        self.agent.bridge_manager.add_patch_ports.assert_called_with(
            [subports[0].port_id, subports[1].port_id],
            attached_macs={subports[0].port_id: '0', subports[1].port_id: '1'})
        call_args = self.agent.bridge_manager.delete_patch_ports.call_args
        self.assertEqual(set(call_args[0][0]),
                         set([subports[0].port_id, subports[1].port_id]))
def _test_port_bound_to_host(self, net_type, svi=False):
if net_type is 'vlan':
vlan_info = {}
vlan_info['device'] = 'some_device'
if svi:
vlan_info['svi'] = True
vlan_info['endpoint_group_name'] = 'svi-net-id'
mapping = self._get_gbp_details(**vlan_info)
else:
mapping = self._get_gbp_details(device='some_device')
seg_id = 1234 if net_type is 'vlan' else ''
port_details = {'device': 'some_device',
'admin_state_up': True,
'port_id': mapping['port_id'],
'network_id': 'some-net',
'network_type': net_type,
'physical_network': 'phys_net',
'segmentation_id': seg_id,
'fixed_ips': [],
'device_owner': 'some-vm'}
self.agent.plugin_rpc.update_device_up = mock.Mock()
self.agent.plugin_rpc.update_device_down = mock.Mock()
port = mock.Mock(ofport=1, vif_id=mapping['port_id'])
self.agent.bridge_manager.int_br.get_vif_port_by_id = mock.Mock(
return_value=port)
self.agent.ep_manager._mapping_cleanup = mock.Mock()
self.agent.ep_manager._mapping_to_file = mock.Mock()
# first test is with no binding attribute. This is what
# happens when port binding is first attempted, in order to
# bind the port to a host.
mapping['host'] = ''
self.agent.treat_devices_added_or_updated(
{'device': 'some_device', 'neutron_details': port_details,
'gbp_details': mapping, 'port_id': 'port_id'})
epargs = self.agent.ep_manager._mapping_to_file.call_args_list
self.assertEqual(port_details['network_type'],
epargs[0][0][0].network_type)
if net_type is 'vlan':
self.assertEqual(port_details['segmentation_id'],
epargs[0][0][0].segmentation_id)
if svi:
self.assertEqual(True, epargs[0][0][1].get('svi'))
self.assertEqual(vlan_info['endpoint_group_name'],
epargs[0][0][1].get('endpoint_group_name'))
else:
self.assertIsNone(epargs[0][0][1].get('svi'))
self.assertEqual(mapping['endpoint_group_name'],
epargs[0][0][1].get('endpoint_group_name'))
else:
self.assertIsNone(epargs[0][0][1].get('svi'))
self.assertEqual(mapping['endpoint_group_name'],
epargs[0][0][1].get('endpoint_group_name'))
self.assertEqual('', epargs[0][0][0].segmentation_id)
self.agent.ep_manager._mapping_cleanup.assert_called_once_with(
port_details['port_id'], cleanup_vrf=False,
mac_exceptions=set([mapping['mac_address']]))
self.agent.ep_manager._mapping_cleanup.reset_mock()
self.agent.ep_manager._mapping_to_file.reset_mock()
# Now try binding with a different host
mapping['host'] = 'host2'
self.agent.treat_devices_added_or_updated(
{'device': 'some_device', 'neutron_details': port_details,
'gbp_details': mapping, 'port_id': 'port_id'})
self.assertFalse(self.agent.ep_manager._mapping_to_file.called)
self.agent.ep_manager._mapping_cleanup.assert_called_once_with(
port_details['device'])
    def test_port_bound_to_host_net_opflex(self):
        """Host binding for an opflex-type network."""
        self._test_port_bound_to_host('opflex')
    def test_port_bound_to_host_net_vlan(self):
        """Host binding for a vlan-type network."""
        self._test_port_bound_to_host('vlan')
    def test_port_bound_to_host_net_vlan_svi(self):
        """Host binding for a vlan-type network with an SVI endpoint group."""
        self._test_port_bound_to_host('vlan', svi=True)
    def test_vrf_update(self):
        """An opflex VRF notification queues the VRF id in updated_vrf."""
        fake_vrf = 'coke-tenant coke-vrf'
        self.agent.opflex_notify_vrf(mock.Mock(), fake_vrf)
        self.assertEqual(set(['coke-tenant coke-vrf']), self.agent.updated_vrf)
| 45.381308 | 79 | 0.608921 |
acf6fe641d0242f135209b313b6a5e67ebb2e40c | 1,910 | py | Python | regression/data/subreducedgaussian.py | b8raoult/magics | eb2c86ec6e392e89c90044128dc671f22283d6ad | [
"ECL-2.0",
"Apache-2.0"
] | 41 | 2018-12-07T23:10:50.000Z | 2022-02-19T03:01:49.000Z | regression/data/subreducedgaussian.py | b8raoult/magics | eb2c86ec6e392e89c90044128dc671f22283d6ad | [
"ECL-2.0",
"Apache-2.0"
] | 59 | 2019-01-04T15:43:30.000Z | 2022-03-31T09:48:15.000Z | regression/data/subreducedgaussian.py | b8raoult/magics | eb2c86ec6e392e89c90044128dc671f22283d6ad | [
"ECL-2.0",
"Apache-2.0"
] | 13 | 2019-01-07T14:36:33.000Z | 2021-09-06T14:48:36.000Z | # (C) Copyright 1996-2016 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
# importing Magics module
from Magics.macro import *
ref = 'subreducedgaussian'
# Setting of the output file name
output = output(output_formats=['png'],
output_name_first_page_number='off',
output_name=ref)
# Setting the coordinates of the geographical area
projection = mmap(subpage_map_projection='cylindrical',
subpage_lower_left_latitude= 0.,
subpage_lower_left_longitude= -70.,
subpage_upper_right_longitude= 70.,
subpage_upper_right_latitude= 90.,
page_id_line = 'off'
)
# Coastlines setting
coast = mcoast(map_grid='on', map_grid_colour='tan',
map_coastline_land_shade='on',
map_coastline_land_shade_colour='cream',
map_coastline_colour='tan')
# Import the z500 data
data = mgrib(grib_input_file_name='subreducedgaussian.grib')
# Define the simple contouring for z500
contour = mcont(
legend='off',
contour_line_colour='navy',
contour_line_thickness=2,
contour_label='on',
contour_highlight_colour='navy',
contour_highlight_thickness=6,
)
title = \
mtext(text_lines=["<font size='1'>Reduced Gaussain Grid...</font>"
,
"<font colour='evergreen'>only a subarae</font> "
,
], text_justification='left', text_font_size=0.8,
text_colour='charcoal')
# To the plot
plot(
output,
projection,
coast,
data,
contour,
mtext(),
)
| 26.527778 | 80 | 0.658115 |
acf6fe8db57cf41d83f1c3442f7fec152577905e | 138 | py | Python | app_rational/apps.py | bogdandrienko/kostanay-minerals | d266b3899f8403b5182e1dadf74b1f8bb580d17c | [
"MIT"
] | 1 | 2021-02-13T08:40:51.000Z | 2021-02-13T08:40:51.000Z | app_rational/apps.py | bogdandrienko/chrysotile-minerals | 47a4097e29ee40f2606807e28b2da466dfd7f3f4 | [
"MIT"
] | null | null | null | app_rational/apps.py | bogdandrienko/chrysotile-minerals | 47a4097e29ee40f2606807e28b2da466dfd7f3f4 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class AppRationalConfig(AppConfig):
    """Django application configuration for the ``app_rational`` app."""
    name = 'app_rational'
    # Admin-facing label (Russian for "rationalization proposals").
    verbose_name = 'Рационализаторство'
acf6feb42af97b53f876bbda264c6c2320e0e30c | 3,535 | py | Python | scripts/preprocessing/run_preprocessing.py | leonl42/MLinPractice | a101531e2861f6f065fa30e25c715791eb119385 | [
"MIT"
] | null | null | null | scripts/preprocessing/run_preprocessing.py | leonl42/MLinPractice | a101531e2861f6f065fa30e25c715791eb119385 | [
"MIT"
] | null | null | null | scripts/preprocessing/run_preprocessing.py | leonl42/MLinPractice | a101531e2861f6f065fa30e25c715791eb119385 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Runs the specified collection of preprocessing steps
Created on Tue Sep 28 16:43:18 2021
@author: lbechberger
"""
import argparse, csv, pickle
import pandas as pd
from sklearn.pipeline import make_pipeline
from scripts.preprocessing.punctuation_remover import PunctuationRemover
from scripts.preprocessing.tokenizer import Tokenizer
from scripts.preprocessing.stopwords import StopWords
from scripts.preprocessing.hashtag_remover import HashtagRemover
from scripts.preprocessing.lower import Lower
from scripts.preprocessing.abbrevations import Abbrevations
from scripts.preprocessing.timedeltas import Timedeltas
from scripts.preprocessing.lemmatization import Lemmatization
from scripts.preprocessing.post import Post
from scripts.util import COLUMN_TWEET, SUFFIX_TOKENIZED, PANDAS_DTYPE
# setting up CLI
parser = argparse.ArgumentParser(description = "Various preprocessing steps")
parser.add_argument("input_file", help = "path to the input csv file")
parser.add_argument("output_file", help = "path to the output csv file")
parser.add_argument("-p", "--punctuation", action = "store_true", help = "remove punctuation")
parser.add_argument("-t", "--tokenize", action = "store_true", help = "tokenize given column into individual words")
parser.add_argument("--tokenize_input", help = "input column to tokenize", default = COLUMN_TWEET)
parser.add_argument("-tdeltas", "--timedeltas", action = "store_true", help = "create timedeltas for tweet creation datetime")
parser.add_argument("-l", "--lower", action = "store_true", help = "make every letter in the tweet lowercase")
parser.add_argument("-ab", "--abbrevations", action = "store_true", help = "replace abbrevations with their long form")
parser.add_argument("-sw", "--stopwords", action = "store_true", help = "remove stopwords from the tweet")
parser.add_argument("-hr","--hashtag_removal", action = "store_true", help = "remove hashtags from the tweet")
parser.add_argument("-post", "--post", action = "store_true", help = "part of speech tag the tweet")
parser.add_argument("-lemma", "--lemmatization", action = "store_true", help = "lemmatize the tweet")
parser.add_argument("-e", "--export_file", help = "create a pipeline and export to the given location", default = None)
args = parser.parse_args()
# load data
df = pd.read_csv(args.input_file, quoting = csv.QUOTE_NONNUMERIC, lineterminator = "\n", dtype = PANDAS_DTYPE)
# collect all preprocessors
# NOTE: the order below defines the transformation order — e.g. hashtag
# removal and lower-casing happen before tokenization, lemmatization last.
preprocessors = []
if args.hashtag_removal:
    preprocessors.append(HashtagRemover())
if args.punctuation:
    preprocessors.append(PunctuationRemover())
if args.lower:
    preprocessors.append(Lower())
if args.abbrevations:
    preprocessors.append(Abbrevations())
if args.tokenize:
    preprocessors.append(Tokenizer(args.tokenize_input, args.tokenize_input + SUFFIX_TOKENIZED))
if args.timedeltas:
    preprocessors.append(Timedeltas())
if args.stopwords:
    preprocessors.append(StopWords())
if args.post:
    preprocessors.append(Post())
if args.lemmatization:
    preprocessors.append(Lemmatization())
# call all preprocessing steps
df = preprocessor.fit_transform(df)
# store the results
df.to_csv(args.output_file, index = False, quoting = csv.QUOTE_NONNUMERIC, line_terminator = "\n")
# create a pipeline if necessary and store it as pickle file
if args.export_file is not None:
    pipeline = make_pipeline(*preprocessors)
    with open(args.export_file, 'wb') as f_out:
        pickle.dump(pipeline, f_out)
acf7004f94ba90a9c7941c1fc8ba98b72f995f3c | 1,196 | py | Python | argostrain/data.py | argosopentech/onmt-models | e706c1ef72e6c46d321b2bac3328c13ef4bcd39e | [
"MIT"
] | 32 | 2020-08-29T20:52:23.000Z | 2021-10-20T08:08:50.000Z | argostrain/data.py | ZendaiOwl/argos-train | e706c1ef72e6c46d321b2bac3328c13ef4bcd39e | [
"MIT"
] | 7 | 2020-12-21T19:49:22.000Z | 2021-10-20T12:17:57.000Z | argostrain/data.py | ZendaiOwl/argos-train | e706c1ef72e6c46d321b2bac3328c13ef4bcd39e | [
"MIT"
] | 3 | 2021-03-15T15:34:33.000Z | 2021-08-05T10:56:36.000Z | import sys
import os
import argparse
import random
from functools import partial
import argostrain
from argostrain.dataset import *
def prepare_data(source_data, target_data):
    """Split parallel source/target corpora into train and validation files.

    Reads the two corpus files through a FileDataset, holds back the first
    VALID_SIZE line pairs as the validation set, and writes the four OpenNMT
    input files under ``run/split_data/`` (src/tgt x train/val).

    :param source_data: path to the source-language corpus file
    :param target_data: path to the target-language corpus file
    :raises AssertionError: if the corpus has no more than VALID_SIZE lines
    :raises OSError: if ``run/split_data`` already exists
    """
    # Fix: the original opened both corpus files without ever closing them;
    # the context manager guarantees the handles are released.
    with open(source_data) as source_file, open(target_data) as target_file:
        dataset = FileDataset(source_file, target_file)
        print("Read data from file")
        # Materialize both sides while the files are still open, in case
        # FileDataset.data() yields lazily from the underlying handles.
        source_lines, target_lines = dataset.data()
        source_lines = list(source_lines)
        target_lines = list(target_lines)

    VALID_SIZE = 2000
    assert len(source_lines) > VALID_SIZE

    # Intentionally fails if the directory already exists, so a previous
    # split is never silently overwritten.
    os.mkdir('run/split_data')

    with open('run/split_data/src-val.txt', 'w') as f:
        f.writelines(source_lines[0:VALID_SIZE])
    with open('run/split_data/src-train.txt', 'w') as f:
        f.writelines(source_lines[VALID_SIZE:])
    with open('run/split_data/tgt-val.txt', 'w') as f:
        f.writelines(target_lines[0:VALID_SIZE])
    with open('run/split_data/tgt-train.txt', 'w') as f:
        f.writelines(target_lines[VALID_SIZE:])

    print('Done splitting data')
acf70175fbe5afbf08862e495c85daaf892947e1 | 8,722 | py | Python | automox_console_sdk/models/pre_patch_prepatch_devices.py | ax-ncolyer/automox-console-sdk-python | 27ba2279e2d59e3f0cbfc00e34eddb51838e402e | [
"MIT"
] | null | null | null | automox_console_sdk/models/pre_patch_prepatch_devices.py | ax-ncolyer/automox-console-sdk-python | 27ba2279e2d59e3f0cbfc00e34eddb51838e402e | [
"MIT"
] | null | null | null | automox_console_sdk/models/pre_patch_prepatch_devices.py | ax-ncolyer/automox-console-sdk-python | 27ba2279e2d59e3f0cbfc00e34eddb51838e402e | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Automox Console API
API for use with the Automox Console # noqa: E501
OpenAPI spec version: 2021-09-01
Contact: support@automox.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
# Swagger-generated model: a device entry in a pre-patch report, exposing
# attribute-style access plus to_dict()/to_str() serialization helpers.
class PrePatchPrepatchDevices(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each Python attribute to its declared swagger type.
    swagger_types = {
        'id': 'int',
        'name': 'str',
        'create_time': 'datetime',
        'group': 'str',
        'connected': 'bool',
        'needs_reboot': 'bool',
        'os_family': 'str',
        'compliant': 'bool',
        'patches': 'Patches'
    }
    # Maps each Python attribute to its JSON key in the API payload.
    attribute_map = {
        'id': 'id',
        'name': 'name',
        'create_time': 'createTime',
        'group': 'group',
        'connected': 'connected',
        'needs_reboot': 'needsReboot',
        'os_family': 'os_family',
        'compliant': 'compliant',
        'patches': 'patches'
    }
    def __init__(self, id=None, name=None, create_time=None, group=None, connected=None, needs_reboot=None, os_family=None, compliant=None, patches=None):  # noqa: E501
        """PrePatchPrepatchDevices - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._name = None
        self._create_time = None
        self._group = None
        self._connected = None
        self._needs_reboot = None
        self._os_family = None
        self._compliant = None
        self._patches = None
        self.discriminator = None
        # Only assign attributes that were explicitly provided, so unset
        # fields remain None and are still distinguishable.
        if id is not None:
            self.id = id
        if name is not None:
            self.name = name
        if create_time is not None:
            self.create_time = create_time
        if group is not None:
            self.group = group
        if connected is not None:
            self.connected = connected
        if needs_reboot is not None:
            self.needs_reboot = needs_reboot
        if os_family is not None:
            self.os_family = os_family
        if compliant is not None:
            self.compliant = compliant
        if patches is not None:
            self.patches = patches
    @property
    def id(self):
        """Gets the id of this PrePatchPrepatchDevices.  # noqa: E501
        :return: The id of this PrePatchPrepatchDevices.  # noqa: E501
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this PrePatchPrepatchDevices.
        :param id: The id of this PrePatchPrepatchDevices.  # noqa: E501
        :type: int
        """
        self._id = id
    @property
    def name(self):
        """Gets the name of this PrePatchPrepatchDevices.  # noqa: E501
        :return: The name of this PrePatchPrepatchDevices.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this PrePatchPrepatchDevices.
        :param name: The name of this PrePatchPrepatchDevices.  # noqa: E501
        :type: str
        """
        self._name = name
    @property
    def create_time(self):
        """Gets the create_time of this PrePatchPrepatchDevices.  # noqa: E501
        :return: The create_time of this PrePatchPrepatchDevices.  # noqa: E501
        :rtype: datetime
        """
        return self._create_time
    @create_time.setter
    def create_time(self, create_time):
        """Sets the create_time of this PrePatchPrepatchDevices.
        :param create_time: The create_time of this PrePatchPrepatchDevices.  # noqa: E501
        :type: datetime
        """
        self._create_time = create_time
    @property
    def group(self):
        """Gets the group of this PrePatchPrepatchDevices.  # noqa: E501
        :return: The group of this PrePatchPrepatchDevices.  # noqa: E501
        :rtype: str
        """
        return self._group
    @group.setter
    def group(self, group):
        """Sets the group of this PrePatchPrepatchDevices.
        :param group: The group of this PrePatchPrepatchDevices.  # noqa: E501
        :type: str
        """
        self._group = group
    @property
    def connected(self):
        """Gets the connected of this PrePatchPrepatchDevices.  # noqa: E501
        :return: The connected of this PrePatchPrepatchDevices.  # noqa: E501
        :rtype: bool
        """
        return self._connected
    @connected.setter
    def connected(self, connected):
        """Sets the connected of this PrePatchPrepatchDevices.
        :param connected: The connected of this PrePatchPrepatchDevices.  # noqa: E501
        :type: bool
        """
        self._connected = connected
    @property
    def needs_reboot(self):
        """Gets the needs_reboot of this PrePatchPrepatchDevices.  # noqa: E501
        :return: The needs_reboot of this PrePatchPrepatchDevices.  # noqa: E501
        :rtype: bool
        """
        return self._needs_reboot
    @needs_reboot.setter
    def needs_reboot(self, needs_reboot):
        """Sets the needs_reboot of this PrePatchPrepatchDevices.
        :param needs_reboot: The needs_reboot of this PrePatchPrepatchDevices.  # noqa: E501
        :type: bool
        """
        self._needs_reboot = needs_reboot
    @property
    def os_family(self):
        """Gets the os_family of this PrePatchPrepatchDevices.  # noqa: E501
        :return: The os_family of this PrePatchPrepatchDevices.  # noqa: E501
        :rtype: str
        """
        return self._os_family
    @os_family.setter
    def os_family(self, os_family):
        """Sets the os_family of this PrePatchPrepatchDevices.
        :param os_family: The os_family of this PrePatchPrepatchDevices.  # noqa: E501
        :type: str
        """
        self._os_family = os_family
    @property
    def compliant(self):
        """Gets the compliant of this PrePatchPrepatchDevices.  # noqa: E501
        :return: The compliant of this PrePatchPrepatchDevices.  # noqa: E501
        :rtype: bool
        """
        return self._compliant
    @compliant.setter
    def compliant(self, compliant):
        """Sets the compliant of this PrePatchPrepatchDevices.
        :param compliant: The compliant of this PrePatchPrepatchDevices.  # noqa: E501
        :type: bool
        """
        self._compliant = compliant
    @property
    def patches(self):
        """Gets the patches of this PrePatchPrepatchDevices.  # noqa: E501
        :return: The patches of this PrePatchPrepatchDevices.  # noqa: E501
        :rtype: Patches
        """
        return self._patches
    @patches.setter
    def patches(self, patches):
        """Sets the patches of this PrePatchPrepatchDevices.
        :param patches: The patches of this PrePatchPrepatchDevices.  # noqa: E501
        :type: Patches
        """
        self._patches = patches
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Include plain dict entries when the model subclasses dict.
        if issubclass(PrePatchPrepatchDevices, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PrePatchPrepatchDevices):
            return False
        # Attribute-by-attribute comparison via the instance dicts.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.341693 | 168 | 0.586792 |
acf7019a3ad49449bc38915d01fd7bf3515871dd | 3,445 | py | Python | research/object_detection/xml_to_csv.py | AlvaroCavalcante/models | 5312d38ecadeb4b5647e7a80e35cf19c515925f4 | [
"Apache-2.0"
] | 2 | 2020-07-24T01:53:09.000Z | 2020-10-26T07:45:50.000Z | research/object_detection/xml_to_csv.py | AlvaroCavalcante/models | 5312d38ecadeb4b5647e7a80e35cf19c515925f4 | [
"Apache-2.0"
] | 1 | 2019-12-04T14:51:17.000Z | 2019-12-04T14:51:17.000Z | research/object_detection/xml_to_csv.py | AlvaroCavalcante/models | 5312d38ecadeb4b5647e7a80e35cf19c515925f4 | [
"Apache-2.0"
] | 8 | 2020-09-17T13:09:11.000Z | 2021-12-17T09:58:43.000Z | """
Usage:
# Create train data:
python xml_to_csv.py -i [PATH_TO_IMAGES_FOLDER]/train -o [PATH_TO_ANNOTATIONS_FOLDER]/train_labels.csv
# Create test data:
python xml_to_csv.py -i [PATH_TO_IMAGES_FOLDER]/test -o [PATH_TO_ANNOTATIONS_FOLDER]/test_labels.csv
"""
import os
import glob
import pandas as pd
import argparse
import xml.etree.ElementTree as ET
def xml_to_csv(path):
    """Iterates through all .xml files (generated by labelImg) in a given directory and combines
    them in a single Pandas dataframe.

    Parameters:
    ----------
    path : {str}
        The path containing the .xml files
    Returns
    -------
    (pandas.DataFrame, list of str)
        The produced dataframe and the sorted list of unique class names.
    """
    classes_names = []
    xml_list = []
    for xml_file in glob.glob(path + "/*.xml"):
        tree = ET.parse(xml_file)
        root = tree.getroot()
        size = root.find("size")
        for member in root.findall("object"):
            # Bug fix: look up child elements by tag instead of position.
            # labelImg XML may omit or reorder optional children (pose,
            # truncated, difficult), so positional indexing (member[0],
            # member[4], size[0]) is fragile.
            class_name = member.find("name").text
            bndbox = member.find("bndbox")
            classes_names.append(class_name)
            value = (
                root.find("filename").text,
                int(size.find("width").text),
                int(size.find("height").text),
                class_name,
                int(float(bndbox.find("xmin").text)),
                int(float(bndbox.find("ymin").text)),
                int(float(bndbox.find("xmax").text)),
                int(float(bndbox.find("ymax").text)),
            )
            xml_list.append(value)
    column_name = [
        "filename",
        "width",
        "height",
        "class",
        "xmin",
        "ymin",
        "xmax",
        "ymax",
    ]
    xml_df = pd.DataFrame(xml_list, columns=column_name)
    # De-duplicate and sort the collected class names.
    classes_names = sorted(set(classes_names))
    return xml_df, classes_names
def main():
    """Parse command-line arguments, convert labelImg XML annotations to a CSV
    file and optionally emit a TensorFlow label_map.pbtxt file."""
    # Initiate argument parser
    parser = argparse.ArgumentParser(
        description="Sample TensorFlow XML-to-CSV converter"
    )
    parser.add_argument(
        "-i",
        "--inputDir",
        help="Path to the folder where the input .xml files are stored",
        type=str,
    )
    parser.add_argument(
        "-o", "--outputFile", help="Name of output .csv file (including path)", type=str
    )
    parser.add_argument(
        "-l",
        "--labelMapDir",
        help="Directory path to save label_map.pbtxt file is specified.",
        type=str,
        default="",
    )
    args = parser.parse_args()
    # Default to the current working directory when no input folder was given.
    if args.inputDir is None:
        args.inputDir = os.getcwd()
    if args.outputFile is None:
        args.outputFile = args.inputDir + "/labels.csv"
    assert os.path.isdir(args.inputDir)
    # Bug fix: os.makedirs("") raises FileNotFoundError when the output file
    # has no directory component, so only create the directory when one exists.
    output_dir = os.path.dirname(args.outputFile)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    xml_df, classes_names = xml_to_csv(args.inputDir)
    xml_df.to_csv(args.outputFile, index=None)
    print("Successfully converted xml to csv.")
    if args.labelMapDir:
        os.makedirs(args.labelMapDir, exist_ok=True)
        label_map_path = os.path.join(args.labelMapDir, "label_map.pbtxt")
        print("Generate `{}`".format(label_map_path))
        # Create the `label_map.pbtxt` file
        pbtxt_content = ""
        for i, class_name in enumerate(classes_names):
            pbtxt_content = (
                pbtxt_content
                + "item {{\n id: {0}\n name: '{1}'\n}}\n\n".format(
                    i + 1, class_name
                )
            )
        pbtxt_content = pbtxt_content.strip()
        with open(label_map_path, "w") as f:
            f.write(pbtxt_content)
# Run the converter only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
| 28.94958 | 131 | 0.582583 |
acf701e5d5658363b51689a76fc896d711a90074 | 1,031 | py | Python | Misc/146_LRUcache.py | PsiPhiTheta/LeetCode | b4473d3fdf317012b6224b363306d66a33b07932 | [
"Unlicense"
] | 1 | 2018-12-09T21:09:36.000Z | 2018-12-09T21:09:36.000Z | Misc/146_LRUcache.py | PsiPhiTheta/LeetCode | b4473d3fdf317012b6224b363306d66a33b07932 | [
"Unlicense"
] | null | null | null | Misc/146_LRUcache.py | PsiPhiTheta/LeetCode | b4473d3fdf317012b6224b363306d66a33b07932 | [
"Unlicense"
] | 1 | 2018-12-09T21:09:40.000Z | 2018-12-09T21:09:40.000Z | class LRUCache(object):
def __init__(self, capacity):
"""
:type capacity: int
"""
self.oDic = collections.OrderedDict()
self.cap = capacity
def get(self, key):
"""
:type key: int
:rtype: int
"""
if key in self.oDic:
self.oDic.move_to_end(key) # update position in cache (since used)
return self.oDic[key]
else:
return -1
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: void
"""
if key in self.oDic:
del self.oDic[key]
self.oDic[key] = value # update value
else:
if len(self.oDic) == self.cap:
self.oDic.popitem(False) # remove least recently used (furthest from end)
self.oDic[key] = value # write new
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
| 25.775 | 89 | 0.522793 |
acf70218c4b16233c1ba06ab9ec2aeaa4a5d4cb9 | 3,091 | py | Python | azure_sdk/resources/compute/virtual_machine_extension.py | cloudify-incubator/cloudify-azure-plugin | 49ecc485b70099d6d23dff81f50b17ab31f7fc18 | [
"Apache-2.0"
] | 2 | 2018-08-16T01:50:35.000Z | 2018-11-17T20:31:37.000Z | azure_sdk/resources/compute/virtual_machine_extension.py | cloudify-incubator/cloudify-azure-plugin | 49ecc485b70099d6d23dff81f50b17ab31f7fc18 | [
"Apache-2.0"
] | 43 | 2017-05-18T12:31:42.000Z | 2019-01-08T09:20:42.000Z | azure_sdk/resources/compute/virtual_machine_extension.py | cloudify-incubator/cloudify-azure-plugin | 49ecc485b70099d6d23dff81f50b17ab31f7fc18 | [
"Apache-2.0"
] | 4 | 2018-01-17T15:12:54.000Z | 2019-07-16T10:39:24.000Z | # #######
# Copyright (c) 2020 - 2022 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from azure.mgmt.compute import ComputeManagementClient
from cloudify_azure import (constants, utils)
from azure_sdk.common import AzureResource
class VirtualMachineExtension(AzureResource):
    """Thin wrapper around the Azure compute SDK for managing VM extensions.

    All methods log their activity through the injected logger and return the
    SDK result converted to a plain dict via ``as_dict()``.
    """

    def __init__(self, azure_config, logger,
                 api_version=constants.API_VER_COMPUTE):
        super().__init__(azure_config)
        self.logger = logger
        self.client = ComputeManagementClient(self.credentials,
                                              self.subscription_id,
                                              api_version=api_version)

    def get(self, group_name, vm_name, vm_extension_name):
        """Fetch a single VM extension and return it as a dict."""
        self.logger.info("Get vm_extension...{0}".format(vm_extension_name))
        extension = self.client.virtual_machine_extensions.get(
            resource_group_name=group_name,
            vm_name=vm_name,
            vm_extension_name=vm_extension_name
        ).as_dict()
        self.logger.info(
            'Get virtual_machine_extension result: {0}'.format(
                utils.secure_logging_content(extension))
        )
        return extension

    def create_or_update(self, group_name, vm_name, vm_extension_name, params):
        """Create or update a VM extension; blocks until the long-running
        operation completes and returns the resulting resource as a dict."""
        self.logger.info("Create/Updating vm_extension...{0}".format(
            vm_extension_name))
        poller = self.client.virtual_machine_extensions.create_or_update(
            resource_group_name=group_name,
            vm_name=vm_name,
            vm_extension_name=vm_extension_name,
            extension_parameters=params,
        )
        poller.wait()
        extension = poller.result().as_dict()
        self.logger.info(
            'Create virtual_machine_extension result: {0}'.format(
                utils.secure_logging_content(extension))
        )
        return extension

    def delete(self, group_name, vm_name, vm_extension_name):
        """Delete a VM extension; blocks until the deletion completes."""
        self.logger.info("Deleting vm_extension...{0}".format(
            vm_extension_name))
        poller = self.client.virtual_machine_extensions.delete(
            resource_group_name=group_name,
            vm_name=vm_name,
            vm_extension_name=vm_extension_name
        )
        poller.wait()
        self.logger.debug(
            'Deleted virtual_machine_extension {0}'.format(vm_extension_name))
| 41.77027 | 79 | 0.685215 |
acf7029911d44568784ce61297384e50c5ba37b5 | 39,269 | py | Python | PySight.py | cudor/PySight2MISP | 2a42eb55fa99824b495857f2ee188ee24e9a3231 | [
"MIT"
] | null | null | null | PySight.py | cudor/PySight2MISP | 2a42eb55fa99824b495857f2ee188ee24e9a3231 | [
"MIT"
] | null | null | null | PySight.py | cudor/PySight2MISP | 2a42eb55fa99824b495857f2ee188ee24e9a3231 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sep 20, 2016
@author: deralexxx
Script to pull IOCs (indicators of compromise) from FireEye iSight and push them to MISP
Alexander Jaeger
See CHANGELOG.md for history
"""
import _thread
import datetime
import email.utils
import hashlib
import hmac
import json
import os
from pymisp import PyMISP, MISPEvent, MISPObject
import requests
import sys
import threading
import time
import urllib.parse
import urllib3
# Read the config file.
import PySight_settings
# Import our own iSight report model.
from model.pySightReport import pySightReport
# Suppress insecure HTTPS request warnings.
urllib3.disable_warnings()
# Error handling function.
def error_handling(e, a_string):
    """Log a caught exception with traceback details.

    :param e: the caught exception object
    :param a_string: context prefix for the error log message
    :return: the (sys, traceback) modules, matching the original interface
             expected by callers (``sys, traceback = error_handling(...)``)
    """
    import traceback
    # Bug fix: 'traceback' used to be imported only inside the hasattr()
    # branch, so the final 'return sys, traceback' raised a NameError for
    # exceptions without a 'message' attribute -- the common case in Python 3.
    if hasattr(e, 'message'):
        PySight_settings.logger.error('%s %s', a_string, e.message)
    else:
        PySight_settings.logger.error('%s %s', a_string, e)
    PySight_settings.logger.debug('1 %s', e.__doc__)
    PySight_settings.logger.debug('2 %s', sys.exc_info())
    PySight_settings.logger.debug('3 %s', sys.exc_info()[0])
    PySight_settings.logger.debug('4 %s', sys.exc_info()[1])
    ex_type, ex, tb = sys.exc_info()
    PySight_settings.logger.debug('6 %s', traceback.print_tb(tb))
    return sys, traceback
# Update an existing MISP event.
def update_misp_event(misp_instance, event, isight_alert):
    """Map the fields of one parsed iSight report onto an existing MISP event.

    Adds email/file/whois MISP objects and network attributes derived from the
    report, commits the changes via the MISP API, and records the event ID in
    the module-global ``new_events`` set so it gets published later.

    :param misp_instance: PyMISP API wrapper used to persist the changes
    :param event: the MISP event (MISPEvent/dict-like) to enrich
    :param isight_alert: pySightReport instance holding the parsed report fields
    :return: False if misp_instance is not a PyMISP object, otherwise None
    """
    # Update attributes based on the iSight report.
    #
    # Ideas of Alex not implemented:
    # Use expanded networkIdentifier as a comment.
    # Create attributes and use object relationships for iSight fields that have no corresponding MISP object attribute.
    #
    # Unused iSight fields: observationTime
    PySight_settings.logger.debug('Updating the event %s', event)
    # Verify that misp_instance is of the correct type
    if not isinstance(misp_instance, PyMISP):
        PySight_settings.logger.error('Parameter misp_instance is not a PyMISP object')
        return False
    # Determine whether the to_ids flag shall be set.
    # 'Attacker'/'Compromised' identifiers are treated as detectable indicators.
    if isight_alert.emailIdentifier == 'Attacker' or isight_alert.emailIdentifier == 'Compromised':
        email_ids = True
    else:
        email_ids = False
    if isight_alert.fileIdentifier == 'Attacker' or isight_alert.fileIdentifier == 'Compromised':
        file_ids = True
    elif isight_alert.intelligenceType == 'malware':
        file_ids = True
    else:
        file_ids = False
    if isight_alert.networkIdentifier == 'Attacker' or isight_alert.networkIdentifier == 'Compromised':
        network_ids = True
    else:
        network_ids = False
    # Use malwareFamily as the default comment.
    if isight_alert.malwareFamily:
        default_comment = isight_alert.malwareFamily
    else:
        default_comment = ''
    # If the alert contains email indicators, create an email object.
    if isight_alert.emailIdentifier:
        # If emailLanguage is provided, add it to the default comment.
        if isight_alert.emailLanguage:
            add_comment = 'Email language: ' + isight_alert.emailLanguage
            if default_comment == '':
                email_comment = add_comment
            else:
                email_comment = default_comment + '; ' + add_comment
        else:
            email_comment = default_comment
        # Create the object.
        email_object = MISPObject('email')
        email_object.comment = email_comment
        # Add attributes to the object.
        if isight_alert.senderAddress:
            email_object.add_attribute('from', value=isight_alert.senderAddress, to_ids=email_ids)
        if isight_alert.senderName:
            email_object.add_attribute('from-display-name', value=isight_alert.senderName, to_ids=False)
        if isight_alert.sourceIP:
            email_object.add_attribute('ip-src', value=isight_alert.sourceIP, to_ids=email_ids)
        if isight_alert.subject:
            email_object.add_attribute('subject', value=isight_alert.subject, to_ids=False)
        if isight_alert.recipient:
            email_object.add_attribute('to', value=isight_alert.recipient, to_ids=False)
        if isight_alert.senderDomain:
            # The sender domain becomes a standalone attribute referenced by the email object.
            domain_attribute = event.add_attribute(category='Network activity', type='domain',
                                                   value=isight_alert.senderDomain, to_ids=False)
            email_object.add_reference(domain_attribute.uuid, 'derived-from', comment='Email source domain')
        # Lastly, add the object to the event.
        event.add_object(email_object)
    # If the report contains an MD5 hash, create a file object.
    if isight_alert.md5:
        # If a file description is given, add it to the default comment.
        if isight_alert.description:
            add_comment = isight_alert.description
            if default_comment == '':
                file_comment = add_comment
            else:
                file_comment = default_comment + '; ' + add_comment
        else:
            file_comment = default_comment
        # Create the object.
        file_object = MISPObject('file')
        file_object.comment = file_comment
        # Add attributes to the object.
        file_object.add_attribute('md5', value=isight_alert.md5, to_ids=file_ids)
        if isight_alert.sha1:
            file_object.add_attribute('sha1', value=isight_alert.sha1, to_ids=file_ids)
        if isight_alert.sha256:
            file_object.add_attribute('sha256', value=isight_alert.sha256, to_ids=file_ids)
        if isight_alert.fileName and not isight_alert.fileName == 'UNAVAILABLE' and \
                not isight_alert.fileName.upper() == 'UNKNOWN':
            # Don't use filenames for detection.
            file_object.add_attribute('filename', value=isight_alert.fileName, to_ids=False)
        if isight_alert.fileSize:
            # Don't use file size for detection.
            file_object.add_attribute('size-in-bytes', value=isight_alert.fileSize, to_ids=False)
        if isight_alert.fuzzyHash:
            file_object.add_attribute('ssdeep', value=isight_alert.fuzzyHash, to_ids=file_ids)
        if isight_alert.fileType and not isight_alert.fileType == 'fileType':
            # Don't use file type for detection.
            file_object.add_attribute('text', value=isight_alert.fileType, to_ids=False)
        if isight_alert.fileCompilationDateTime:
            # Convert epoch format to ISO86011 UTC format.
            compile_date = datetime.datetime.fromtimestamp(isight_alert.fileCompilationDateTime)
            file_object.add_attribute('compilation-timestamp', value=str(compile_date), to_ids=False)
        if isight_alert.filePath:
            file_object.add_attribute('path', value=isight_alert.filePath, to_ids=False)
        # Lastly, add the object to the event.
        event.add_object(file_object)
    # If the report contains a user agent string, create a user-agent attribute.
    if isight_alert.userAgent:
        event.add_attribute(category='Network activity', type='user-agent', value=isight_alert.userAgent,
                            to_ids=network_ids, comment=default_comment)
    # If the report contains an ASN, create an AS attribute.
    if isight_alert.asn:
        # Don't use the ASN for detection.
        event.add_attribute(category='Network activity', type='AS', value=isight_alert.asn, to_ids=False,
                            comment=default_comment)
    # If the report contains a domain, create a hostname attribute (because iSight domain names are in fact hostnames).
    if isight_alert.domain:
        # If an IP address is provided with a hostname, put the IP address in a comment, possibly in addition to the
        # default network comment.
        if isight_alert.ip:
            add_comment = 'Resolves to ' + isight_alert.ip
            if default_comment == '':
                temp_comment = add_comment
            else:
                temp_comment = default_comment + '; ' + add_comment
        else:
            temp_comment = default_comment
        # If a protocol is provided, also add it to the comment.
        if isight_alert.protocol:
            add_comment = isight_alert.protocol
            if temp_comment == '':
                host_comment = add_comment
            else:
                host_comment = temp_comment + '; ' + add_comment
        else:
            host_comment = temp_comment
        # Add the attribute to the event. If a port is provided, use a combined attribute.
        if isight_alert.port:
            host_port = isight_alert.domain + '|' + isight_alert.port
            new_attr = event.add_attribute(category='Network activity', type='hostname|port', value=host_port,
                                           to_ids=network_ids, comment=host_comment)
        else:
            new_attr = event.add_attribute(category='Network activity', type='hostname', value=isight_alert.domain,
                                           to_ids=network_ids, comment=host_comment)
        # Add VERIS C2 tag to all hostname indicators.
        event.add_attribute_tag('veris:action:malware:variety="C2"', new_attr.value)
        # Ideally, the tag would only be added if FireEye marks the indicator as a C2 IP address.
        #if isight_alert.networkType == 'C&C':
        #    # Add veris tag to attribute.
        #    new_attr.add_tag('veris:action:malware:variety="C2"')
    # If the report doesn't contain a hostname but contains an IP address, create an ip-src or ip-dst attribute.
    elif isight_alert.ip:
        # Add the protocol to the comment if it is provided by iSight.
        if isight_alert.protocol:
            add_comment = isight_alert.protocol
            if default_comment == '':
                ip_comment = add_comment
            else:
                ip_comment = default_comment + '; ' + add_comment
        else:
            ip_comment = default_comment
        # Determine whether it's a source or destination IP address.
        # For specific network types, the IP address should be a destination IP address.
        if isight_alert.networkType == 'URL' or isight_alert.networkType == 'C&C' or \
                isight_alert.networkType == 'downloadLink' or isight_alert.networkType == 'maliciousLink' or \
                isight_alert.networkType == 'wateringHole':
            ip_type = 'ip-dst'
        # Else (networkType == 'network'), we determine the IP address type based on the network identifier.
        else:
            if isight_alert.networkIdentifier == 'Attacker':
                # Indicator is confirmed to host malicious content, has functioned as a command-and-control (C2) server,
                # and/or otherwise acted as a source of malicious activity.
                # Might be source or destination, but likelihood of destination is higher.
                ip_type = 'ip-dst'
            elif isight_alert.networkIdentifier == 'Compromised':
                # Indicator is confirmed to host malicious content due to compromise or abuse.
                ip_type = 'ip-dst'
            elif isight_alert.networkIdentifier == 'Related':
                # Indicator likely related to an attack but potentially only partially confirmed.
                # Might be source or destination, but likelihood of destination is higher.
                ip_type = 'ip-dst'
            elif isight_alert.networkIdentifier == 'Victim':
                # Indicator representing an entity that has been confirmed to have been victimized by malicious
                # activity.
                # Might be source or destination, but likelihood of destination is higher.
                ip_type = 'ip-dst'
            else:
                # Might be source or destination, but likelihood of source is higher.
                ip_type = 'ip-src'
        if isight_alert.port:
            # If a port is provided, it's likely a destination IP address.
            ip_type = 'ip-dst'
            type_combo = ip_type + '|port'
            ip_port = isight_alert.ip + '|' + isight_alert.port
            new_attr = event.add_attribute(category='Network activity', type=type_combo, value=ip_port,
                                           to_ids=network_ids, comment=ip_comment)
        else:
            new_attr = event.add_attribute(category='Network activity', type=ip_type, value=isight_alert.ip,
                                           to_ids=network_ids, comment=ip_comment)
        # Add VERIS C2 tag to all IP address indicators.
        event.add_attribute_tag('veris:action:malware:variety="C2"', new_attr.value)
        # Ideally, the tag would only be added when FireEye marks the IP address as a C2 host.
        #if isight_alert.networkType == 'C&C':
        #    # Add veris tag to attribute.
        #    new_attr.add_tag('veris:action:malware:variety="C2"')
    # If the report contains a domain registrant email address, then create a whois attribute.
    if isight_alert.registrantEmail:
        whois_object = MISPObject('whois')
        whois_object.comment = default_comment
        whois_object.add_attribute('registrant-email', value=isight_alert.registrantEmail, to_ids=network_ids)
        if isight_alert.registrantName:
            whois_object.add_attribute('registrant-name', value=isight_alert.registrantName, to_ids=False)
        if isight_alert.domain:
            whois_object.add_attribute('domain', value=isight_alert.domain, to_ids=network_ids)
        elif isight_alert.sourceDomain:
            whois_object.add_attribute('domain', value=isight_alert.sourceDomain, to_ids=network_ids)
        event.add_object(whois_object)
    # If the report contains a URL, create a url attribute.
    if isight_alert.url:
        event.add_attribute(category='Network activity', type='url', value=isight_alert.url, to_ids=network_ids,
                            comment=default_comment)
        # Add VERIS C2 tag to all url indicators.
        event.add_attribute_tag('veris:action:malware:variety="C2"', isight_alert.url)
        # Ideally, this tag would only be added when FireEye marks the indicator as a C2 URL.
        #if isight_alert.networkType == 'C&C':
        #    # Add veris tag to attribute.
        #    event.add_attribute_tag('veris:action:malware:variety="C2"', isight_alert.url)
    # If the report contains registry information, create a regkey attribute.
    # Ideally, the registry field would be split into hive, key and value.
    if isight_alert.registry:
        # If a file description is given, add it to the default comment.
        if isight_alert.description:
            add_comment = isight_alert.description
            if default_comment == '':
                reg_comment = add_comment
            else:
                reg_comment = default_comment + '; ' + add_comment
        else:
            reg_comment = default_comment
        event.add_attribute(category='Artifacts dropped', type='regkey', value=isight_alert.registry, to_ids=file_ids,
                            comment=reg_comment)
    # If the report contains a malware family, create a malware-type attribute.
    if isight_alert.malwareFamily:
        event.add_attribute(category='Antivirus detection', type='text', value=isight_alert.malwareFamily,
                            to_ids=False)
    # If the report contains an actor, create a threat-actor attribute.
    if isight_alert.actor:
        # Don't use the threat actor for detection.
        event.add_attribute(category='Attribution', type='threat-actor', value=isight_alert.actor, to_ids=False)
    # Finally, commit the event additions to the MISP instance and add it to the list of to be published events.
    misp_instance.update_event(event)
    global new_events
    new_events.add(event['id'])
# Create a new MISP event.
def create_misp_event(misp_instance, isight_report_instance, event_tags):
    """Create a brand-new MISP event from a parsed iSight report.

    Sets distribution/threat level/analysis state, pushes the event to MISP,
    applies default and ThreatScape-derived tags, adds the report ID/link as
    attributes, and finally delegates indicator mapping to update_misp_event().

    :param misp_instance: PyMISP API wrapper used to create the event
    :param isight_report_instance: pySightReport instance with the parsed report
    :param event_tags: iterable of tag names applied to every new event
    """
    # No MISP event for this iSight report ID exists yet.
    # Alas, create a new MISP event.
    # Convert the publication date of the iSight report into a datetime object.
    if isight_report_instance.publishDate:
        date = datetime.datetime.fromtimestamp(isight_report_instance.publishDate)
    else:
        # If iSight doesn't provide a date, use today's date.
        date = datetime.datetime.now(datetime.timezone.utc)
    # Create a MISP event from the FireEye iSight report with the following parameters.
    event = MISPEvent()
    event.distribution = 1  # This community only
    # Map iSight's risk rating onto MISP threat levels (ratings may come in
    # upper or mixed case).
    if isight_report_instance.riskRating == 'CRITICAL' or isight_report_instance.riskRating == 'Critical':
        event.threat_level_id = 1  # High
    elif isight_report_instance.riskRating == 'HIGH' or isight_report_instance.riskRating == 'High':
        event.threat_level_id = 1  # High
    elif isight_report_instance.riskRating == 'MEDIUM' or isight_report_instance.riskRating == 'Medium':
        event.threat_level_id = 2  # Medium
    elif isight_report_instance.riskRating == 'LOW' or isight_report_instance.riskRating == 'Low':
        event.threat_level_id = 3  # Low
    else:
        event.threat_level_id = 4  # Unknown
    event.analysis = 2  # Completed
    event.info = "iSIGHT: " + isight_report_instance.title
    event.date = date
    # Push the event to the MISP server.
    my_event = misp_instance.add_event(event, pythonify=True)
    PySight_settings.logger.debug('Created MISP event %s for iSight report %s', event, isight_report_instance.reportId)
    # Add the event ID to the global list of newly created events.
    global new_events
    new_events.add(my_event['id'])
    # Add default tags to the event.
    if event_tags:
        for event_tag in event_tags:
            misp_instance.tag(my_event, event_tag)
    # Use some iSight ThreatScapes for event tagging. Reports can have multiple ThreatScapes.
    if 'Cyber Espionage' in isight_report_instance.ThreatScape:
        # VERIS distinguishes between external, internal or partner actors. This difference is not yet implemented in
        # MISP. External would be most likely.
        #misp_instance.tag(my_event, 'veris:actor:external:motive="Espionage"')
        misp_instance.tag(my_event, 'veris:actor:motive="Espionage"')
    if 'Hacktivism' in isight_report_instance.ThreatScape:
        misp_instance.tag(my_event, 'veris:actor:external:variety="Activist"')
    if 'Critical Infrastructure' in isight_report_instance.ThreatScape:
        misp_instance.tag(my_event, 'basf:technology="OT"')
    if 'Cyber Physical' in isight_report_instance.ThreatScape:
        misp_instance.tag(my_event, 'basf:technology="OT"')
    if 'Cyber Crime' in isight_report_instance.ThreatScape:
        misp_instance.tag(my_event, 'veris:actor:external:variety="Organized crime"')
    # Add the iSight report ID and web link as attributes.
    if isight_report_instance.reportId:
        misp_instance.add_attribute(my_event, {'category': 'External analysis', 'type': 'text', 'to_ids': False,
                                               'value': isight_report_instance.reportId}, pythonify=True)
    if isight_report_instance.webLink:
        misp_instance.add_attribute(my_event, {'category': 'External analysis', 'type': 'link', 'to_ids': False,
                                               'value': isight_report_instance.webLink}, pythonify=True)
    # Put the ThreatScape into an Attribution attribute, but disable correlation.
    if isight_report_instance.ThreatScape:
        misp_instance.add_attribute(my_event, {'category': 'Attribution', 'type': 'text', 'to_ids': False,
                                               'value': isight_report_instance.ThreatScape,
                                               'disable_correlation': True}, pythonify=True)
    # Add specific attributes from this iSight report.
    update_misp_event(misp_instance, my_event, isight_report_instance)
# Retrieve the event ID of an event.
def check_misp_all_results(a_result):
    """Extract the ID of the first matching event from a PyMISP search result.

    NOTE(review): the result shape handled here varies by PyMISP version --
    either a dict with a 'message'/'response' key or a list of {'Event': ...}
    dicts. Confirm against the PyMISP version in use.

    NOTE(review): if 'message' is present but not equal to 'No matches.', this
    function falls through and implicitly returns None (falsy) -- verify this
    is the intended behavior for partial/error responses.

    :param a_result: raw return value of PyMISP.search()
    :return: previous event ID from MISP, or False when no match was found
    """
    # PySight_settings.logger.debug('Checking %s if it contains previous events', a_result)
    if 'message' in a_result:
        if a_result['message'] == 'No matches.':
            PySight_settings.logger.error('No existing MISP event found')
            # has really no event
            return False
    elif 'Event' in a_result[0]:
        # List-style result: take the ID of the first event.
        previous_event = a_result[0]['Event']['id']
        PySight_settings.logger.debug('Found an existing MISP event with ID %s', previous_event)
        return previous_event
    else:
        # Dict-style result: iterate the 'response' list (returns on the first hit).
        for e in a_result['response']:
            previous_event = e['Event']['id']
            PySight_settings.logger.debug('Found an existing MISP event with ID %s', previous_event)
            return previous_event
# Check whether there already exists a MISP event for a specific FireEye iSight report.
def misp_check_for_previous_event(misp_instance, isight_alert):
    """
    Check whether a MISP event already exists for a given iSight report.

    Searches first by report ID (text attribute), then by report web link
    (link attribute), both in the 'External analysis' category.

    Default: No event exists for this iSight report ID.
    :param misp_instance: PyMISP instance to search in
    :param isight_alert: pySightReport whose reportId/webLink are searched for
    :return:
        event id if an event is there
        false if no event exists yet
    """
    event = False
    if misp_instance is None:
        PySight_settings.logger.error('No MISP instance provided')
        return False
    # Search based on report ID.
    if isight_alert.reportId:
        result = misp_instance.search(value=isight_alert.reportId, type_attribute='text', category='External analysis')
        # If something was found in the MISP instance, then retrieve the event
        if result:
            event = check_misp_all_results(result)
    # If no event found, search based on report URL.
    if isight_alert.webLink and not event:
        result = misp_instance.search(value=isight_alert.webLink, type_attribute='link', category='External analysis')
        # If something was found in the MISP instance, then retrieve the event
        if result:
            event = check_misp_all_results(result)
    # Bug fix: the original checked 'if not result:', which raises a NameError
    # when neither reportId nor webLink is set (result never assigned), and
    # only reflected the outcome of the *last* search. Check the actual outcome.
    if not event:
        PySight_settings.logger.debug('Found no existing event for iSight report ID %s', isight_alert.reportId)
    return event
# Generate a PyMISP instance.
def get_misp_instance():
    """Build a PyMISP connection from the values in the config file.

    :return: MISP Instance
    :rtype: PyMISP (or False when initialization fails)
    """
    # Proxy settings are taken from the config file and converted to a dict.
    if PySight_settings.MISP_PROXY:
        proxy_url = str(PySight_settings.PROXY_URL)
        misp_proxies = {'http': proxy_url, 'https': proxy_url}
    else:
        misp_proxies = {}
    try:
        # URL of the MISP instance, API key and SSL certificate validation are taken from the config file.
        return PyMISP(
            PySight_settings.MISP_URL,
            PySight_settings.MISP_KEY,
            PySight_settings.MISP_VERIFYCERT,
            proxies=misp_proxies,
        )
    except Exception:
        PySight_settings.logger.error('Unexpected error in MISP init: %s', sys.exc_info())
        return False
# Process one FireEye iSight report and convert it into a MISP events.
def process_isight_indicator(isight_json, event_tags, t_semaphore, t_lock):
    """Convert one FireEye iSight report (JSON dict) into a new or updated MISP event.

    :param isight_json: one indicator dict from the iSight API response
    :param event_tags: list of tag names to attach to newly created events
    :param t_semaphore: semaphore limiting the number of concurrent worker threads
    :param t_lock: lock serializing the check-or-create step so parallel threads
                   don't create duplicate events for the same report
    :return: False on error, otherwise None
    """
    if PySight_settings.THREADING:
        # Acquire a semaphore (decrease the counter in the semaphore).
        t_semaphore.acquire()
        PySight_settings.logger.debug("Starting thread number %s out of max. %s threads", threading.active_count(),
                                      PySight_settings.NUMBER_THREADS)
    PySight_settings.logger.debug('Processing report %s', isight_json['reportId'])
    try:
        # Get a MISP instance per thread
        this_misp_instance = get_misp_instance()
        # Without a MISP instance this does not make sense
        if this_misp_instance is False:
            raise ValueError("No MISP instance found.")
        # Parse the FireEye iSight report
        isight_report_instance = pySightReport(isight_json)
        # If in DEBUG mode, write the iSight reports to a file
        if PySight_settings.DEBUG_MODE:
            # Create the "reports" subdirectory for storing iSight reports, if it doesn't exist already
            if not os.path.exists("reports"):
                os.makedirs("reports")
            f = open("reports/" + isight_report_instance.reportId, 'a')
            # Write the iSight report into the "reports" subdirectory.
            f.write(json.dumps(isight_json, sort_keys=True, indent=4, separators=(',', ': ')))
            f.close()
        if PySight_settings.THREADING:
            # Lock multithreading until a MISP event is created
            # Otherwise, parallel threads might create separate MISP events for one iSight report
            # NOTE(review): if an exception is raised while the lock is held, it is
            # never released, which can deadlock the remaining worker threads --
            # consider a try/finally around the locked section.
            t_lock.acquire()
        # Check whether we already have an event for this reportID.
        PySight_settings.logger.debug('Checking for existing event with report ID %s', isight_report_instance.reportId)
        event_id = misp_check_for_previous_event(this_misp_instance, isight_report_instance)
        if not event_id:
            # Create a new MISP event
            PySight_settings.logger.debug('No event found for report ID %s -- will create a new one',
                                          isight_report_instance.reportId)
            create_misp_event(this_misp_instance, isight_report_instance, event_tags)
            if PySight_settings.THREADING:
                t_lock.release()
        else:
            if PySight_settings.THREADING:
                t_lock.release()
            # Add the data to the found event
            event = this_misp_instance.get_event(event_id, pythonify=True)
            update_misp_event(this_misp_instance, event, isight_report_instance)
        # Reset the iSight report instance when done.
        isight_report_instance = None
    except AttributeError as e_AttributeError:
        sys, traceback = error_handling(e_AttributeError, a_string="Attribute Error")
        # Bug fix: the original tested 'if PySight_settings:', which is always
        # truthy (it is a module), so the semaphore was released even when
        # threading -- and thus the matching acquire() -- was disabled.
        if PySight_settings.THREADING:
            t_semaphore.release()
        return False
    except TypeError as e_TypeError:
        sys, traceback = error_handling(e_TypeError, a_string="Type Error:")
        if PySight_settings.THREADING:
            t_semaphore.release()
        return False
    except Exception as e_Exception:
        sys, traceback = error_handling(e_Exception, a_string="General Error:")
        if PySight_settings.THREADING:
            t_semaphore.release()
        return False
    if PySight_settings.THREADING:
        t_semaphore.release()
# Process all FireEye iSight reports and convert them to MISP events.
def misp_process_isight_indicators(a_result, event_tags):
    """Process every indicator of an iSight API response, then publish the
    newly created/updated MISP events.

    :param a_result: decoded iSight API response; indicators live under 'message'
    :param event_tags: list of tag names passed through to event creation
    """
    # A semaphore and a lock are always created; with threading disabled they
    # are simply passed through as unused dummy objects.
    limiter = threading.Semaphore(PySight_settings.NUMBER_THREADS)
    locker = _thread.allocate_lock()
    indicators = a_result['message']
    if PySight_settings.THREADING:
        # One worker thread per indicator; the semaphore inside the worker
        # bounds actual concurrency.
        workers = [
            threading.Thread(target=process_isight_indicator,
                             args=(indicator, event_tags, limiter, locker))
            for indicator in indicators
        ]
        for worker in workers:
            worker.start()
        # Wait for all threads to finish.
        for worker in workers:
            worker.join()
    else:
        for indicator in indicators:
            process_isight_indicator(indicator, event_tags, limiter, locker)
    # When done, publish all the newly created MISP events.
    misp_instance = get_misp_instance()
    global new_events
    for event_id in new_events:
        PySight_settings.logger.debug('Publishing event %s', event_id)
        misp_instance.publish(event_id, alert=False)
# Make the FireEye iSight API request.
def isight_load_data(a_url, a_query, a_header):
    """Execute a GET request against the FireEye iSight API and decode the JSON.

    :param a_url: base URL of the iSight API
    :param a_query: query path including URL-encoded parameters
    :param a_header: HTTP headers including the X-Auth HMAC signature
    :return: the decoded JSON response dict, or False on any error
    """
    # This is the URL for the iSight API query
    url_to_load = a_url + a_query
    # Set the proxy if specified
    if PySight_settings.ISIGHT_PROXY:
        isight_proxies = {
            'http': PySight_settings.PROXY_URL,
            'https': PySight_settings.PROXY_URL
        }
        PySight_settings.logger.debug('Connecting to FireEye iSight via proxy %s', PySight_settings.PROXY_URL)
    else:
        isight_proxies = {}
        # Bug fix: the original passed a stray argument without a matching
        # format placeholder, which makes the logging module log an error.
        PySight_settings.logger.debug('Connecting directly to FireEye iSight without a proxy')
    PySight_settings.logger.debug('FireEye iSight request URL: %s', url_to_load)
    PySight_settings.logger.debug('FireEye iSight request header: %s', a_header)
    try:
        r = requests.get(url_to_load, headers=a_header, proxies=isight_proxies,
                         verify=PySight_settings.ISIGHT_VERIFYCERT)
    except requests.exceptions.RequestException as e:
        # Bug fix: the original caught urllib.error.HTTPError, which requests
        # never raises (and urllib.error was not even imported), and fell
        # through with 'r' undefined. RequestException covers connection
        # errors, timeouts and chunked-encoding errors alike.
        PySight_settings.logger.error('Error when connecting to the FireEye iSight API: %s', e)
        return False
    if r.status_code == 204:
        PySight_settings.logger.warning('No result found for search')
        return False
    elif r.status_code == 404:
        PySight_settings.logger.error('%s: check the FireEye iSight API URL', r.reason)
        PySight_settings.logger.debug('%s', r.text)
        return False
    elif r.status_code != 200:
        PySight_settings.logger.error('Request not successful: %s', r.text)
        return False
    # Strip raw newlines before decoding the payload.
    return_data_cleaned = r.text.replace('\n', '')
    json_return_data_cleaned = json.loads(return_data_cleaned)
    PySight_settings.logger.debug('Number of indicators returned: %s', len(json_return_data_cleaned['message']))
    if not json_return_data_cleaned['success']:
        PySight_settings.logger.error('Error with the FireEye iSight API connection %s',
                                      json_return_data_cleaned['message']['description'])
        PySight_settings.logger.debug(json_return_data_cleaned)
        return False
    else:
        # For debugging purposes, write the returned IOCs to a file
        if PySight_settings.DEBUG_MODE:
            timestring = datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%d-%H%M%S')
            if not os.path.exists('debug'):
                os.makedirs('debug')
            f = open(os.path.join('debug', timestring), 'w')
            f.write(json.dumps(json_return_data_cleaned, sort_keys=True, indent=6, separators=(',', ': ')))
            f.close()
        return json_return_data_cleaned
# Define the header for the HTTP requests to the iSight API.
def set_header(a_prv_key, a_pub_key, a_query):
    """Build the HTTP header dict for a FireEye iSight API request.

    The X-Auth-Hash field is an HMAC-SHA256 over the concatenation of
    query string, API version, output format and request date, keyed
    with the private API key.

    :param a_prv_key: private (secret) API key used to key the HMAC
    :param a_pub_key: public API key, sent verbatim in X-Auth
    :param a_query: query string of the request being signed
    :return: header dictionary for the iSight request
    :rtype: dict
    """
    api_version = '2.5'
    accept_format = 'application/json'
    request_date = email.utils.formatdate(localtime=True)
    # The signed payload must use exactly this concatenation order.
    payload = (a_query + api_version + accept_format + request_date).encode('utf-8')
    digest = hmac.new(a_prv_key.encode('utf-8'), payload, hashlib.sha256)
    return {
        'X-Auth': a_pub_key,
        'X-Auth-Hash': digest.hexdigest(),
        'Accept': accept_format,
        'Accept-Version': api_version,
        'Date': request_date,
    }
# Prepare the request to the FireEye iSight API.
def isight_prepare_data_request(a_url, a_query, a_pub_key, a_prv_key):
    """Sign a query and execute it against the FireEye iSight API.

    :param a_url: base URL of the iSight API
    :param a_query: query string to sign and request
    :param a_pub_key: public API key
    :param a_prv_key: private API key
    :return: decoded JSON answer from isight_load_data, or False on failure
    """
    request_header = set_header(a_prv_key, a_pub_key, a_query)
    response = isight_load_data(a_url, a_query, request_header)
    if response:
        return response
    PySight_settings.logger.error('Something went wrong when retrieving indicators from the FireEye iSight API')
    return False
# Search for FireEye iSight reports published since the specified last hours.
def isight_search_indicators(base_url, public_key, private_key, hours):
    """Fetch all iSight indicators published within the last *hours* hours.

    Builds a '/view/indicators' query limited by a 'since' Epoch
    timestamp and delegates to isight_prepare_data_request.
    """
    # Start of the look-back window as an Epoch timestamp (hours -> seconds).
    window_start = int(time.time()) - hours * 60 * 60
    query = '/view/indicators?' + urllib.parse.urlencode({'since': window_start})
    return isight_prepare_data_request(base_url, query, public_key, private_key)
# This function is called from test_pysight.py but otherwise not used.
def data_search_report(url, public_key, private_key, a_reportid):
    """Retrieve a single iSight report by its report ID.

    Only referenced from test_pysight.py; otherwise unused.
    """
    print("text_search_wildcard Response:")
    # FIXME: a 'reportID' query parameter used to be passed here but is unused.
    report_query = '/report/' + a_reportid
    isight_prepare_data_request(url, report_query, public_key, private_key)
# This function is not used.
def data_text_search_title(url, public_key, private_key):
    """Run a title-phrase text search against the iSight API (unused helper).

    Fix: ``urllib.urlencode`` does not exist in Python 3; use
    ``urllib.parse.urlencode`` as the rest of this module does
    (e.g. isight_search_indicators).
    """
    print("text_search_title Response:")
    # title phrase search
    params = {
        'text': 'title:"Software Stack 3.1.2"'
    }
    text_search_query = '/search/text?' + urllib.parse.urlencode(params)
    isight_prepare_data_request(url, text_search_query, public_key, private_key)
# This function is not used.
def data_text_search_wildcard(url, public_key, private_key):
    """Run a wildcard text search against the iSight API (unused helper).

    Fix: ``urllib.urlencode`` does not exist in Python 3; use
    ``urllib.parse.urlencode`` as the rest of this module does.
    """
    print("text_search_wildcard Response:")
    # wild card text search
    params = {
        'text': 'zero-day*',
        'limit': '10',
        'offset': '0'
    }
    text_search_query = '/search/text?' + urllib.parse.urlencode(params)
    isight_prepare_data_request(url, text_search_query, public_key, private_key)
# This function is not used.
def data_text_search_sensitive_reports(url, public_key, private_key):
    """Search customer-intel-only reports by title (unused helper).

    Fix: ``urllib.urlencode`` does not exist in Python 3; use
    ``urllib.parse.urlencode`` as the rest of this module does.
    """
    print("text_search_sensitive_reports Response:")
    params = {
        'text': 'title:"Latin American"',
        'customerIntelOnly': True
    }
    text_search_query = '/search/text?' + urllib.parse.urlencode(params)
    isight_prepare_data_request(url, text_search_query, public_key, private_key)
# This function is not used.
def data_advanced_search_filter_indicators(url, public_key, private_key):
    """Run an advanced indicator search filtered on an MD5 fragment (unused helper)."""
    print("advanced_search_filter_indicators Response:")
    # Advanced query syntax: match an md5 fragment plus a file-identifier filter.
    advanced_query = '/search/advanced?query=md5=~8512835a95d0fabfb&fileIdentifier=[Victim;Attacker]'
    isight_prepare_data_request(url, advanced_query, public_key, private_key)
# This function is not used.
def data_basic_search_ip(url, public_key, private_key, ip):
    """Run a basic search for a single IP address (unused helper)."""
    PySight_settings.logger.debug("basic_search Response")
    # Query for search
    isight_prepare_data_request(url, '/search/basic?ip=' + ip, public_key, private_key)
# This function is not used.
def data_ioc(url, public_key, private_key):
    """Fetch all IOCs published during the last 30 days (unused helper)."""
    # 2592000 seconds == 30 days look-back window.
    start_date = int(time.time()) - 2592000
    end_date = int(time.time())
    ioc_query = '/view/iocs?start_date={0}&end_date={1}'.format(start_date, end_date)
    return isight_prepare_data_request(url, ioc_query, public_key, private_key)
# This function is not used.
def data_text_search_simple(url, public_key, private_key):
    """Run a simple free-text search against the iSight API (unused helper).

    Fix: ``urllib.urlencode`` does not exist in Python 3; use
    ``urllib.parse.urlencode`` as the rest of this module does.
    """
    print("text_search_simple Response:")
    # simple text search
    params = {
        'text': 'Stack-Based Buffer Overflow Vulnerability',
        'limit': '10',
        'offset': '0'
    }
    text_search_query = '/search/text?' + urllib.parse.urlencode(params)
    isight_prepare_data_request(url, text_search_query, public_key, private_key)
# This function is not used.
def data_text_search_filter(url, public_key, private_key):
    """Run two filtered text searches against the iSight API (unused helper).

    Fix: both query strings were built with ``urllib.urlencode``, which
    does not exist in Python 3; use ``urllib.parse.urlencode`` instead.

    :return: result of the second search, or False on any exception
    """
    try:
        print("text_search_filter Response:")
        # filter text search
        params = {
            'text': 'malware',
            'filter': 'threatScape:cyberEspionage,cyberCrime&riskRating:HIGH,LOW&language:english',
            'sortBy': 'title:asc,reportId:desc',
            'limit': '10',
            'offset': '5'
        }
        text_search_query = '/search/text?' + urllib.parse.urlencode(params)
        print('text_search_query', text_search_query)
        isight_prepare_data_request(url, text_search_query, public_key, private_key)
        params = {
            'text': 'malware',
            'filter': 'cveId:~\'CVE\''
        }
        text_search_query = '/search/text?' + urllib.parse.urlencode(params)
        return isight_prepare_data_request(url, text_search_query, public_key, private_key)
    except Exception:
        # Best-effort helper: swallow errors and signal failure to the caller.
        return False
# Script entry point: pull recent iSight indicators and push them into MISP.
if __name__ == '__main__':
    # If loglevel equals DEBUG, log the time the script ran.
    PySight_settings.logger.info('PySight2MISP started at %s', datetime.datetime.now(datetime.timezone.utc))
    if PySight_settings.DEBUG_MODE:
        # This is to log the time used to run the script
        from timeit import default_timer as timer
        start = timer()
    # Retrieve FireEye iSight indicators of the last x hours.
    result = isight_search_indicators(PySight_settings.ISIGHT_URL, PySight_settings.ISIGHT_KEY,
                                      PySight_settings.ISIGHT_SECRET, PySight_settings.HOURS)
    if result is False:
        PySight_settings.logger.warning('No indicators available from FireEye iSight')
    else:
        # Use a global set of newly created MISP events so that we can publish them once the script is finished
        # instead of after each update of the event.
        new_events = set()
        misp_process_isight_indicators(result, PySight_settings.MISP_EVENTTAGS)
    PySight_settings.logger.info('PySight2MISP finished at %s', datetime.datetime.now(datetime.timezone.utc))
    # If loglevel equals DEBUG, log the time the script ran.
    if PySight_settings.DEBUG_MODE:
        end = timer()
        PySight_settings.logger.debug('Time taken %s', end - start)
    # Manual test hooks for the unused data_* helpers above; left disabled.
    # data_ioc(url, public_key, private_key)
    # data_text_search_simple(PySight_settings.ISIGHT_URL, public_key, private_key)
    # data_text_search_filter(PySight_settings.ISIGHT_URL, public_key, private_key)
    # data_text_search_title(url, public_key, private_key)
    # data_text_search_wildcard(url, public_key, private_key)
    # data_text_search_sensitive_reports(PySight_settings.ISIGHT_URL, public_key, private_key)
    # data_advanced_search_filter_indicators(url, public_key, private_key)
| 43.439159 | 120 | 0.671013 |
acf702a3da50a796c0cd28ab741a54822f4b53be | 7,314 | py | Python | src/oci/marketplace/models/orchestration_variable.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/marketplace/models/orchestration_variable.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/marketplace/models/orchestration_variable.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class OrchestrationVariable(object):
    """The model of a variable for an orchestration resource."""

    #: A constant which can be used with the data_type property of a OrchestrationVariable.
    #: This constant has a value of "STRING"
    DATA_TYPE_STRING = "STRING"

    #: A constant which can be used with the data_type property of a OrchestrationVariable.
    #: This constant has a value of "INTEGER"
    DATA_TYPE_INTEGER = "INTEGER"

    def __init__(self, **kwargs):
        """
        Initializes a new OrchestrationVariable object with values from keyword arguments.

        Supported keyword arguments (matching the properties of this class):
        ``name`` (str), ``default_value`` (str), ``description`` (str),
        ``data_type`` (str, one of "STRING", "INTEGER"; unrecognized service
        values are mapped to 'UNKNOWN_ENUM_VALUE'), ``is_mandatory`` (bool)
        and ``hint_message`` (str).
        """
        self.swagger_types = {
            'name': 'str',
            'default_value': 'str',
            'description': 'str',
            'data_type': 'str',
            'is_mandatory': 'bool',
            'hint_message': 'str'
        }

        self.attribute_map = {
            'name': 'name',
            'default_value': 'defaultValue',
            'description': 'description',
            'data_type': 'dataType',
            'is_mandatory': 'isMandatory',
            'hint_message': 'hintMessage'
        }

        # Every backing attribute starts out unset; the decorator fills
        # them in from any supplied keyword arguments.
        for attr_name in self.swagger_types:
            setattr(self, '_' + attr_name, None)

    @property
    def name(self):
        """Gets the name of the variable (str)."""
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of the variable."""
        self._name = name

    @property
    def default_value(self):
        """Gets the variable's default value (str)."""
        return self._default_value

    @default_value.setter
    def default_value(self, default_value):
        """Sets the variable's default value."""
        self._default_value = default_value

    @property
    def description(self):
        """Gets the description of the variable (str)."""
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of the variable."""
        self._description = description

    @property
    def data_type(self):
        """
        Gets the data type of the variable (str).

        Allowed values are "STRING", "INTEGER" and 'UNKNOWN_ENUM_VALUE';
        any unrecognized value returned by the service is mapped to
        'UNKNOWN_ENUM_VALUE'.
        """
        return self._data_type

    @data_type.setter
    def data_type(self, data_type):
        """Sets the data type, coercing unknown values to 'UNKNOWN_ENUM_VALUE'."""
        if not value_allowed_none_or_none_sentinel(data_type, ["STRING", "INTEGER"]):
            data_type = 'UNKNOWN_ENUM_VALUE'
        self._data_type = data_type

    @property
    def is_mandatory(self):
        """Gets whether the variable is mandatory (bool)."""
        return self._is_mandatory

    @is_mandatory.setter
    def is_mandatory(self, is_mandatory):
        """Sets whether the variable is mandatory."""
        self._is_mandatory = is_mandatory

    @property
    def hint_message(self):
        """Gets a brief textual description that helps to explain the variable (str)."""
        return self._hint_message

    @hint_message.setter
    def hint_message(self, hint_message):
        """Sets the brief textual description that helps to explain the variable."""
        self._hint_message = hint_message

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
acf702d90280b5dd630d819127d72002c0b7a5f7 | 16,568 | py | Python | VGG-19/vgg-19/experiments/cifar-10/conv-Ultimate-Tensorization/train.py | zfgao66/deeplearning-mpo-tensorflow | c345b9fea79e16f98f9b50e0b4e0bcaf4ed4c8e6 | [
"MIT"
] | 24 | 2019-04-30T14:59:43.000Z | 2021-11-16T03:47:38.000Z | VGG-19/vgg-19/experiments/cifar-10/conv-Ultimate-Tensorization/train.py | HC1022/deeplearning-mpo | c345b9fea79e16f98f9b50e0b4e0bcaf4ed4c8e6 | [
"MIT"
] | null | null | null | VGG-19/vgg-19/experiments/cifar-10/conv-Ultimate-Tensorization/train.py | HC1022/deeplearning-mpo | c345b9fea79e16f98f9b50e0b4e0bcaf4ed4c8e6 | [
"MIT"
] | 9 | 2019-08-14T10:50:37.000Z | 2022-03-15T14:41:52.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import imp
import datetime
import shutil
import time
#import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import re
import sys
import shutil
import input_data
from hyper_parameters import *
net = None
#tf.set_random_seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed)
def batch(image, label, batch_size, name):
    """Group single (image, label) examples into batches of *batch_size*.

    Also attaches an image summary for the batched images.

    :param image: single image tensor from the input pipeline
    :param label: single label tensor from the input pipeline
    :param batch_size: number of examples per emitted batch
    :param name: name for the batching queue and the summary tag
    :return: (batched images, batched labels)
    """
    batched_images, batched_labels = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=8,
        capacity=3 * FLAGS.batch_size + 20,
        name=name)
    # NOTE: 'sumary' typo kept -- it is part of the existing summary tag names.
    tf.summary.image('sumary/images/' + name, batched_images)
    return batched_images, batched_labels
def tower_loss_and_eval(images, labels, train_phase, reuse=None, cpu_variables=False):
    # Build one GPU tower: forward pass, total loss (cross-entropy terms
    # plus L2 weight decay, with moving-average summaries) and the
    # per-example evaluation op.
    # `reuse` shares the 'inference' variable scope across towers;
    # `cpu_variables` asks the net to place variables on the CPU.
    with tf.variable_scope('inference', reuse=reuse):
        logits = net.inference(images, train_phase, cpu_variables=cpu_variables)
    losses = net.losses(logits, labels)
    total_loss = tf.add_n(losses, name='total_loss')
    # Add weight decay to the loss.
    # NOTE(review): this sums tf.nn.l2_loss over *all* trainable variables,
    # not just this tower's -- presumably fine because towers share
    # variables; confirm.
    total_loss = total_loss + FLAGS.WEIGHT_DECAY * tf.add_n(
        [tf.nn.l2_loss(v) for v in tf.trainable_variables()])
    # Track exponential moving averages of the individual and total losses
    # for smoother summaries.
    loss_averages = tf.train.ExponentialMovingAverage(0.99, name='avg')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    for l in losses + [total_loss]:
        loss_name = l.op.name
        tf.summary.scalar(loss_name + ' (raw)', l)
        tf.summary.scalar(loss_name, loss_averages.average(l))
    # Force the averaging update to run whenever the loss is evaluated.
    with tf.control_dependencies([loss_averages_op]):
        total_loss = tf.identity(total_loss)
    evaluation = net.evaluation(logits, labels)
    return total_loss, evaluation
def average_gradients(tower_grads):
    """Average per-tower gradients into a single (gradient, variable) list.

    :param tower_grads: list over towers of lists of (gradient, variable)
        pairs; every tower must cover the same variables in the same order.
    :return: one (averaged_gradient, variable) pair per variable.
    """
    averaged = []
    for per_tower in zip(*tower_grads):
        # Stack this variable's gradients from every tower and reduce them.
        stacked = tf.concat([tf.expand_dims(g, 0) for g, _ in per_tower], 0)
        mean_grad = tf.reduce_mean(stacked, 0)
        # All towers share the variable, so take it from the first tower.
        averaged.append((mean_grad, per_tower[0][1]))
    return averaged
def run_training(restore_chkpt=None):
    """Build the multi-GPU training graph and run the full train/eval loop.

    Each epoch trains over the whole training set, checkpoints, restores
    EMA-shadowed weights, evaluates on the validation set, then restores
    the raw weights before the next epoch.

    :param restore_chkpt: path to a checkpoint to resume from, or None
        to start a fresh session.
    """
    global net
    # The network definition module is configurable via FLAGS.net_module.
    net = imp.load_source('net', FLAGS.net_module)
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Non-trainable switch toggling between training and validation input.
        train_phase = tf.Variable(True, trainable=False, name='train_phase', dtype=tf.bool, collections=[])
        inp_data = input_data.get_input_data(FLAGS)
        t_image, t_label = inp_data['train']['image_input'], inp_data['train']['label_input']
        t_image = input_data.aug_train(t_image, inp_data['aux'])
        v_image, v_label = inp_data['validation']['image_input'], inp_data['validation']['label_input']
        v_image = input_data.aug_eval(v_image, inp_data['aux'])
        # One big validation batch, split evenly across the GPUs.
        v_images, v_labels = batch(v_image, v_label, FLAGS.batch_size * FLAGS.num_gpus, 'eval_batch')
        v_images_split = tf.split(v_images, FLAGS.num_gpus)
        v_labels_split = tf.split(v_labels, FLAGS.num_gpus)
        global_step = tf.get_variable('global_step',
                                      [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)
        epoch_steps = inp_data['train']['images'].shape[0] / (FLAGS.batch_size)
        decay_steps = int(FLAGS.num_epochs_per_decay * epoch_steps)
        lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
                                        global_step,
                                        decay_steps,
                                        FLAGS.learning_rate_decay_factor,
                                        staircase=True)
        # boundaries = [int(epoch_steps * epoch) for epoch in learning_rate_decay_boundary]
        # values = [FLAGS.initial_learning_rate*decay for decay in learning_rate_decay_value]
        # lr = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values)
        opt = tf.train.MomentumOptimizer(learning_rate=lr, momentum=FLAGS.MOMENTUM,
                                         name='optimizer', use_nesterov=True)
        # opt = tf.train.AdamOptimizer(lr, name='optimizer')
        tower_grads = []
        tower_evals = []
        tower_losses = []
        # Share variables on the CPU only when several GPUs are in use.
        cpu_variables = FLAGS.num_gpus > 1
        for i in range(FLAGS.num_gpus):
            reuse = i > 0
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i) as scope:
                    t_images, t_labels = batch(t_image, t_label, FLAGS.batch_size, 'train_batch')
                    # Feed train or validation data depending on train_phase.
                    images, labels = tf.cond(train_phase,
                                             lambda: (t_images, t_labels),
                                             lambda: (v_images_split[i], v_labels_split[i]))
                    loss, evaluation = tower_loss_and_eval(images, labels, train_phase, reuse, cpu_variables)
                    tower_losses.append(loss)
                    tower_evals.append(evaluation)
                    summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
                    grads = opt.compute_gradients(loss)
                    tower_grads.append(grads)
        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers.
        grads = average_gradients(tower_grads)
        summaries.append(tf.summary.scalar('learning_rate', lr))
        for grad, var in grads:
            if grad is not None:
                summaries.append(tf.summary.histogram('gradients/' + var.op.name, grad))
        # Apply the gradients to adjust the shared variables.
        apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)
        with tf.control_dependencies([apply_gradients_op]):
            # apply_gradients incremented global_step once; add the remaining
            # towers so global_step counts processed batches.
            normalize_gs = global_step.assign_add(FLAGS.num_gpus - 1)
        for var in tf.trainable_variables():
            summaries.append(tf.summary.histogram('variables/' + var.op.name, var))
        # Exponentially smoothed training loss/precision, for logging only.
        train_loss = tf.Variable(5.0, trainable=False, name='train_loss', dtype=tf.float32)
        train_precision = tf.Variable(0.0, trainable=False, name='train_precision', dtype=tf.float32)
        train_lp_decay = 0.9
        train_lp_updates = []
        for i in range(FLAGS.num_gpus):
            train_lp_updates.append(train_loss.assign_sub((1.0 - train_lp_decay) * (train_loss - tower_losses[i])))
            new_precision = tf.reduce_mean(tf.cast(tower_evals[i], tf.float32))
            train_lp_updates.append(train_precision.assign_sub((1.0 - train_lp_decay) * (train_precision - new_precision)))
        train_lp_update = tf.group(*train_lp_updates)
        summaries.append(tf.summary.scalar('loss/train', train_loss))
        summaries.append(tf.summary.scalar('precision/train', train_precision))
        # Validation metrics are computed in Python and assigned via placeholder.
        validation_loss = tf.Variable(0.0, trainable=False, dtype=tf.float32)
        validation_precision = tf.Variable(0.0, trainable=False, dtype=tf.float32)
        assign_ph = tf.placeholder(tf.float32, shape=[])
        vl_assign_op = validation_loss.assign(assign_ph)
        vp_assign_op = validation_precision.assign(assign_ph)
        summaries.append(tf.summary.scalar('loss/validation', validation_loss))
        summaries.append(tf.summary.scalar('precision/validation', validation_precision))
        # EMA of the trainable variables; evaluation runs on the shadow values.
        variable_averages = tf.train.ExponentialMovingAverage(0.9, global_step, zero_debias=True)
        variables_averages_op = variable_averages.apply(tf.trainable_variables())
        train_op = tf.group(apply_gradients_op, normalize_gs, variables_averages_op, train_lp_update)
        qrunners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
        for qr in qrunners:
            summaries.append(tf.summary.scalar('queues/size/' + qr.name, qr.queue.size()))
        saver = tf.train.Saver(tf.global_variables())
        # Separate saver that restores EMA shadow values into the live
        # variables for evaluation.
        ema_saver = tf.train.Saver(variable_averages.variables_to_restore())
        summary_op = tf.summary.merge(summaries)
        init = tf.global_variables_initializer()
        switch_train = train_phase.assign(True)
        switch_eval = train_phase.assign(False)
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))
        #initialize const variables with dataset
        sess.run(inp_data['initializer'], feed_dict=inp_data['init_feed'])
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
        sys.stdout.write('\n\n')
        epoch_steps = int(inp_data['train']['images'].shape[0] / FLAGS.batch_size + 0.5)
        start_epoch = 0
        if restore_chkpt is not None:
            saver.restore(sess, restore_chkpt)
            sys.stdout.write('Previously started training session restored from "%s".\n' % restore_chkpt)
            # Derive the epoch to resume from out of the restored global_step.
            start_epoch = int(sess.run(global_step)) // epoch_steps
        print_hyper_parameters()
        sys.stdout.write('Starting with epoch #%d.\n' % (start_epoch + 1))
        bestValidationPrecision = 0.0
        for epoch in range(start_epoch, FLAGS.max_epochs):
            # ---- Training phase ----
            sys.stdout.write('\n')
            _ = sess.run(switch_train)
            lr_val = sess.run(opt._learning_rate)
            sys.stdout.write('Epoch #%d. [Train], learning rate: %.2e\n' % (epoch + 1, lr_val))
            sys.stdout.flush()
            cum_t = 0.0
            step = 0
            log_steps = FLAGS.log_steps
            fmt_str = 'Epoch #%d [%s]. Step %d/%d (%d%%). Speed = %.2f sec/b, %.2f img/sec. Batch_loss = %.3f. Batch_precision = %.3f'
            while step < epoch_steps:
                start_time = time.time()
                _, loss_value, eval_value = sess.run([train_op, loss, evaluation])
                duration = time.time() - start_time
                step += FLAGS.num_gpus
                assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
                cum_t += duration
                sec_per_batch = duration / FLAGS.num_gpus
                img_per_sec = FLAGS.num_gpus * FLAGS.batch_size / duration
                # Refresh the progress line at most every couple of seconds.
                if cum_t > 2.0:
                    cum_t = 0.0
                    sys.stdout.write('\r')
                    sys.stdout.write(fmt_str %
                                     (epoch + 1,
                                      'Train',
                                      step + 1,
                                      epoch_steps,
                                      int(100.0 * (step + 1) / epoch_steps),
                                      sec_per_batch,
                                      img_per_sec,
                                      loss_value,
                                      np.mean(eval_value) * 100.0
                                      ))
                    sys.stdout.flush()
                # Emit a summary record every FLAGS.log_steps steps.
                log_steps -= FLAGS.num_gpus
                if (log_steps < 0):
                    log_steps = FLAGS.log_steps
                    summary_str = sess.run(summary_op)
                    glob_step = epoch * epoch_steps + step
                    summary_writer.add_summary(summary_str, glob_step)
            sys.stdout.write('\r')
            sys.stdout.write(fmt_str %
                             (epoch + 1,
                              'Train',
                              epoch_steps,
                              epoch_steps,
                              100,
                              sec_per_batch,
                              img_per_sec,
                              loss_value,
                              np.mean(eval_value) * 100.0
                              ))
            sys.stdout.write('\n')
            train_loss_val, train_precision_val = sess.run([train_loss, train_precision])
            sys.stdout.write('Epoch #%d. Train loss = %.3f. Train precision = %.3f.\n' %
                             (epoch + 1,
                              train_loss_val,
                              train_precision_val * 100.0))
            checkpoint_path = os.path.join(FLAGS.log_dir, 'model.ckpt')
            chkpt = saver.save(sess, checkpoint_path, global_step=global_step)
            sys.stdout.write('Checkpoint "%s" saved.\n\n' % chkpt)
            # ---- Evaluation phase ----
            sess.run(switch_eval)
            sys.stdout.write('Epoch #%d. [Evaluation]\n' % (epoch + 1))
            # Evaluate on the EMA-shadowed weights.
            ema_saver.restore(sess, chkpt)
            sys.stdout.write('EMA variables restored.\n')
            eval_cnt = inp_data['validation']['images'].shape[0]
            eval_steps = (eval_cnt + FLAGS.batch_size - 1) // FLAGS.batch_size
            eval_correct = 0
            eval_loss = 0.0
            cum_t = 0.0
            while eval_cnt > 0:
                start_time = time.time()
                eval_values_and_losses = sess.run(tower_evals + tower_losses)
                duration = time.time() - start_time
                eval_values = eval_values_and_losses[:FLAGS.num_gpus]
                eval_values = np.concatenate(eval_values, axis=0)
                eval_losses = eval_values_and_losses[-FLAGS.num_gpus:]
                # The last batch may wrap past the dataset end; clip it.
                cnt = min(eval_values.shape[0], eval_cnt)
                eval_correct += np.sum(eval_values[:cnt])
                eval_loss += np.sum(eval_losses) * FLAGS.batch_size
                eval_cnt -= cnt
                cur_step = eval_steps - (eval_cnt + FLAGS.batch_size - 1) // FLAGS.batch_size
                sec_per_batch = duration / FLAGS.num_gpus
                img_per_sec = FLAGS.num_gpus * FLAGS.batch_size / duration
                cum_t += duration
                if cum_t > 0.5:
                    cum_t = 0.0
                    sys.stdout.write('\r')
                    sys.stdout.write(fmt_str %
                                     (epoch + 1,
                                      'Evaluation',
                                      cur_step,
                                      eval_steps,
                                      int(100.0 * cur_step / eval_steps),
                                      sec_per_batch,
                                      img_per_sec,
                                      eval_losses[-1],
                                      np.mean(eval_values) * 100.0
                                      ))
                    sys.stdout.flush()
            sys.stdout.write('\r')
            sys.stdout.write(fmt_str %
                             (epoch + 1,
                              'Evaluation',
                              eval_steps,
                              eval_steps,
                              int(100.0),
                              sec_per_batch,
                              img_per_sec,
                              eval_losses[-1],
                              np.mean(eval_values) * 100.0
                              ))
            sys.stdout.write('\n')
            sys.stdout.flush()
            eval_precision = eval_correct / inp_data['validation']['images'].shape[0]
            eval_loss = eval_loss / inp_data['validation']['images'].shape[0]
            if eval_precision > bestValidationPrecision:
                bestValidationPrecision = eval_precision
            sys.stdout.write('Epoch #%d. Validation loss = %.3f. Validation precision = %.3f. '
                             'Best precision = %.3f\n' %
                             (epoch + 1,
                              eval_loss,
                              eval_precision * 100.0,
                              bestValidationPrecision * 100))
            # Put the raw (non-EMA) weights back before training resumes.
            saver.restore(sess, chkpt)
            sys.stdout.write('Variables restored.\n\n')
            sess.run(vl_assign_op, feed_dict={assign_ph: eval_loss})
            sess.run(vp_assign_op, feed_dict={assign_ph: eval_precision})
            # w = os.get_terminal_size().columns
            w = 40
            sys.stdout.write(('=' * w + '\n') * 2)
            bestFile = open(os.path.join(FLAGS.log_dir, 'best.txt'), 'w')
            bestFile.write('Best precision = %.4f\n' % bestValidationPrecision)
            bestFile.close()
        coord.request_stop()
        coord.join(threads)
def main(_):
    """Entry point: optionally resume from an existing checkpoint, then train.

    When a checkpoint exists in FLAGS.log_dir, asks the user interactively
    whether to continue, restart from scratch (wiping the log dir) or abort.
    """
    latest_chkpt = tf.train.latest_checkpoint(FLAGS.log_dir)
    if latest_chkpt is not None:
        while True:
            sys.stdout.write('Checkpoint "%s" found. Continue last training session?\n' % latest_chkpt)
            sys.stdout.write('Continue - [c/C]. Restart (all content of log dir will be removed) - [r/R]. Abort - [a/A].\n')
            answer = input().lower()
            if not answer:
                continue
            choice = answer[0]
            if choice == 'c':
                break
            if choice == 'r':
                # Restart from scratch: drop the checkpoint and wipe the log dir.
                latest_chkpt = None
                shutil.rmtree(FLAGS.log_dir)
                break
            if choice == 'a':
                return
    run_training(restore_chkpt=latest_chkpt)
if __name__ == '__main__':
    # tf.app.run parses command-line flags and then invokes main(_) above.
    tf.app.run()
| 39.826923 | 134 | 0.573334 |
acf7033490f49a0536507e24cd25de2de4238b51 | 599 | py | Python | qiskit/test/mock/backends/casablanca/__init__.py | t-imamichi/qiskit-core | 8d2eeeac44f97af1e10514cdae4157e5923ff2e5 | [
"Apache-2.0"
] | 1,456 | 2017-08-05T16:33:05.000Z | 2018-06-05T04:15:35.000Z | qiskit/test/mock/backends/casablanca/__init__.py | t-imamichi/qiskit-core | 8d2eeeac44f97af1e10514cdae4157e5923ff2e5 | [
"Apache-2.0"
] | 365 | 2017-08-04T06:09:16.000Z | 2018-06-05T08:33:37.000Z | qiskit/test/mock/backends/casablanca/__init__.py | declanmillar/qiskit-terra | 43e4a72c9c1537dd3d220a52f7e56423dfdd926c | [
"Apache-2.0"
] | 463 | 2017-08-05T04:10:01.000Z | 2018-06-05T06:43:21.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Mock casablanca backend"""
from .fake_casablanca import FakeCasablancaV2
from .fake_casablanca import FakeCasablanca
| 35.235294 | 77 | 0.772955 |
acf703aa181ca3c6002f7b61ca64ebcabc28143d | 26,413 | py | Python | bblogger/nordicsemi/dfu/package.py | lohmega/jamble | ca7d2788c584cfb1c86ae766d06f6a9d57a60974 | [
"Apache-2.0"
] | null | null | null | bblogger/nordicsemi/dfu/package.py | lohmega/jamble | ca7d2788c584cfb1c86ae766d06f6a9d57a60974 | [
"Apache-2.0"
] | 3 | 2020-05-27T13:00:45.000Z | 2020-09-29T12:42:23.000Z | bblogger/nordicsemi/dfu/package.py | lohmega/jamble | ca7d2788c584cfb1c86ae766d06f6a9d57a60974 | [
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2016 Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of other
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# 4. This software must only be used in or with a processor manufactured by Nordic
# Semiconductor ASA, or in or with a processor manufactured by a third party that
# is used in combination with a processor manufactured by Nordic Semiconductor.
#
# 5. Any software provided in binary or object form under this license must not be
# reverse engineered, decompiled, modified and/or disassembled.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python standard library
import os
import tempfile
import shutil
import binascii
from enum import Enum
# 3rd party libraries
from zipfile import ZipFile
import hashlib
# Nordic libraries
from nordicsemi.dfu.nrfhex import nRFHex
from nordicsemi.dfu.init_packet_pb import InitPacketPB, DFUType, CommandTypes, ValidationTypes, SigningTypes, HashTypes
from nordicsemi.dfu.manifest import ManifestGenerator, Manifest
from nordicsemi.dfu.model import HexType, FirmwareKeys
from nordicsemi.dfu.crc16 import calc_crc16
#from nordicsemi.zigbee.ota_file import OTA_file
#from .signing import Signing
# Translate the HexType firmware identifiers used by the hex-handling code
# into the DFUType values expected by the protobuf init packet.
HexTypeToInitPacketFwTypemap = {
    HexType.APPLICATION: DFUType.APPLICATION,
    HexType.BOOTLOADER: DFUType.BOOTLOADER,
    HexType.SOFTDEVICE: DFUType.SOFTDEVICE,
    HexType.SD_BL: DFUType.SOFTDEVICE_BOOTLOADER,
    HexType.EXTERNAL_APPLICATION: DFUType.EXTERNAL_APPLICATION
}
class PackageException(Exception):
    """Raised when creating or unpacking a Nordic DFU package fails."""
    pass
class PacketField(Enum):
    """Identifiers for the fields that make up a DFU init packet."""
    DEBUG_MODE = 1
    HW_VERSION = 2
    FW_VERSION = 3
    REQUIRED_SOFTDEVICES_ARRAY = 4
class Package:
    """
    Packages and unpacks Nordic DFU packages. Nordic DFU packages are zip files that contains firmware and meta-information
    necessary for utilities to perform a DFU on nRF5X devices.
    The internal data model used in Package is a dictionary. The dictionary is expressed like this in
    json format:
    {
        "manifest": {
            "bootloader": {
                "bin_file": "asdf.bin",
                "dat_file": "asdf.dat",
                "init_packet_data": {
                    "application_version": null,
                    "device_revision": null,
                    "device_type": 5,
                    "firmware_hash": "asdfasdkfjhasdkfjashfkjasfhaskjfhkjsdfhasjkhf",
                    "softdevice_req": [
                        17,
                        18
                    ]
                }
            }
        }
    }
    Attributes application, bootloader, softdevice, softdevice_bootloader shall not be put into the manifest if they are null
    """
    # Default init-packet field values; 0xFFFFFFFF / 0xFFFE act as
    # "match anything" wildcards for version and softdevice requirements.
    DEFAULT_DEBUG_MODE = False
    DEFAULT_HW_VERSION = 0xFFFFFFFF
    DEFAULT_APP_VERSION = 0xFFFFFFFF
    DEFAULT_BL_VERSION = 0xFFFFFFFF
    DEFAULT_SD_REQ = [0xFFFE]
    DEFAULT_SD_ID = [0xFFFE]
    DEFAULT_DFU_VER = 0.5
    MANIFEST_FILENAME = "manifest.json"
    DEFAULT_BOOT_VALIDATION_TYPE = ValidationTypes.VALIDATE_GENERATED_CRC.name
    def __init__(self,
                 debug_mode=DEFAULT_DEBUG_MODE,
                 hw_version=DEFAULT_HW_VERSION,
                 app_version=DEFAULT_APP_VERSION,
                 bl_version=DEFAULT_BL_VERSION,
                 sd_req=DEFAULT_SD_REQ,
                 sd_id=DEFAULT_SD_ID,
                 app_fw=None,
                 bootloader_fw=None,
                 softdevice_fw=None,
                 sd_boot_validation=DEFAULT_BOOT_VALIDATION_TYPE,
                 app_boot_validation=DEFAULT_BOOT_VALIDATION_TYPE,
                 key_file=None,
                 is_external=False,
                 zigbee_format=False,
                 manufacturer_id=0,
                 image_type=0,
                 comment='',
                 zigbee_ota_min_hw_version=None,
                 zigbee_ota_max_hw_version=None):
        """
        Constructor that requires values used for generating a Nordic DFU package.
        :param int debug_mode: Debug init-packet field
        :param int hw_version: Hardware version init-packet field
        :param int app_version: App version init-packet field
        :param int bl_version: Bootloader version init-packet field
        :param list sd_req: Softdevice Requirement init-packet field
        :param list sd_id: Softdevice Requirement init-packet field for the Application if softdevice_fw is set
        :param str app_fw: Path to application firmware file
        :param str bootloader_fw: Path to bootloader firmware file
        :param str softdevice_fw: Path to softdevice firmware file
        :param str key_file: Path to Signing key file (PEM)
        :param int zigbee_ota_min_hw_version: Minimal zigbee ota hardware version
        :param int zigbee_ota_max_hw_version: Maximum zigbee ota hardware version
        :return: None
        """
        init_packet_vars = {}
        if debug_mode is not None:
            init_packet_vars[PacketField.DEBUG_MODE] = debug_mode
        if hw_version is not None:
            init_packet_vars[PacketField.HW_VERSION] = hw_version
        if sd_id is not None:
            # The application entry is added below while sd_id is in effect.
            init_packet_vars[PacketField.REQUIRED_SOFTDEVICES_ARRAY] = sd_id
        if sd_boot_validation is not None:
            sd_boot_validation_type = [ValidationTypes[sd_boot_validation]]
        else:
            sd_boot_validation_type = [ValidationTypes.VALIDATE_GENERATED_CRC]
        if app_boot_validation is not None:
            app_boot_validation_type = [ValidationTypes[app_boot_validation]]
        else:
            app_boot_validation_type = [ValidationTypes.VALIDATE_GENERATED_CRC]
        self.firmwares_data = {}
        if app_fw:
            firmware_type = HexType.EXTERNAL_APPLICATION if is_external else HexType.APPLICATION
            self.__add_firmware_info(firmware_type=firmware_type,
                                     firmware_version=app_version,
                                     filename=app_fw,
                                     boot_validation_type=app_boot_validation_type,
                                     init_packet_data=init_packet_vars)
        if sd_req is not None:
            # From this point on the dict carries sd_req (not sd_id), which is
            # what the bootloader/softdevice entries below copy.
            init_packet_vars[PacketField.REQUIRED_SOFTDEVICES_ARRAY] = sd_req
        if bootloader_fw:
            self.__add_firmware_info(firmware_type=HexType.BOOTLOADER,
                                     firmware_version=bl_version,
                                     filename=bootloader_fw,
                                     boot_validation_type=[ValidationTypes.VALIDATE_GENERATED_CRC],
                                     init_packet_data=init_packet_vars)
        if softdevice_fw:
            self.__add_firmware_info(firmware_type=HexType.SOFTDEVICE,
                                     firmware_version=0xFFFFFFFF,
                                     filename=softdevice_fw,
                                     boot_validation_type=sd_boot_validation_type,
                                     init_packet_data=init_packet_vars)
        self.key_file = key_file
        self.work_dir = None
        self.manifest = None
        if zigbee_format:
            self.is_zigbee = True
            self.image_type = image_type
            self.manufacturer_id = manufacturer_id
            self.comment = comment
            self.zigbee_ota_min_hw_version = zigbee_ota_min_hw_version
            self.zigbee_ota_max_hw_version = zigbee_ota_max_hw_version
        else:
            self.is_zigbee = False
            self.image_type = None
            self.manufacturer_id = None
            self.comment = None
    def __del__(self):
        """
        Destructor removes the temporary working directory
        :return:
        """
        if self.work_dir is not None:
            shutil.rmtree(self.work_dir)
            self.work_dir = None
    def rm_work_dir(self, preserve):
        # Delete the temporary directory (unless the caller asked to keep it
        # around for debugging).
        if self.work_dir is not None:
            if not preserve:
                shutil.rmtree(self.work_dir)
            self.work_dir = None
    def parse_package(self, filename, preserve_work_dir=False):
        """Unpack an existing DFU zip and load its manifest into self.manifest."""
        self.work_dir = self.__create_temp_workspace()
        self.zip_file = filename
        self.zip_dir = os.path.join(self.work_dir, 'unpacked_zip')
        self.manifest = Package.unpack_package(filename, self.zip_dir)
        self.rm_work_dir(preserve_work_dir)
    def image_str(self, index, hex_type, img):
        """Return a human-readable multi-line description of one image of the package."""
        type_strs = {HexType.SD_BL : "sd_bl",
                     HexType.SOFTDEVICE : "softdevice",
                     HexType.BOOTLOADER : "bootloader",
                     HexType.APPLICATION : "application",
                     HexType.EXTERNAL_APPLICATION : "external application"}
        # parse init packet
        with open(os.path.join(self.zip_dir, img.dat_file), "rb") as imgf:
            initp_bytes = imgf.read()
        initp = InitPacketPB(from_bytes=initp_bytes)
        # Render the softdevice requirement list as "0xAB, 0xCD"; the trailing
        # ", " is trimmed afterwards.
        sd_req = ""
        for x in initp.init_command.sd_req:
            sd_req = sd_req + "0x{0:02X}, ".format(x)
        if len(sd_req) != 0:
            sd_req = sd_req[:-2]
        if (initp.packet.HasField('signed_command')):
            cmd = initp.packet.signed_command.command
            signature_type = SigningTypes(initp.packet.signed_command.signature_type).name
            signature_hex = binascii.hexlify(initp.packet.signed_command.signature)
        else:
            cmd = initp.packet.command
            signature_type = 'UNSIGNED'
            signature_hex = 'N/A'
        boot_validation_type = []
        boot_validation_bytes = []
        for x in cmd.init.boot_validation:
            boot_validation_type.append(ValidationTypes(x.type).name)
            boot_validation_bytes.append(binascii.hexlify(x.bytes))
        s = """|
|- Image #{0}:
|- Type: {1}
|- Image file: {2}
|- Init packet file: {3}
|
|- op_code: {4}
|- signature_type: {5}
|- signature (little-endian): {6}
|
|- fw_version: 0x{7:08X} ({7})
|- hw_version 0x{8:08X} ({8})
|- sd_req: {9}
|- type: {10}
|- sd_size: {11}
|- bl_size: {12}
|- app_size: {13}
|
|- hash_type: {14}
|- hash (little-endian): {15}
|
|- boot_validation_type: {16}
|- boot_validation_signature (little-endian): {17}
|
|- is_debug: {18}
""".format(index,
           type_strs[hex_type],
           img.bin_file,
           img.dat_file,
           CommandTypes(cmd.op_code).name,
           signature_type,
           signature_hex,
           cmd.init.fw_version,
           cmd.init.hw_version,
           sd_req,
           DFUType(cmd.init.type).name,
           cmd.init.sd_size,
           cmd.init.bl_size,
           cmd.init.app_size,
           HashTypes(cmd.init.hash.hash_type).name,
           binascii.hexlify(cmd.init.hash.hash),
           boot_validation_type,
           boot_validation_bytes,
           cmd.init.is_debug,
           )
        return s
    def __str__(self):
        """Describe every image of a parsed package (requires parse_package first)."""
        imgs = ""
        i = 0
        if self.manifest.softdevice_bootloader:
            imgs = imgs + self.image_str(i, HexType.SD_BL, self.manifest.softdevice_bootloader)
            i = i + 1
        if self.manifest.softdevice:
            imgs = imgs + self.image_str(i, HexType.SOFTDEVICE, self.manifest.softdevice)
            i = i + 1
        if self.manifest.bootloader:
            imgs = imgs + self.image_str(i, HexType.BOOTLOADER, self.manifest.bootloader)
            i = i + 1
        if self.manifest.application:
            imgs = imgs + self.image_str(i, HexType.APPLICATION, self.manifest.application)
            i = i + 1
        s = """
DFU Package: <{0}>:
|
|- Image count: {1}
""".format(self.zip_file, i)
        s = s + imgs
        return s
    def generate_package(self, filename, preserve_work_dir=False):
        """
        Generates a Nordic DFU package. The package is a zip file containing firmware(s) and metadata required
        for Nordic DFU applications to perform DFU on nRF5X devices.
        :param str filename: Filename for generated package.
        :param bool preserve_work_dir: True to preserve the temporary working directory.
        Useful for debugging of a package, and if the user wants to look at the generated package without having to
        unzip it.
        :return: None
        """
        self.zip_file = filename
        self.work_dir = self.__create_temp_workspace()
        sd_bin_created = False
        if Package._is_bootloader_softdevice_combination(self.firmwares_data):
            # Removing softdevice and bootloader data from dictionary and adding the combined later
            softdevice_fw_data = self.firmwares_data.pop(HexType.SOFTDEVICE)
            bootloader_fw_data = self.firmwares_data.pop(HexType.BOOTLOADER)
            softdevice_fw_name = softdevice_fw_data[FirmwareKeys.FIRMWARE_FILENAME]
            bootloader_fw_name = bootloader_fw_data[FirmwareKeys.FIRMWARE_FILENAME]
            new_filename = "sd_bl.bin"
            sd_bl_file_path = os.path.join(self.work_dir, new_filename)
            nrf_hex = nRFHex(softdevice_fw_name, bootloader_fw_name)
            nrf_hex.tobinfile(sd_bl_file_path)
            softdevice_size = nrf_hex.size()
            bootloader_size = nrf_hex.bootloadersize()
            boot_validation_type = []
            boot_validation_type.extend(softdevice_fw_data[FirmwareKeys.BOOT_VALIDATION_TYPE])
            boot_validation_type.extend(bootloader_fw_data[FirmwareKeys.BOOT_VALIDATION_TYPE])
            self.__add_firmware_info(firmware_type=HexType.SD_BL,
                                     firmware_version=bootloader_fw_data[FirmwareKeys.INIT_PACKET_DATA][PacketField.FW_VERSION], # use bootloader version in combination with SD
                                     filename=sd_bl_file_path,
                                     init_packet_data=softdevice_fw_data[FirmwareKeys.INIT_PACKET_DATA],
                                     boot_validation_type=boot_validation_type,
                                     sd_size=softdevice_size,
                                     bl_size=bootloader_size)
            # Need to generate SD only bin for boot validation signature
            sd_bin = Package.normalize_firmware_to_bin(self.work_dir, softdevice_fw_data[FirmwareKeys.FIRMWARE_FILENAME])
            sd_bin_path = os.path.join(self.work_dir, sd_bin)
            sd_bin_created = True
        for key, firmware_data in self.firmwares_data.items():
            # Normalize the firmware file and store it in the work directory
            firmware_data[FirmwareKeys.BIN_FILENAME] = \
                Package.normalize_firmware_to_bin(self.work_dir, firmware_data[FirmwareKeys.FIRMWARE_FILENAME])
            # Calculate the hash for the .bin file located in the work directory
            bin_file_path = os.path.join(self.work_dir, firmware_data[FirmwareKeys.BIN_FILENAME])
            firmware_hash = Package.calculate_sha256_hash(bin_file_path)
            bin_length = int(Package.calculate_file_size(bin_file_path))
            sd_size = 0
            bl_size = 0
            app_size = 0
            if key in [HexType.APPLICATION, HexType.EXTERNAL_APPLICATION]:
                app_size = bin_length
            elif key == HexType.SOFTDEVICE:
                sd_size = bin_length
            elif key == HexType.BOOTLOADER:
                bl_size = bin_length
            elif key == HexType.SD_BL:
                # For the combined image the individual sizes were recorded
                # when the entry was created above.
                bl_size = firmware_data[FirmwareKeys.BL_SIZE]
                sd_size = firmware_data[FirmwareKeys.SD_SIZE]
            boot_validation_type_array = firmware_data[FirmwareKeys.BOOT_VALIDATION_TYPE]
            boot_validation_bytes_array = []
            for x in boot_validation_type_array:
                if x == ValidationTypes.VALIDATE_ECDSA_P256_SHA256:
                    # NOTE(review): Package.sign_firmware uses Signing, whose
                    # import is commented out at the top of this file -- this
                    # path raises NameError as the file stands; confirm.
                    if key == HexType.SD_BL:
                        # Boot validation of the combined image signs the
                        # softdevice-only binary generated above.
                        boot_validation_bytes_array.append(Package.sign_firmware(self.key_file, sd_bin_path))
                    else:
                        boot_validation_bytes_array.append(Package.sign_firmware(self.key_file, bin_file_path))
                else:
                    boot_validation_bytes_array.append(b'')
            init_packet = InitPacketPB(
                            from_bytes = None,
                            hash_bytes=firmware_hash,
                            hash_type=HashTypes.SHA256,
                            boot_validation_type=boot_validation_type_array,
                            boot_validation_bytes=boot_validation_bytes_array,
                            dfu_type=HexTypeToInitPacketFwTypemap[key],
                            is_debug=firmware_data[FirmwareKeys.INIT_PACKET_DATA][PacketField.DEBUG_MODE],
                            fw_version=firmware_data[FirmwareKeys.INIT_PACKET_DATA][PacketField.FW_VERSION],
                            hw_version=firmware_data[FirmwareKeys.INIT_PACKET_DATA][PacketField.HW_VERSION],
                            sd_size=sd_size,
                            app_size=app_size,
                            bl_size=bl_size,
                            sd_req=firmware_data[FirmwareKeys.INIT_PACKET_DATA][PacketField.REQUIRED_SOFTDEVICES_ARRAY])
            if (self.key_file is not None):
                # NOTE(review): 'from .signing import Signing' is commented out
                # near the top of the file, so signing raises NameError here;
                # confirm before shipping.
                signer = Signing()
                signer.load_key(self.key_file)
                signature = signer.sign(init_packet.get_init_command_bytes())
                init_packet.set_signature(signature, SigningTypes.ECDSA_P256_SHA256)
            # Store the .dat file in the work directory
            init_packet_filename = firmware_data[FirmwareKeys.BIN_FILENAME].replace(".bin", ".dat")
            with open(os.path.join(self.work_dir, init_packet_filename), 'wb') as init_packet_file:
                init_packet_file.write(init_packet.get_init_packet_pb_bytes())
            firmware_data[FirmwareKeys.DAT_FILENAME] = \
                init_packet_filename
            if self.is_zigbee:
                # NOTE(review): OTA_file import is also commented out at the
                # top of the file -- the zigbee path raises NameError; confirm.
                firmware_version = firmware_data[FirmwareKeys.INIT_PACKET_DATA][PacketField.FW_VERSION]
                file_name = firmware_data[FirmwareKeys.BIN_FILENAME]
                self.zigbee_ota_file = OTA_file(firmware_version,
                                                len(init_packet.get_init_packet_pb_bytes()),
                                                binascii.crc32(init_packet.get_init_packet_pb_bytes()) & 0xFFFFFFFF,
                                                init_packet.get_init_packet_pb_bytes(),
                                                os.path.getsize(file_name),
                                                self.calculate_crc(32, file_name) & 0xFFFFFFFF,
                                                bytes(open(file_name, 'rb').read()),
                                                self.manufacturer_id,
                                                self.image_type,
                                                self.comment,
                                                self.zigbee_ota_min_hw_version,
                                                self.zigbee_ota_max_hw_version)
                ota_file_handle = open(self.zigbee_ota_file.filename, 'wb')
                ota_file_handle.write(self.zigbee_ota_file.binary)
                ota_file_handle.close()
        # Remove SD binary file created for boot validation
        if sd_bin_created:
            os.remove(sd_bin_path)
        # Store the manifest to manifest.json
        manifest = self.create_manifest()
        with open(os.path.join(self.work_dir, Package.MANIFEST_FILENAME), "w") as manifest_file:
            manifest_file.write(manifest)
        # Package the work_dir to a zip file
        Package.create_zip_package(self.work_dir, filename)
        # Delete the temporary directory
        self.rm_work_dir(preserve_work_dir)
    @staticmethod
    def __create_temp_workspace():
        # Fresh temp dir per package; cleaned up by rm_work_dir/__del__.
        return tempfile.mkdtemp(prefix="nrf_dfu_pkg_")
    @staticmethod
    def create_zip_package(work_dir, filename):
        """Zip every file in work_dir (flat, no subdirectories) into filename."""
        files = os.listdir(work_dir)
        with ZipFile(filename, 'w') as package:
            for _file in files:
                file_path = os.path.join(work_dir, _file)
                package.write(file_path, _file)
    @staticmethod
    def calculate_file_size(firmware_filename):
        """Return the size of the file in bytes."""
        b = os.path.getsize(firmware_filename)
        return b
    @staticmethod
    def calculate_sha256_hash(firmware_filename):
        """Return the SHA-256 digest of the file, byte-reversed (little-endian)."""
        read_buffer = 4096
        digest = hashlib.sha256()
        with open(firmware_filename, 'rb') as firmware_file:
            while True:
                data = firmware_file.read(read_buffer)
                if data:
                    digest.update(data)
                else:
                    break
        # return hash in little endian
        sha256 = digest.digest()
        return sha256[31::-1]
    @staticmethod
    def calculate_crc(crc, firmware_filename):
        """
        Calculates CRC16 has on provided firmware filename
        :type str firmware_filename:
        :param int crc: 16 for CRC-16 (initial value 0xffff) or 32 for CRC-32
        :raises ValueError: for any other crc width
        """
        data_buffer = b''
        read_size = 4096
        with open(firmware_filename, 'rb') as firmware_file:
            while True:
                data = firmware_file.read(read_size)
                if data:
                    data_buffer += data
                else:
                    break
        if crc == 16:
            return calc_crc16(data_buffer, 0xffff)
        elif crc == 32:
            return binascii.crc32(data_buffer)
        else:
            raise ValueError("Invalid CRC type")
    @staticmethod
    def sign_firmware(key, firmware_filename):
        """Sign the raw contents of firmware_filename with the given PEM key.

        NOTE(review): depends on Signing, whose import is commented out at the
        top of this file -- confirm before using signed packages.
        """
        data_buffer = b''
        with open(firmware_filename, 'rb') as firmware_file:
            data_buffer = firmware_file.read()
        signer = Signing()
        signer.load_key(key)
        return signer.sign(data_buffer)
    def create_manifest(self):
        """Serialize self.firmwares_data into the manifest.json text."""
        manifest = ManifestGenerator(self.firmwares_data)
        return manifest.generate_manifest()
    @staticmethod
    def _is_bootloader_softdevice_combination(firmwares):
        # True when both images are present and must be merged into sd_bl.bin.
        return (HexType.BOOTLOADER in firmwares) and (HexType.SOFTDEVICE in firmwares)
    def __add_firmware_info(self, firmware_type, firmware_version, filename, init_packet_data, boot_validation_type, sd_size=None, bl_size=None):
        """Record one firmware entry in self.firmwares_data."""
        self.firmwares_data[firmware_type] = {
            FirmwareKeys.FIRMWARE_FILENAME: filename,
            FirmwareKeys.INIT_PACKET_DATA: init_packet_data.copy(),
            # Copying init packet to avoid using the same for all firmware
            FirmwareKeys.BOOT_VALIDATION_TYPE: boot_validation_type,
        }
        if firmware_type == HexType.SD_BL:
            self.firmwares_data[firmware_type][FirmwareKeys.SD_SIZE] = sd_size
            self.firmwares_data[firmware_type][FirmwareKeys.BL_SIZE] = bl_size
        if firmware_version is not None:
            self.firmwares_data[firmware_type][FirmwareKeys.INIT_PACKET_DATA][PacketField.FW_VERSION] = firmware_version
    @staticmethod
    def normalize_firmware_to_bin(work_dir, firmware_path):
        """Convert a .hex firmware into a .bin inside work_dir; return the new path."""
        firmware_filename = os.path.basename(firmware_path)
        new_filename = firmware_filename.replace(".hex", ".bin")
        new_filepath = os.path.join(work_dir, new_filename)
        if not os.path.exists(new_filepath):
            temp = nRFHex(firmware_path)
            temp.tobinfile(new_filepath)
        return new_filepath
    @staticmethod
    def unpack_package(package_path, target_dir):
        """
        Unpacks a Nordic DFU package.
        :param str package_path: Path to the package
        :param str target_dir: Target directory to unpack the package to
        :return: Manifest Manifest: Returns a manifest back to the user. The manifest is a parse datamodel
        of the manifest found in the Nordic DFU package.
        """
        package_path = os.path.realpath(package_path)
        if not os.path.isfile(package_path):
            raise PackageException("Package {0} not found.".format(package_path))
        target_dir = os.path.abspath(target_dir)
        target_base_path = os.path.dirname(target_dir)
        if not os.path.exists(target_base_path):
            raise PackageException("Base path to target directory {0} does not exist.".format(target_base_path))
        if not os.path.isdir(target_base_path):
            raise PackageException("Base path to target directory {0} is not a directory.".format(target_base_path))
        if os.path.exists(target_dir):
            raise PackageException(
                "Target directory {0} exists, not able to unpack to that directory.",
                target_dir)
        with ZipFile(package_path, 'r') as pkg:
            pkg.extractall(target_dir)
        with open(os.path.join(target_dir, Package.MANIFEST_FILENAME), 'r') as f:
            _json = f.read()
            """:type :str """
            return Manifest.from_json(_json)
| 40.26372 | 177 | 0.62208 |
acf704accc5f061b01f61978b4bb3848ec430dec | 9,808 | py | Python | src/sdk/pynni/nni/ppo_tuner/util.py | mchesser/nni | 4ede394ee504c3d04b7770062cd5c022fad519b1 | [
"MIT"
] | null | null | null | src/sdk/pynni/nni/ppo_tuner/util.py | mchesser/nni | 4ede394ee504c3d04b7770062cd5c022fad519b1 | [
"MIT"
] | null | null | null | src/sdk/pynni/nni/ppo_tuner/util.py | mchesser/nni | 4ede394ee504c3d04b7770062cd5c022fad519b1 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
util functions
"""
import os
import random
import multiprocessing
import numpy as np
import tensorflow as tf
from gym.spaces import Discrete, Box, MultiDiscrete
def set_global_seeds(i):
    """Seed tensorflow, numpy and the stdlib random module with a common seed.

    Passing None clears the seeding (each library falls back to its default).
    """
    rank = 0
    if i is None:
        myseed = None
    else:
        myseed = i + 1000 * rank
    tf.set_random_seed(myseed)
    np.random.seed(myseed)
    random.seed(myseed)
def batch_to_seq(h, nbatch, nsteps, flat=False):
    """Reshape a batch tensor and split it into a list of per-step tensors."""
    target_shape = [nbatch, nsteps] if flat else [nbatch, nsteps, -1]
    reshaped = tf.reshape(h, target_shape)
    steps = tf.split(axis=1, num_or_size_splits=nsteps, value=reshaped)
    return [tf.squeeze(step, [1]) for step in steps]
def seq_to_batch(h, flat=False):
    """Concatenate a list of per-step tensors back into one batch tensor."""
    if flat:
        return tf.reshape(tf.stack(values=h, axis=1), [-1])
    shape = h[0].get_shape().as_list()
    assert len(shape) > 1
    nh = h[0].get_shape()[-1].value
    return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
    """lstm cell

    Unrolls a single-layer LSTM over the per-step inputs.

    :param xs: list of input tensors, one per time step; overwritten in place
        with the per-step hidden states
    :param ms: list of done-masks, one per step; a mask of 1 zeroes the
        carried cell/hidden state before that step (episode boundary reset)
    :param s: initial state tensor holding (c, h) concatenated on axis 1
    :param scope: variable scope name for the cell weights
    :param nh: number of hidden units
    :param init_scale: scale for the orthogonal weight initializer
    :return: (xs, s) -- per-step hidden states and the final (c, h) state
    """
    _, nin = [v.value for v in xs[0].get_shape()] # the first is nbatch
    with tf.variable_scope(scope):
        # wx/wh pack the weights for all four gates (i, f, o, u) side by side.
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        # Reset state where the previous step ended an episode (m == 1).
        c = c*(1-m)
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h  # per-step output written back into the input list
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
def lstm_model(nlstm=128, layer_norm=False):
    """
    Builds LSTM (Long-Short Term Memory) network to be used in a policy.
    Note that the resulting function returns not only the output of the LSTM
    (i.e. hidden state of lstm for each step in the sequence), but also a dictionary
    with auxiliary tensors to be set as policy attributes.
    Specifically,
    S is a placeholder to feed current state (LSTM state has to be managed outside policy)
    M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too)
    initial_state is a numpy array containing initial lstm state (usually zeros)
    state is the output LSTM state (to be fed into S at the next call)
    An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example
    Parameters:
    ----------
    nlstm: int          LSTM hidden state size
    layer_norm: bool    if True, layer-normalized version of LSTM is used
    Returns:
    -------
    function that builds LSTM with a given input tensor / placeholder
    """
    def network_fn(X, nenv=1, obs_size=-1):
        # Embed the inputs before the LSTM; assumes X holds integer ids in
        # [0, obs_size] -- TODO confirm against the caller.
        with tf.variable_scope("emb", reuse=tf.AUTO_REUSE):
            w_emb = tf.get_variable("w_emb", [obs_size+1, 32])
            X = tf.nn.embedding_lookup(w_emb, X)
        nbatch = X.shape[0]
        nsteps = nbatch // nenv
        h = tf.layers.flatten(X)
        M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
        S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states
        xs = batch_to_seq(h, nenv, nsteps)
        ms = batch_to_seq(M, nenv, nsteps)
        # The layer-normalized variant is not supported in this port.
        assert not layer_norm
        h5, snew = lstm(xs, ms, S, scope='lstm', nh=nlstm)
        h = seq_to_batch(h5)
        initial_state = np.zeros(S.shape.as_list(), dtype=float)
        return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}
    return network_fn
def ortho_init(scale=1.0):
    """Return an initializer that produces (scaled) orthogonal weight matrices."""
    def _ortho_init(shape, dtype, partition_info=None):
        # lasagne-style orthogonal initialization via SVD of a Gaussian matrix
        shape = tuple(shape)
        if len(shape) == 2:
            flat_shape = shape
        elif len(shape) == 4:
            # conv kernels: fold everything but the output channels (NHWC)
            flat_shape = (np.prod(shape[:-1]), shape[-1])
        else:
            raise NotImplementedError
        gaussian = np.random.normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(gaussian, full_matrices=False)
        # pick whichever factor already has the desired shape
        q = u if u.shape == flat_shape else v
        q = q.reshape(shape)
        return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
    return _ortho_init
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
    """Fully-connected layer: returns x @ w + b under the given variable scope."""
    with tf.variable_scope(scope):
        nin = x.get_shape()[1].value
        weight = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
        bias = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
        return tf.matmul(x, weight) + bias
def _check_shape(placeholder_shape, data_shape):
    """
    check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)
    """
    # NOTE(review): intentionally a stub that always reports compatibility,
    # so the assertion in adjust_shape below can never fire.
    return True
# ================================================================
# Shape adjustment for feeding into tf placeholders
# ================================================================
def adjust_shape(placeholder, data):
    """
    adjust shape of the data to the shape of the placeholder if possible.
    If shape is incompatible, AssertionError is thrown
    Parameters:
        placeholder     tensorflow input placeholder
        data            input data to be (potentially) reshaped to be fed into placeholder
    Returns:
        reshaped data
    """
    # Anything that is not array-like is passed through untouched.
    if not isinstance(data, (np.ndarray, list)):
        return data
    if isinstance(data, list):
        data = np.array(data)
    # Unknown (None) placeholder dimensions become -1 for np.reshape.
    placeholder_shape = [dim or -1 for dim in placeholder.shape.as_list()]
    assert _check_shape(placeholder_shape, data.shape), \
        'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
    return np.reshape(data, placeholder_shape)
# ================================================================
# Global session
# ================================================================
def get_session(config=None):
    """Get default session or create one with a given config"""
    current = tf.get_default_session()
    if current is not None:
        return current
    return make_session(config=config, make_default=True)
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
    """Returns a session that will use <num_cpu> CPU's only"""
    if num_cpu is None:
        # RCALL_NUM_CPU overrides the detected core count.
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    if config is None:
        config = tf.ConfigProto(
            allow_soft_placement=True,
            inter_op_parallelism_threads=num_cpu,
            intra_op_parallelism_threads=num_cpu)
        config.gpu_options.allow_growth = True
    session_cls = tf.InteractiveSession if make_default else tf.Session
    return session_cls(config=config, graph=graph)
# Variables already initialized by initialize(); kept at module level so
# repeated calls only touch newly created variables.
ALREADY_INITIALIZED = set()
def initialize():
    """Initialize all the uninitialized variables in the global scope."""
    new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
    get_session().run(tf.variables_initializer(new_variables))
    ALREADY_INITIALIZED.update(new_variables)
def observation_placeholder(ob_space, batch_size=None, name='Ob'):
    """
    Create placeholder to feed observations into of the size appropriate to the observation space
    Parameters:
    ----------
    ob_space: gym.Space     observation space
    batch_size: int         size of the batch to be fed into input. Can be left None in most cases.
    name: str               name of the placeholder
    Returns:
    -------
    tensorflow placeholder tensor
    """
    assert isinstance(ob_space, (Discrete, Box, MultiDiscrete)), \
        'Can only deal with Discrete and Box observation spaces for now'
    # int8 observations are fed as uint8.
    dtype = np.uint8 if ob_space.dtype == np.int8 else ob_space.dtype
    return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name)
def explained_variance(ypred, y):
    """
    Computes fraction of variance that ypred explains about y.
    Returns 1 - Var[y-ypred] / Var[y]
    interpretation:
        ev=0  =>  might as well have predicted zero
        ev=1  =>  perfect prediction
        ev<0  =>  worse than just predicting zero
    """
    assert y.ndim == 1 and ypred.ndim == 1
    vary = np.var(y)
    if vary == 0:
        # Degenerate target: variance ratio is undefined.
        return np.nan
    return 1 - np.var(y - ypred) / vary
acf707910bef8da84707435bf880c6852ffb84ed | 10,373 | py | Python | scripts/subsample-sequences.py | rpetit3/anthrax-metagenome-study | b4a6f2c4d49b57aeae898afd6a95c8f6cb437945 | [
"MIT"
] | null | null | null | scripts/subsample-sequences.py | rpetit3/anthrax-metagenome-study | b4a6f2c4d49b57aeae898afd6a95c8f6cb437945 | [
"MIT"
] | null | null | null | scripts/subsample-sequences.py | rpetit3/anthrax-metagenome-study | b4a6f2c4d49b57aeae898afd6a95c8f6cb437945 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
"""Create random subsampels of input sequences."""
import argparse as ap
import glob
import gzip
from multiprocessing import Pool
import os
import random
import subprocess
import numpy as np
# Module-level state shared with the multiprocessing workers in subsample();
# all values are populated in the __main__ block before the pool is created.
GENOME_SIZE = None
BA_LENGTH = None
BA_SEQUENCES = None
BA_TOTAL = None
BCG_LENGTH = None
BCG_SEQUENCES = None
BCG_TOTAL = None
BA_KMERS = None
BCG_KMERS = None
LEF_KMERS = None
def read_coverages(input_file):
    """Read one float coverage value per line from *input_file* and return them as a list."""
    with open(input_file, 'r') as input_handle:
        return [float(line.rstrip()) for line in input_handle]
def read_sequences(input_file, min_length=None):
    """Read sequences (one per line) and return [sequences, mean_length].

    :param str input_file: path to a text file with one sequence per line
    :param int min_length: if truthy, drop sequences shorter than this
    :return: list of [kept_sequences, int(mean length of a <=100000 sample)]

    Bug fix vs. the original: in the no-min_length branch every length was
    appended twice and the 100000-line sampling cap never applied (the line
    counter was never incremented). Lengths are now sampled identically in
    both branches: the first 100000 kept sequences.
    """
    lines = []
    lengths = []
    sampled = 0
    with open(input_file, 'r') as input_handle:
        for line in input_handle:
            line = line.rstrip()
            length = len(line)
            # Preserve original semantics: min_length=0/None disables the filter.
            if min_length and length < min_length:
                continue
            lines.append(line)
            if sampled < 100000:
                lengths.append(length)
                sampled += 1
    # Mean of the sampled lengths, truncated to int; 0 for an empty file.
    mean_length = int(np.mean(np.array(lengths))) if lengths else 0
    return [lines, mean_length]
def get_coverage_stats(coverage):
    """Return min/median/mean/max summary stats of a list of coverages (all 0 when empty)."""
    if not coverage:
        return {'min': 0, 'median': 0, 'mean': 0, 'max': 0}
    values = np.array(coverage)
    return {
        'min': min(coverage),
        'median': int(np.median(values)),
        'mean': np.mean(values),
        'max': max(coverage),
    }
def output_handler(output, redirect='>'):
    """Return [stream, redirect-string] for run_command.

    Falsy *output* means "capture via a pipe"; otherwise the named file is
    opened for writing and a shell-style redirect description is returned.
    """
    if not output:
        return [subprocess.PIPE, '']
    return [open(output, 'w'), '{0} {1}'.format(redirect, output)]
def run_command(cmd, cwd=None, stdout=False, stderr=False, shell=False):
    """Execute a single command and return (STDOUT, STDERR).

    :param list cmd: command and arguments for subprocess.Popen
    :param str cwd: working directory for the child; None means the current
        working directory at call time. (Bug fix: the original default was
        ``os.getcwd()``, which is evaluated once at import time and therefore
        ignored any later chdir.)
    :param stdout: falsy to capture via a pipe, or a filename to redirect to
    :param stderr: falsy to capture via a pipe, or a filename to redirect to
    :param bool shell: passed through to subprocess.Popen
    """
    stdout, stdout_str = output_handler(stdout)
    stderr, stderr_str = output_handler(stderr, redirect='2>')
    process = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, cwd=cwd,
                               shell=shell)
    return process.communicate()
def has_hit(counts):
    """Return True if any k-mer in a count file has a non-zero count.

    Each line of *counts* is expected to be '<kmer> <count>'. Short-circuits
    on the first non-zero count. (Bug fix: the original fell off the end and
    returned None instead of False when no hit was found; callers only used
    it in boolean context, so returning an explicit bool is compatible.)
    """
    with open(counts, 'r') as count_handle:
        return any(int(line.rstrip().split()[1]) for line in count_handle)
def subsample(opts):
    """Subsample coverages.

    Worker entry point for multiprocessing.Pool.map. *opts* is a list of
    [working_dir, output_dir, bcg_coverage, ba_coverage, replicate_index].
    Writes a FASTA of randomly sampled BA/BCG reads into output_dir; the
    random seed is derived deterministically from the coverages and the
    replicate number so reruns reproduce the same sample.
    """
    working_dir = opts[0]
    output_dir = opts[1]
    bcg_coverage = opts[2]
    ba_coverage = opts[3]
    replicate = opts[4] + 100  # offset so basenames sort past earlier runs
    bcg_reads = 0
    ba_reads = 0
    basename = "replicate-{0:03d}".format(replicate)
    if bcg_coverage or ba_coverage:
        # Skip replicates whose lef output already exists from a prior run.
        if not os.path.exists('{0}/{1}-lef.txt.gz'.format(output_dir, basename)):
            fasta = []
            fasta_output = "{0}/{1}.fasta".format(working_dir, basename)
            random_seed = None
            # Read counts: coverage * genome_size / mean read length.
            if bcg_coverage and ba_coverage:
                bcg_reads = int(GENOME_SIZE * float(bcg_coverage) / BCG_LENGTH)
                ba_reads = int(GENOME_SIZE * float(ba_coverage) / BA_LENGTH)
                random_seed = (
                    int(bcg_coverage * 100) * int(ba_coverage * 10000) * replicate + bcg_reads + ba_reads
                )
            elif bcg_coverage:
                bcg_reads = int(GENOME_SIZE * float(bcg_coverage) / BCG_LENGTH)
                random_seed = (
                    int(bcg_coverage * 100) * replicate + bcg_reads
                )
            else:
                ba_reads = int(GENOME_SIZE * float(ba_coverage) / BA_LENGTH)
                random_seed = (
                    int(ba_coverage * 10000) * replicate + ba_reads
                )
            if bcg_coverage:
                bcg_reads = int(GENOME_SIZE * float(bcg_coverage) / BCG_LENGTH)
                random.seed(random_seed)
                # Sequence headers are just the sampled line indices.
                for element in random.sample(range(BCG_TOTAL), bcg_reads):
                    fasta.append(">{0}\n".format(element))
                    fasta.append("{0}\n".format(BCG_SEQUENCES[element]))
            if ba_coverage:
                ba_reads = int(GENOME_SIZE * float(ba_coverage) / BA_LENGTH)
                random.seed(random_seed)
                for element in random.sample(range(BA_TOTAL), ba_reads):
                    fasta.append(">{0}\n".format(element))
                    fasta.append("{0}\n".format(BA_SEQUENCES[element]))
            with open(fasta_output, 'w') as fasta_handle:
                fasta_handle.write("".join(fasta))
            run_command(['mv', fasta_output, output_dir])
            # Count kmers
            # NOTE(review): the jellyfish counting/query step below was
            # disabled by wrapping it in a string literal; only the FASTA
            # files are produced as the code stands.
            """
            jellyfish = '{0}/{1}.jf'.format(working_dir, basename)
            run_command(['jellyfish', 'count', '-C', '-m', '31',
                         '-s', '5M', '-o', jellyfish, fasta_output])
            run_command(['rm', fasta_output])
            ba_txt = '{0}/{1}-ba.txt'.format(working_dir, basename)
            run_command(
                ['jellyfish', 'query', '-s', BA_KMERS, '-o', ba_txt, jellyfish]
            )
            ba_hit = has_hit(ba_txt)
            bcg_txt = '{0}/{1}-bcg.txt'.format(working_dir, basename)
            run_command(
                ['jellyfish', 'query', '-s', BCG_KMERS, '-o', bcg_txt, jellyfish]
            )
            bcg_hit = has_hit(bcg_txt)
            lef_txt = '{0}/{1}-lef.txt'.format(working_dir, basename)
            run_command(
                ['jellyfish', 'query', '-s', LEF_KMERS, '-o', lef_txt, jellyfish]
            )
            run_command(['rm', jellyfish])
            if ba_hit and bcg_hit:
                print("\tSUCCESS: Replicate: {0} Random Seed: {1} Reads: BCG {2} BA {3}".format(
                    replicate, random_seed, bcg_reads, ba_reads
                ))
                run_command(['gzip', '-f', bcg_txt])
                run_command(['gzip', '-f', lef_txt])
                run_command(['gzip', '-f', ba_txt])
                run_command(['mv', '{0}.gz'.format(ba_txt), output_dir])
                run_command(['mv', '{0}.gz'.format(bcg_txt), output_dir])
                run_command(['mv', '{0}.gz'.format(lef_txt), output_dir])
            else:
                run_command(['rm', bcg_txt])
                run_command(['rm', lef_txt])
                run_command(['rm', ba_txt])
            """
        else:
            print("\tSkipping replicate: {0}, already completed".format(replicate))
if __name__ == '__main__':
    parser = ap.ArgumentParser(
        prog='subsample-sequences.py', conflict_handler='resolve',
        description="Create random subsamples of input sequences."
    )
    parser.add_argument('ba_sequences', type=str, metavar="BA_SEQS",
                        help='File of B. anthracis sequences, one per line.')
    parser.add_argument('bcg_sequences', type=str, metavar="BCG_SEQS",
                        help='File of B. cereus sequences, one per line.')
    parser.add_argument('ba_coverages', type=str, metavar="BA_COVERAGES",
                        help=('Coverages to subsample B. anthracis to.'))
    parser.add_argument('bcg_coverages', type=str, metavar="BCG_COVERAGES",
                        help=('Coverages to subsample B. cereus to.'))
    parser.add_argument('working', type=str, metavar="WORKING_DIR",
                        help=('Directory to put temporary files.'))
    parser.add_argument('output', type=str, metavar="OUTPUT",
                        help=('Directory to subsampled FASTA files to.'))
    parser.add_argument('ba', type=str, metavar="BA_KMERS",
                        help=('BA specific kmers.'))
    parser.add_argument('bcg', type=str, metavar="BCG_KMERS",
                        help=('BCG specific kmers.'))
    parser.add_argument('lef', type=str, metavar="LEF_KMERS",
                        help=('Lethal factor kmers.'))
    parser.add_argument('--genome_size', metavar="INT", type=int,
                        default=5200000,
                        help='Genome size (Default 5.2Mb)')
    parser.add_argument('--length', metavar="INT", type=int, default=100,
                        help='Per line sequence length (Default 100)')
    parser.add_argument('--replicates', metavar="INT", type=int,
                        default=20,
                        help='Number of replicates per coverage (Default 100)')
    parser.add_argument('--cpu', metavar="INT", type=int, default=23,
                        help='Total number of processes to launch (Default 1)')
    args = parser.parse_args()
    # Populate the module-level globals the pooled workers read; on Linux the
    # fork start method shares these with the subsample() workers.
    BA_SEQUENCES, BA_LENGTH = read_sequences(args.ba_sequences,
                                             min_length=args.length)
    BCG_SEQUENCES, BCG_LENGTH = read_sequences(args.bcg_sequences,
                                               min_length=args.length)
    print("Mean Read Lengths: BA {0}bp, BCG {1}bp".format(
        BA_LENGTH, BCG_LENGTH
    ))
    BA_TOTAL = len(BA_SEQUENCES)
    BCG_TOTAL = len(BCG_SEQUENCES)
    BA_KMERS = args.ba
    BCG_KMERS = args.bcg
    LEF_KMERS = args.lef
    GENOME_SIZE = args.genome_size
    bcg_coverages = read_coverages(args.bcg_coverages)
    ba_coverages = read_coverages(args.ba_coverages)
    for bcg_coverage in bcg_coverages:
        print("Working on BCG coverage: {0}x".format(bcg_coverage))
        for ba_coverage in ba_coverages:
            print("\tWorking on BA coverage: {0}x".format(ba_coverage))
            MATCHES = 0
            start = 1
            end = args.cpu
            outputs = {}
            path = "{0}/{1}/{2}".format(args.output, bcg_coverage, ba_coverage)
            if not os.path.exists(path):
                os.makedirs(path)
            # Keep launching batches of `cpu` replicates until enough FASTA
            # outputs exist for this coverage combination.
            while MATCHES < args.replicates:
                with Pool(processes=args.cpu) as pool:
                    pool.map(
                        subsample,
                        [[args.working, path, bcg_coverage, ba_coverage, r]
                         for r in range(start, end + 1)]
                    )
                MATCHES = len(glob.glob("{0}/*.fasta".format(path)))
                print("\tTests: {0}, Successes: {1}.".format(end, MATCHES))
                if MATCHES < args.replicates:
                    start = end
                    end = start + args.cpu
acf707c84c01f5450a973a552b724faea89e6c98 | 6,361 | py | Python | catkin_ws/src/f23-LED/led_emitter/src/led_emitter_node_gazebo.py | SunnyChing/duckietown5909 | b3c1c0088fb2802c0198b52846a8454f2ec0e79b | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src/f23-LED/led_emitter/src/led_emitter_node_gazebo.py | SunnyChing/duckietown5909 | b3c1c0088fb2802c0198b52846a8454f2ec0e79b | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src/f23-LED/led_emitter/src/led_emitter_node_gazebo.py | SunnyChing/duckietown5909 | b3c1c0088fb2802c0198b52846a8454f2ec0e79b | [
"CC-BY-2.0"
] | null | null | null | #!/usr/bin/env python
import rospy
from rgb_led import *
import sys
import time
from std_msgs.msg import Float32, Int8, String
from duckietown_msgs.msg import BoolStamped, GazeboLED,AprilTagsWithInfos
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from visualization_msgs.msg import Marker
from nav_msgs.msg import Odometry
class LEDEmitter(object):
    """Gazebo LED emitter node.

    Re-publishes the Duckiebot LED state for simulation: listens for
    pattern-change requests, an FSM on/off switch, AprilTag detections
    (used to infer the nearby crossing/intersection) and odometry, and at
    10 Hz publishes the current ``GazeboLED`` pattern plus an RViz text
    marker placed next to the robot.
    """
    def __init__(self):
        self.node_name = rospy.get_name()
        #self.pub_state = rospy.Publisher("~current_led_state",Float32,queue_size=1)
        # Requested pattern name arrives as a plain String message.
        self.sub_pattern = rospy.Subscriber("~change_color_pattern", String, self.changePattern)
        self.pub_pattern = rospy.Publisher("~emit_color_pattern",GazeboLED,queue_size=1)
        self.sub_switch = rospy.Subscriber("~switch",BoolStamped,self.cbSwitch)
        self.sub_topic_tag = rospy.Subscriber("~tag", AprilTagsWithInfos, self.cbTag, queue_size=1)
        self.sub_odom = rospy.Subscriber("odom", Odometry, self.cbOdom, queue_size=1)
        self.pub_state = rospy.Publisher("cstate", Marker, queue_size=1)
        self.cycle = None  # blink frequency (Hz) of the active pattern
        self.pattern = GazeboLED()  # message republished by cycleTimer
        self.is_on = False
        self.active = False
        self.protocol = rospy.get_param("~LED_protocol") #should be a list of tuples
        self.tags_id_seen=[]  # AprilTag ids from the latest detection batch
        # Republish the current state at 10 Hz.
        self.cycle_timer = rospy.Timer(rospy.Duration.from_sec(.1), self.cycleTimer)
        self.current_pattern_name = None
        self.changePattern_('light_off')
        self.cross = ''
        self.intersection = ""
        # Crossing number -> AprilTag ids that mark that crossing.
        self.cross_dict={'1':{'35','61','52','32','57'},
                '2':{'31','58','49','41','60','66','1','71'},
                '3':{'30','9','33','68','72','64','38'},
                '4':{'36','62','65','70','51','63','119'},
                }
        # Intersection label -> AprilTag ids that mark that intersection.
        self.intersection_dict={'trafficLight':{'49','52','51','53','78','70','72','71'},
                    'stopSign1':{'35','61','36','9','65','30'},
                    'stopSign2':{'33','62','31','68','58'},
                    'stopSign3':{'39','63','119','41','60','67'},
                    'stopSign4':{'66','1','38','64','57'},}
        self.veh_name = rospy.get_namespace().strip("/")
        if not self.veh_name:
            # fall back on private param passed thru rosrun
            # syntax is: rosrun <pkg> <node> _veh:=<bot-id>
            if rospy.has_param('~veh'):
                self.veh_name = rospy.get_param('~veh')
        rospy.loginfo('[%s] Vehicle: %s'%(self.node_name, self.veh_name))
        self.pattern.car =self.veh_name
    def cbSwitch(self, switch_msg): # active/inactive switch from FSM
        self.active = switch_msg.data
    #-----------------------------------------------------------
    def changePattern(self, msg):
        # Topic callback: unwrap the String message and delegate.
        self.changePattern_(msg.data)
    def changePattern_(self, pattern_name):
        """Look up *pattern_name* in the LED protocol and make it current."""
        #rospy.loginfo('changePattern(%r)' % pattern_name)
        # NOTE(review): `color` is looked up but never used -- only the
        # pattern NAME is published in pattern.color; confirm intended.
        color = self.protocol['signals'][pattern_name]['color']
        self.cycle = self.protocol['signals'][pattern_name]['frequency']
        #print("color: %s, freq (Hz): %s "%(color, self.cycle))
        self.pattern.color= str(pattern_name)
        #self.pattern.id = self.tags_id_seen
    def cbTag(self, tag_msgs):
        """AprilTag callback: derive the nearby crossing and intersection
        labels from the set of tag ids currently in view."""
        #loop through list of april tags
        self.tags_id_seen=[]
        for taginfo in tag_msgs.detections:
            # rospy.loginfo("[%s] taginfo." %(taginfo))
            #print(self.get_rotation(taginfo.pose))
            # Skip detections at a steep angle (|yaw| > 0.3 rad) or farther
            # than 0.7 along x.
            # NOTE(review): `return` aborts the WHOLE callback on the first
            # out-of-range tag; `continue` may have been intended -- confirm.
            if self.get_rotation(taginfo.pose) >0.3 or self.get_rotation(taginfo.pose) <-0.3 or taginfo.pose.pose.position.x >0.7:
                return
            self.tags_id_seen.append(str(taginfo.id))
        # Classify the crossing from the first matching id set.
        if len([x for x in self.tags_id_seen if x in self.cross_dict['1'] ])!= 0:
            self.cross = "1"
        elif len([x for x in self.tags_id_seen if x in self.cross_dict['2'] ])!= 0:
            self.cross = "2"
        elif len([x for x in self.tags_id_seen if x in self.cross_dict['3'] ])!= 0:
            self.cross = "3"
        elif len([x for x in self.tags_id_seen if x in self.cross_dict['4'] ])!= 0:
            self.cross = "4"
        else:
            self.cross = ''
        self.pattern.cross= self.cross
        # Classify the intersection type the same way.
        if len([x for x in self.tags_id_seen if x in self.intersection_dict['trafficLight'] ])!= 0:
            #print self.tags_id_seen,self.intersection_dict['trafficLight']
            self.intersection = "trafficLight"
        elif len([x for x in self.tags_id_seen if x in self.intersection_dict['stopSign1'] ])!= 0:
            self.intersection = "stopSign1"
        elif len([x for x in self.tags_id_seen if x in self.intersection_dict['stopSign2'] ])!= 0:
            self.intersection = "stopSign2"
        elif len([x for x in self.tags_id_seen if x in self.intersection_dict['stopSign3'] ])!= 0:
            self.intersection = "stopSign3"
        elif len([x for x in self.tags_id_seen if x in self.intersection_dict['stopSign4'] ])!= 0:
            self.intersection = "stopSign4"
        else:
            self.intersection = ""
        #print self.intersection
        self.pattern.intersection = self.intersection
    def cycleTimer(self,event):
        """10 Hz timer: publish the LED pattern plus an RViz text marker
        showing the current color at the robot's position."""
        if True:
            self.pub_pattern.publish(self.pattern)
            #rospy.loginfo("%s in %s." %(self.pattern.car,self.pattern.color))
            # NOTE(review): self.x/self.y are only assigned in cbOdom; if
            # this timer fires before the first odometry message arrives,
            # this raises AttributeError -- confirm.
            text = Marker()
            text.header.frame_id = "/world"
            text.header.stamp = rospy.Time.now()
            text.ns = self.veh_name
            text.id = 1
            text.type = 9 # text
            text.action = 0
            text.pose.position.x = self.x + 0.2
            text.pose.position.y = self.y + 0.2
            text.pose.position.z = 0.0
            text.pose.orientation.w = 1.0
            text.scale.z = 0.08
            text.color.r = 1.0 # yellow (r=1, g=1)
            text.color.g = 1.0
            text.color.b = 0.0
            text.color.a = 1.0
            text.text = str(self.pattern.color)
            self.pub_state.publish(text)
    def cbOdom(self,odo):
        # Cache the latest planar position for marker placement.
        self.x = odo.pose.pose.position.x
        self.y = odo.pose.pose.position.y
    def get_rotation (self, msg):
        """Return the yaw (rad) extracted from a pose's quaternion."""
        orientation_q = msg.pose.orientation
        orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]
        (roll, pitch, yaw) = euler_from_quaternion (orientation_list)
        return yaw
if __name__ == '__main__':
    # Standard ROS entry point: register the node, construct the emitter
    # (which wires up all publishers/subscribers and the 10 Hz timer),
    # then hand control to the rospy event loop.
    rospy.init_node('led_emitter',anonymous=False)
    node = LEDEmitter()
    rospy.spin()
| 42.125828 | 130 | 0.597862 |
acf7082c154ab73b6f58aad66a34a3f1fecc5de1 | 557 | py | Python | docs/source/gallery/examples/geochem/scaling.py | bomtuckle/pyrolite | c0af0ade14ff26b4e9fdd5a033b27e73df085c55 | [
"BSD-3-Clause"
] | 69 | 2019-02-25T00:17:53.000Z | 2022-03-31T17:26:48.000Z | docs/source/gallery/examples/geochem/scaling.py | bomtuckle/pyrolite | c0af0ade14ff26b4e9fdd5a033b27e73df085c55 | [
"BSD-3-Clause"
] | 68 | 2018-07-20T09:01:01.000Z | 2022-03-31T16:28:36.000Z | docs/source/gallery/examples/geochem/scaling.py | bomtuckle/pyrolite | c0af0ade14ff26b4e9fdd5a033b27e73df085c55 | [
"BSD-3-Clause"
] | 24 | 2018-10-02T04:32:10.000Z | 2021-11-10T08:24:17.000Z | """
Unit Scaling
=============
"""
import pyrolite.geochem
import pandas as pd
# Use the fully-qualified option name: the bare "precision" alias was
# deprecated (pandas 1.4) and removed in pandas 2.0.
pd.set_option("display.precision", 3) # smaller outputs
########################################################################################
from pyrolite.util.synthetic import normal_frame
df = normal_frame(columns=['CaO', 'MgO', 'SiO2', 'FeO', 'Ni', 'Ti', 'La', 'Lu', 'Mg/Fe'])
########################################################################################
# Convert elemental columns to/from oxide components; input values are ppm.
cols = ["Ni", "NiO", "La", "La2O3"]
df.head(2).pyrochem.convert_chemistry(to=cols)[cols] # these are in ppm!
| 34.8125 | 89 | 0.438061 |
acf7087468fdd645b8cf6eb9c06e6ac954b2b23b | 8,522 | py | Python | IELTS/Ielts test taker.py | VinayakBector2002/56IELTS_Taker | 996c71b0c58482e09687ac08a4c85d3092fcc429 | [
"Apache-2.0"
] | 2 | 2020-08-25T20:22:35.000Z | 2021-10-10T03:48:09.000Z | IELTS/Ielts test taker.py | VinayakBector2002/56IELTS_Taker | 996c71b0c58482e09687ac08a4c85d3092fcc429 | [
"Apache-2.0"
] | 1 | 2020-09-12T18:35:13.000Z | 2020-09-12T18:35:58.000Z | IELTS/Ielts test taker.py | VinayakBector2002/56IELTS_Taker | 996c71b0c58482e09687ac08a4c85d3092fcc429 | [
"Apache-2.0"
] | 4 | 2020-09-15T07:00:00.000Z | 2021-08-05T07:05:03.000Z |
import tkinter as tk
from tkinter import *
import os
from PIL import ImageTk, Image
import sounddevice as sd
from scipy.io.wavfile import write
def txt_Editor():
    # Writing test: importing Notepad launches the bundled text editor as
    # a side effect of the import (so it only opens on the first click).
    import Notepad
def Delete_screen():
    # Tear down the root window created by main_screen(), ending the app.
    screen.destroy()
def Listening_Test():
    """Listening test screen: opens a small audio-player window so the
    candidate can load and play a listening-section recording."""
    from tkinter import filedialog
    from pygame import mixer
    class MusicPlayer:
        # Minimal pygame-mixer front-end with Load/Play/Pause/Stop buttons.
        def __init__(self, window ):
            window.geometry('320x100'); window.title('Iris Player'); window.resizable(0,0)
            Load = Button(window, text = 'Load', width = 10, font = ('Times', 10), command = self.load)
            Play = Button(window, text = 'Play', width = 10,font = ('Times', 10), command = self.play)
            Pause = Button(window,text = 'Pause', width = 10, font = ('Times', 10), command = self.pause)
            Stop = Button(window ,text = 'Stop', width = 10, font = ('Times', 10), command = self.stop)
            Load.place(x=0,y=20);Play.place(x=110,y=20);Pause.place(x=220,y=20);Stop.place(x=110,y=60)
            self.music_file = False  # path chosen via Load; False until then
            self.playing_state = False  # True while PAUSED (toggle flag)
        def load(self):
            self.music_file = filedialog.askopenfilename()
        def play(self):
            # Ignore Play until a file has been loaded.
            if self.music_file:
                mixer.init()
                mixer.music.load(self.music_file)
                mixer.music.play()
        def pause(self):
            # Toggle: first press pauses playback, second press resumes.
            if not self.playing_state:
                mixer.music.pause()
                self.playing_state=True
            else:
                mixer.music.unpause()
                self.playing_state = False
        def stop(self):
            mixer.music.stop()
    global Listening
    # Child window of the main menu; kept global like the other screens.
    Listening = Toplevel(screen)
    Listening.title("IELTS Listening")
    Listening.geometry("500x500")
    MusicPlayer(Listening)
def speaking_Test():
    """Speaking test screen: opens a window with a test-number entry,
    shows the prompt image for that test, and records the answer."""
    def OG2():
        # Wired to "Record my test": adds a stop button and starts the
        # external recorder (importing VoiceRecorder records as a side
        # effect of the import, so only the first click records).
        Button(speaking,text = "Stop Recording ",height = "2", width = "30", command = stop).pack()
        import VoiceRecorder
    def Og():
        # NOTE(review): apparently an older inline copy of the recorder
        # (sounddevice/soundfile based); nothing calls it any more --
        # OG2/VoiceRecorder is wired to the button instead.  Confirm
        # before removing.
        Button(speaking,text = "Stop Recording ",height = "2", width = "30", command = stop).pack()
        import argparse
        import tempfile
        import queue
        import sys
        import sounddevice as sd
        import soundfile as sf
        import numpy # Make sure NumPy is loaded before it is used in the callback
        assert numpy # avoid "imported but unused" message (W0611)
        print("Og check")
        def int_or_str(text):
            """Helper function for argument parsing."""
            try:
                return int(text)
            except ValueError:
                return text
        parser = argparse.ArgumentParser(add_help=False)
        parser.add_argument(
            '-l', '--list-devices', action='store_true',
            help='show list of audio devices and exit')
        args, remaining = parser.parse_known_args()
        if args.list_devices:
            print(sd.query_devices())
            parser.exit(0)
        parser = argparse.ArgumentParser(
            description=__doc__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            parents=[parser])
        parser.add_argument(
            'filename', nargs='?', metavar='FILENAME',
            help='audio file to store recording to')
        parser.add_argument(
            '-d', '--device', type=int_or_str,
            help='input device (numeric ID or substring)')
        parser.add_argument(
            '-r', '--samplerate', type=int, help='sampling rate')
        parser.add_argument(
            '-c', '--channels', type=int, default=1, help='number of input channels')
        parser.add_argument(
            '-t', '--subtype', type=str, help='sound file subtype (e.g. "PCM_24")')
        args = parser.parse_args(remaining)
        q = queue.Queue()
        def callback(indata, frames, time, status):
            """This is called (from a separate thread) for each audio block."""
            if status:
                print(status, file=sys.stderr)
            q.put(indata.copy())
        try:
            if args.samplerate is None:
                device_info = sd.query_devices(args.device, 'input')
                # soundfile expects an int, sounddevice provides a float:
                args.samplerate = int(device_info['default_samplerate'])
            if args.filename is None:
                # Name the recording after the test number typed by the user.
                args.filename = tempfile.mktemp(prefix= test_entry.get()+'___' ,
                                                suffix='.wav', dir='')
            # Make sure the file is opened before recording anything:
            with sf.SoundFile(args.filename, mode='x', samplerate=args.samplerate,
                              channels=args.channels, subtype=args.subtype) as file:
                with sd.InputStream(samplerate=args.samplerate, device=args.device,
                                    channels=args.channels, callback=callback):
                    print('#' * 80)
                    print('press Ctrl+C to stop the recording')
                    print('#' * 80)
                    while True:
                        file.write(q.get())
        except KeyboardInterrupt:
            print('\nRecording finished: ' + repr(args.filename))
            parser.exit(0)
        except Exception as e:
            parser.exit(type(e).__name__ + ': ' + str(e))
    global stop
    def stop():
        # NOTE(review): this only RETURNS the KeyboardInterrupt class; it
        # neither raises it nor signals the recorder, so the "Stop
        # Recording" button has no effect -- confirm intended behaviour.
        return KeyboardInterrupt
    def calling():
        # Show the prompt image matching the test number entered by the
        # user (all tests currently point at the same 1.jpg file).
        test_name = test_entry.get()
        if test_name == "1":
            Image.open(r"C:\\Users\becto\Desktop\Documents\\Python Scripts\\IELTS\1.jpg").show()
        elif test_name=="2":
            Image.open(r"C:\Users\becto\Desktop\Documents\Python Scripts\IELTS\1.jpg").show()
        elif test_name =="3":
            Image.open(r"C:\Users\becto\Desktop\Documents\Python Scripts\IELTS\1.jpg").show()
        elif test_name =="4":
            Image.open(r"C:\Users\becto\Desktop\Documents\Python Scripts\IELTS\1.jpg").show()
        elif test_name=="5":
            Image.open(r"C:\Users\becto\Desktop\Documents\Python Scripts\IELTS\1.jpg").show()
        elif test_name =="6":
            Image.open(r"C:\Users\becto\Desktop\Documents\Python Scripts\IELTS\1.jpg").show()
        elif test_name =="7":
            Image.open(r"C:\Users\becto\Desktop\Documents\Python Scripts\IELTS\1.jpg").show()
        elif test_name =="8":
            Image.open(r"C:\Users\becto\Desktop\Documents\Python Scripts\IELTS\1.jpg").show()
        elif test_name =="9":
            Image.open(r"C:\Users\becto\Desktop\Documents\Python Scripts\IELTS\1.jpg").show()
        elif test_name =="10":
            Image.open(r"C:\Users\becto\Desktop\Documents\Python Scripts\IELTS\1.jpg").show()
    global speaking
    # Child window plus the entry widget holding the test number; both are
    # globals because the nested callbacks above read them.
    speaking = Toplevel(screen)
    speaking.title("IELTS Speaking")
    speaking.geometry("500x500")
    test = StringVar()
    print("Checkpoint 1")
    global test_entry
    test_entry = Entry(speaking, textvariable = test)
    test_entry.pack()
    print("Checkpoint 2")
    print("Checkpoint 3")
    # NOTE(review): calling() runs before the user can type a number, so
    # the entry is empty and no image is shown on open -- confirm.
    calling()
    Button(speaking,text = "Record my test",height = "2", width = "30", command = OG2).pack()
def main_screen():
    """Build and run the main menu window with one button per IELTS section.

    Creates the root ``Tk`` window (stored in the module-level ``screen``
    global so the other screens can attach to / destroy it), shows the
    banner image, adds one button per test plus EXIT, then enters the Tk
    mainloop (blocks until the window closes).
    """
    global screen
    screen = Tk()
    screen.configure(bg='#cc1a30')
    screen.geometry("650x700")
    # Keep a local reference to the banner image so it is not garbage
    # collected while the window is alive.
    photo = ImageTk.PhotoImage(Image.open("ielts.jpg"))
    panel = Label(screen, image = photo,bg='#cc1a30' )
    panel.pack( side = tk.TOP, fill = tk.X , expand = False)
    screen.title("IELTS TEST TAKER")
    Label(text = "Welcome! IELTS TEST TAKER", bg = "grey", width = "300", height = "2", font = ("Calibri", 13)).pack()
    Label(text = "", bg='#cc1a30' ).pack()
    Button(text = "Give Listening Test", height = "2", width = "30", command =Listening_Test ).pack()
    Label(text = "", bg='#cc1a30' ).pack()
    # BUG FIX: the command was passed as the STRING "speaking_Test", which
    # Tk evaluates as a Tcl script on click (and fails).  Pass the
    # callable itself; there is no dedicated reading-test screen yet.
    Button(text = "Give Reading Test",height = "2", width = "30", command = speaking_Test).pack()
    Label(text = "", bg='#cc1a30' ).pack()
    Button(text = "Give Writing Test", height = "2", width = "30", command = txt_Editor).pack()
    Label(text = "", bg='#cc1a30' ).pack()
    Button(text = "Give Speaking Test", height = "2", width = "30", command = speaking_Test).pack()
    Label(text = "", bg='#cc1a30' ).pack()
    Button(text = "EXIT", height = "2", width = "30", command = Delete_screen).pack()
    screen.mainloop()
main_screen()
| 39.271889 | 117 | 0.565126 |
acf7091f2eb5f09e18c6f662c819e5122e3c0133 | 9,483 | py | Python | bart/common/signal.py | douglas-raillard-arm/bart | ee99bdb73ed8ecebc577e01997e2401c1aff99a7 | [
"Apache-2.0"
] | 17 | 2015-08-14T11:30:55.000Z | 2021-07-04T00:02:45.000Z | bart/common/signal.py | douglas-raillard-arm/bart | ee99bdb73ed8ecebc577e01997e2401c1aff99a7 | [
"Apache-2.0"
] | 34 | 2015-09-01T16:25:33.000Z | 2019-02-01T18:14:28.000Z | bart/common/signal.py | douglas-raillard-arm/bart | ee99bdb73ed8ecebc577e01997e2401c1aff99a7 | [
"Apache-2.0"
] | 23 | 2015-08-13T20:50:33.000Z | 2021-07-04T00:02:46.000Z | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
**Signals**
- Definition
A signal is a string representation of a TRAPpy event and the
column in the same event. The signal can be of two types:
- *Pivoted Signal*
A pivoted signal has a pivot specified in its event class.
This means that the signal in the event is a concatenation of different
signals which belong to different **pivot** nodes. The analysis for pivoted
signals must be done by decomposing them into pivoted signals for each node.
For example, an even that represents the load of the CPU can be pivoted on
:code:`"cpu"` which should be a column in the event's `DataFrame`
- *Non-Pivoted Signal*
A non pivoted signal has an event that has no pivot value associated with it.
This probably means that signal has one component and can be analysed without
decomposing it into smaller signals.
- Representation
The following are valid representations of a signal
- :code:`"event_name:event_column"`
- :code:`"trappy.event.class:event_column"`
"""
from trappy.stats.grammar import Parser
from trappy.stats import StatConf
from bart.common.Utils import area_under_curve, interval_sum
# pylint: disable=invalid-name
# pylint: disable=anomalous-backslash-in-string
class SignalCompare(object):
    """Compare two TRAPpy signals over a common time index.

    :param data: TRAPpy FTrace Object
    :type data: :mod:`trappy.ftrace.FTrace`
    :param sig_a: The first signal
    :type sig_a: str
    :param sig_b: The second signal
    :type sig_b: str
    :param config: A dictionary of variables, classes
        and functions that can be used in the statements
    :type config: dict
    :param method: The method to be used for reindexing data
        This can be one of the standard :mod:`pandas.DataFrame`
        methods (eg. pad, bfill, nearest). The default is pad
        or use the last valid observation.
    :type method: str
    :param limit: The number of indices a value will be propagated
        when reindexing. The default is None
    :type limit: int
    :param fill: Whether to fill the NaNs in the data.
        The default value is True.
    :type fill: bool

    .. note::
        Both signals must share the same pivot: either both are pivoted on
        the same column (e.g. a :code:`"cpu"` pivot declared on the trappy
        event class, :mod:`trappy.base.Base`), or both have no pivot at
        all (undefined/None).
    """
    def __init__(self, data, sig_a, sig_b, **kwargs):
        # The Parser resolves signal strings ("event:column") against the
        # trace; remaining kwargs (method/limit/fill) tune reindexing.
        self._parser = Parser(
            data,
            config=kwargs.pop(
                "config",
                None),
            **kwargs)
        self._a = sig_a
        self._b = sig_b
        self._pivot_vals, self._pivot = self._get_signal_pivots()
        # Concatenate the indices by doing any operation (say add)
        self._a_data = self._parser.solve(sig_a)
        self._b_data = self._parser.solve(sig_b)
    def _get_signal_pivots(self):
        """Check that both signals share the same pivot and return the
        intersection of their pivot values.

        :return: ``(pivot_values, pivot)`` where ``pivot`` is the pivot
            column name (or ``False`` for unpivoted signals) and
            ``pivot_values`` lists the values common to both signals
        :raises RuntimeError: if the two signals are pivoted differently
        """
        sig_a_info = self._parser.inspect(self._a)
        sig_b_info = self._parser.inspect(self._b)
        if sig_a_info["pivot"] != sig_b_info["pivot"]:
            # NOTE(review): the concatenated message is missing a space
            # ("...signalsshould...").
            raise RuntimeError("The pivot column for both signals" +
                               "should be same (%s,%s)"
                               % (sig_a_info["pivot"], sig_b_info["pivot"]))
        if sig_a_info["pivot"]:
            # Only analyse pivot values present in BOTH signals.
            pivot_vals = set(
                sig_a_info["pivot_values"]).intersection(sig_b_info["pivot_values"])
            pivoted = sig_a_info["pivot"]
        else:
            # Unpivoted signals are treated as one "default" pivot node.
            pivot_vals = [StatConf.GRAMMAR_DEFAULT_PIVOT]
            pivoted = False
        return pivot_vals, pivoted
    def conditional_compare(self, condition, **kwargs):
        """Conditionally compare the two signals.

        The comparison has two components:

        - **Value Coefficient** :math:`\\alpha_{v}`: the ratio of the
          areas under signal A and signal B restricted to the times where
          *condition* is true::

              alpha_v = area(S_A | C(t)) / area(S_B | C(t))

        - **Time Coefficient** :math:`\\alpha_{t}`: the fraction of the
          overlapping time span during which *condition* holds::

              alpha_t = T_valid / T_total

        For example, with ``A = [0, 0, 0, 3, 3, 0, 0, 0]`` and
        ``B = [0, 0, 2, 2, 2, 2, 1, 1]`` the condition ``A > B`` is valid
        between T=3 and T=5, giving ``alpha_v = 1.5`` and
        ``alpha_t = 2/7``.

        :param condition: A condition that returns a truth value and obeys
            the grammar syntax, e.g. ``"event_x:sig_a > event_x:sig_b"``
        :type condition: str
        :param method: The method for area calculation. This can
            be any of the integration methods supported in `numpy`
            or `rect`
        :type method: str
        :param step: The step behaviour for area and time
            summation calculation
        :type step: str
        :returns: For pivoted signals, a dict
            ``{pivot_name: {pivot_val: (alpha_v, alpha_t)}}``;
            for non-pivoted signals, the tuple ``(alpha_v, alpha_t)``
        """
        if self._pivot:
            result = {self._pivot: {}}
        mask = self._parser.solve(condition)
        step = kwargs.get("step", "post")
        for pivot_val in self._pivot_vals:
            a_piv = self._a_data[pivot_val]
            b_piv = self._b_data[pivot_val]
            # Area of each signal restricted to where the condition holds.
            area = area_under_curve(a_piv[mask[pivot_val]], **kwargs)
            try:
                area /= area_under_curve(b_piv[mask[pivot_val]], **kwargs)
            except ZeroDivisionError:
                # Signal B contributes zero area under the condition.
                area = float("nan")
            # Overlapping time span of the two signals ...
            duration = min(a_piv.last_valid_index(), b_piv.last_valid_index())
            duration -= max(a_piv.first_valid_index(),
                            b_piv.first_valid_index())
            # ... used to normalise the time the condition is true.
            duration = interval_sum(mask[pivot_val], step=step) / duration
            if self._pivot:
                result[self._pivot][pivot_val] = area, duration
            else:
                result = area, duration
        return result
    def get_overshoot(self, **kwargs):
        """Special case of :func:`conditional_compare` with the condition
        ``"sig_a > sig_b"``.

        :param method: The method for area calculation. This can
            be any of the integration methods supported in `numpy`
            or `rect`
        :type method: str
        :param step: The step behaviour for calculation of area
            and time summation
        :type step: str

        .. seealso::
            :func:`conditional_compare`
        """
        condition = " ".join([self._a, ">", self._b])
        return self.conditional_compare(condition, **kwargs)
    def get_undershoot(self, **kwargs):
        """Special case of :func:`conditional_compare` with the condition
        ``"sig_a < sig_b"``.

        :param method: The method for area calculation. This can
            be any of the integration methods supported in `numpy`
            or `rect`
        :type method: str
        :param step: The step behaviour for calculation of area
            and time summation
        :type step: str

        .. seealso::
            :func:`conditional_compare`
        """
        condition = " ".join([self._a, "<", self._b])
        return self.conditional_compare(condition, **kwargs)
| 31.504983 | 94 | 0.550037 |
acf70ae6188d58829a7483a17e0ba97d81e021b4 | 6,058 | py | Python | parseXml.py | jackastner/so-nlcode-multilang | 9f610c3eb112f103686f9a5e2954905bbbd5ada1 | [
"MIT"
] | 1 | 2020-05-25T22:07:49.000Z | 2020-05-25T22:07:49.000Z | parseXml.py | jackastner/so-nlcode-multilang | 9f610c3eb112f103686f9a5e2954905bbbd5ada1 | [
"MIT"
] | null | null | null | parseXml.py | jackastner/so-nlcode-multilang | 9f610c3eb112f103686f9a5e2954905bbbd5ada1 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import ast
import re
import json
import astor
import html
from multiprocessing import Pool
#from csharp.title_filtering.SVM import SVM
# Input/output locations: the pre-filtered StackOverflow dump files
# produced by the grep commands noted below, plus the destination JSON.
params = {
#    "langXmlFile" : "/scratch0/python_en.xml",
    "langXmlFile" : "Posts.python.xml",
#    "xmlFile" : "/scratch0/answers_en_nonzero.xml",
    "xmlFile" : "Posts.answers.xml",
    "outputFile" : "python_all.json"}
# First download and process the stackoverflow files
# os.system('wget https://archive.org/download/stackexchange/stackoverflow.com-Posts.7z')
# os.system('7z x stackoverflow.com-Posts.7z')
#
#os.system('grep "python" Posts.xml > ' + params['langXmlFile'])
#os.system('grep "PostTypeId=\"2\"" Posts.xml > ' + params['xmlFile'])
# # Title filtering using SVM
# s = SVM()
# s.train("../csharp/title_filtering/balanced/pos_train.txt", "../csharp/title_filtering/balanced/neg_train.txt")
# s.test("../csharp/title_filtering/balanced/pos_test.txt", "../csharp/title_filtering/balanced/neg_test.txt")
# First pass. Get the posts tagged with C#. Filter the input using a grep on c# so that this is faster
# Attribute extractors for raw Posts.xml rows (attribute values are still
# XML-escaped at this point; bodies are unescaped later).
accepted_answer_re = re.compile(r"AcceptedAnswerId=\"(\d+)\"")
id_re = re.compile(r"Id=\"(\d+)\"")
title_re = re.compile(r"Title=\"([^\"]+)\"")
tags_re = re.compile(r"Tags=\"([^\"]+)\"")
body_re = re.compile(r"Body=\"([^\"]+)\"")
def process_question(line):
    """Parse one Posts.xml question row.

    Returns ``(accepted_answer_id, {"id", "title"})`` when the row records
    an accepted answer and carries the "python" tag; otherwise ``None``.
    """
    answer_match = accepted_answer_re.search(line)
    if answer_match is None:
        return None
    if "python" not in tags_re.search(line).group(1):
        return None
    info = {
        "id": int(id_re.search(line).group(1)),
        "title": title_re.search(line).group(1),
    }
    return int(answer_match.group(1)), info
print('finding tagged posts')
# Pass 1: build {accepted_answer_id: {question id, title}} for every
# python-tagged question, fanning the line parsing out over 16 processes.
with open(params["langXmlFile"], 'r') as f:
    pool = Pool(processes=16)
    acceptedAnswers = dict([p for p in pool.map(process_question, [l for l in f]) if p is not None])
print('finding accepted answers')
def find_answers(line):
    """Match one answer row against the collected questions.

    Returns ``(question_id, raw_answer_body)`` when this row's Id is a key
    of the module-level ``acceptedAnswers`` dict; otherwise ``None``.
    """
    question_id = int(id_re.search(line).group(1))
    if question_id not in acceptedAnswers:
        return None
    return question_id, body_re.search(line).group(1)
# Pass 2, find the corresponding accepted answer
# Attach each accepted answer's raw HTML body to its question record.
with open(params["xmlFile"], 'r') as f:
    pool = Pool(processes=16)
    codes = [p for p in pool.map(find_answers, [l for l in f]) if p is not None]
for qid, code in codes:
    acceptedAnswers[qid]["code"] = code
def prepare_answer_snippet(answer):
    """Extract a single parsable Python snippet from an accepted answer.

    Parameters
    ----------
    answer : tuple
        ``(question_id, info)`` where ``info`` holds the question ``id``,
        ``title`` and, if pass 2 found one, the raw HTML ``code`` of the
        accepted-answer body.

    Returns
    -------
    dict or None
        ``{"question_id", "parent_answer_post_id", "intent", "snippet"}``
        when the answer contains exactly one well-formed ``<pre>`` block
        that parses as Python; otherwise ``None``.
    """
    rid, ans = answer
    # No accepted-answer body was attached to this question in pass 2.
    if "code" not in ans:
        return None
    # The answer must contain a <pre> code block at all.
    if "pre" not in ans["code"]:
        return None
    # NOTE: the SVM title filter is currently disabled (see the
    # commented-out code in the module header); every title is accepted.
    soup = BeautifulSoup(html.unescape(ans["code"]), 'html.parser')
    code_tags = soup.find_all('pre')
    # Keep only answers with exactly one piece of code.
    if len(code_tags) != 1:
        return None
    code = code_tags[0].get_text().strip()
    # Snippet length must be reasonable: more than 6, at most 1000 chars.
    if not (len(code) > 6 and len(code) <= 1000):
        return None
    # Filter out snippets that are clearly not Python (markup, SQL,
    # Windows paths, URLs, registry keys, or a single bare identifier).
    if code[0] == "<" or code[0] == "=" or code[0] == "@" or code[0] == "$" or \
       code[0:7].lower() == "select " or code[0:7].lower() == "update " or code[0:6].lower() == "alter " or \
       code[0:2].lower() == "c:" or code[0:4].lower() == "http" or code[0:4].lower() == "hkey" or \
       re.match(r"^[a-zA-Z0-9_]*$", code) is not None:
        return None
    # Now also make sure it parses as Python.
    try:
        code = code.replace('>>>', '').strip()
        ast.parse(code)
    except (SyntaxError, ValueError):
        # Narrowed from a bare ``except``, which also swallowed
        # KeyboardInterrupt/SystemExit; only parse failures are expected
        # here (ast.parse raises SyntaxError, or ValueError on null bytes).
        print('parse fail')
        return None
    # Collapse to one line when only a single "real" statement remains
    # (imports, defs and comments are ignored for the count).
    lines = [l for l in code.strip().split('\n') if
             not re.match(r"^\s*(import|from|def|#)", l)
             and l.strip()]
    if len(lines) == 1:
        code = lines[0].strip()
    return {
        "question_id": rid,
        "parent_answer_post_id": ans['id'],
        "intent": ans['title'],
        "snippet": code}
print('starting processing')
# Extract one snippet per (question, accepted answer) pair in parallel and
# dump the non-empty results as a single JSON array.
pool = Pool(processes=16)
code_pairs = pool.map(prepare_answer_snippet, acceptedAnswers.items())
with open(params["outputFile"], 'w') as f:
    json.dump([p for p in code_pairs if p is not None], f)
# Create training and validation and test sets
#os.system('shuf python_all.txt > python_shuffled.txt')
#numLines = sum(1 for line in open('python_all.txt'))
#trainLines = int(0.8 * numLines)
#validLines = int(0.1 * numLines)
#testLines = numLines - trainLines - validLines
#os.system('head -n ' + str(trainLines) + ' python_shuffled.txt > train.txt')
#os.system('tail -n +' + str(trainLines + 1) + ' python_shuffled.txt | head -n ' + str(validLines) + ' > valid.txt')
#os.system('tail -n +' + str(trainLines + validLines + 1) + ' python_shuffled.txt > test.txt')
# Title Labeling
# This is the way I did it. Then I removed the database, so this is deprecated.
# # Get titles for manual labeling
#
# sqlite3 Posts.sqlite3 "select a.title from Posts a, Posts b where a.tags like '%c#%' and a.accepted_answer_id is not null and a.accepted_answer_id = b.id and b.body like '%<code>%' and b.body not like '%<code>%<code>%' order by random() limit 1000; " > titles_1000.txt
#
# sqlite3 Posts.sqlite3 "select a.title from Posts a, Posts b where a.tags like '%c#%' and a.accepted_answer_id is not null and a.accepted_answer_id = b.id and b.body like '%<code>%' and b.body not like '%<code>%<code>%'; " > titles_all.txt
#
#
# grep "^g " titles_1000.txt > neg.txt
# grep "^n " titles_1000.txt > pos.txt
#
# # post
# sed -e "s/^g //" neg.txt | head -n 283 > neg_train.txt
# sed -e "s/^g //" neg.txt | tail -n 283 > neg_test.txt
#
# sed -e "s/^n //" pos.txt | head -n 116 > pos_train.txt
# sed -e "s/^n //" pos.txt | tail -n 116 > pos_test.txt
| 39.594771 | 272 | 0.609442 |
acf70b66a427ee251f8af03d006494e615fd1e70 | 747 | py | Python | libs/group/handlers/private_command_start.py | PushDotGame/telegram-forward-bot | 40a54aff1a8b10e79fa8a6f611e2371c79869340 | [
"Apache-2.0"
] | 1 | 2020-07-09T02:26:14.000Z | 2020-07-09T02:26:14.000Z | libs/group/handlers/private_command_start.py | PushDotGame/telegram-bots | 40a54aff1a8b10e79fa8a6f611e2371c79869340 | [
"Apache-2.0"
] | null | null | null | libs/group/handlers/private_command_start.py | PushDotGame/telegram-bots | 40a54aff1a8b10e79fa8a6f611e2371c79869340 | [
"Apache-2.0"
] | null | null | null | from telegram.ext import (Dispatcher, CommandHandler, Filters)
from telegram.ext.dispatcher import run_async
from libs.group.kvs import kvs
COMMAND = 'start'
def attach(dispatcher: Dispatcher):
    """Register the private-chat /start command handler on *dispatcher*."""
    handler = CommandHandler(
        command=COMMAND,
        filters=Filters.private,
        callback=_private_command_start,
    )
    dispatcher.add_handler(handler)
@run_async
def _private_command_start(update, context):
    """Reply to /start in a private chat with the configured greeting."""
    greeting = kvs['command_start'].format(
        owner_name=kvs['owner_name'],
        owner_username=kvs['owner_username'],
    )
    update.message.reply_text(
        text=greeting,
        reply_to_message_id=update.effective_message.message_id,
        disable_web_page_preview=True,
    )
| 26.678571 | 65 | 0.65328 |
acf70d67c75b3c219a4cc6fc3e0736e214b7ac7f | 567 | py | Python | tests/conftest.py | dxxxm/opencv_wrapper | 4838185cf37b8d93190b5761dcc815ba285ff0cf | [
"MIT"
] | 16 | 2019-04-03T18:34:57.000Z | 2021-11-24T09:24:10.000Z | tests/conftest.py | anbergem/cvhelper | 4838185cf37b8d93190b5761dcc815ba285ff0cf | [
"MIT"
] | 7 | 2019-04-04T10:31:48.000Z | 2020-06-21T10:16:18.000Z | tests/conftest.py | anbergem/cvhelper | 4838185cf37b8d93190b5761dcc815ba285ff0cf | [
"MIT"
] | 3 | 2019-12-20T13:42:19.000Z | 2021-08-13T08:37:14.000Z | import pytest
from opencv_wrapper.model import Contour
# Shared pytest fixtures for the opencv_wrapper test-suite.  Everything is
# built from pytest-mock doubles, so the tests never touch real images.
@pytest.fixture
def points(mocker):
    # Opaque stand-in for a contour's point array.
    return mocker.Mock()
@pytest.fixture
def contour(points):
    # A real Contour wrapping the mocked points.
    return Contour(points)
@pytest.fixture
def image(mocker):
    # Minimal image double: non-empty (len/size of 1) MagicMock.
    img = mocker.MagicMock()
    img.__len__.return_value = 1
    img.size.return_value = 1
    return img
@pytest.fixture
def gray_image(image):
    # 2-D (single-channel) variant of the image double.
    image.ndim = 2
    return image
@pytest.fixture
def color_image(image):
    # 3-D (multi-channel) variant of the image double.
    image.ndim = 3
    return image
@pytest.fixture
def image_uint8(image, np_mock):
    # NOTE(review): `np_mock` is not defined in this file, and the fixture
    # never returns `image` (so it yields None) -- confirm against the
    # rest of the test-suite.
    image.dtype = np_mock.uint8
| 14.538462 | 40 | 0.710758 |
acf70df66e35f3ee35e01b86af984772a799d93f | 2,139 | py | Python | main.py | sergevkim/ImageTranslation | b90f71b6abf0950569e6567ed67cb4bb9f99eaaf | [
"MIT"
] | 1 | 2020-11-28T18:35:31.000Z | 2020-11-28T18:35:31.000Z | main.py | sergevkim/ImageTranslation | b90f71b6abf0950569e6567ed67cb4bb9f99eaaf | [
"MIT"
] | null | null | null | main.py | sergevkim/ImageTranslation | b90f71b6abf0950569e6567ed67cb4bb9f99eaaf | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
import random
import numpy as np #TODO remove with set_seed
import torch
from tirma.datamodules import CityscapesDataModule
from tirma.loggers import NeptuneLogger
from tirma.models import (
#CycleGANTranslator,
Pix2PixTranslator,
)
from tirma.trainer import Trainer
from config import (
CommonArguments,
DataArguments,
TrainArguments,
SpecificArguments,
)
def set_seed(seed=9):
    """Seed every RNG in use (torch CPU/CUDA, random, numpy) and force
    deterministic cuDNN kernels so training runs are reproducible."""
    torch.backends.cudnn.deterministic = True
    for seeder in (torch.manual_seed, torch.cuda.manual_seed,
                   random.seed, np.random.seed):
        seeder(seed)
def main(args):
    """Wire up the model, datamodule and trainer from parsed CLI args and
    run training.

    :param args: argparse namespace produced by the ``__main__`` block;
        its field names mirror the config dataclasses in ``config.py``.
    """
    set_seed()
    model = Pix2PixTranslator(
        learning_rate=args.learning_rate,
        scheduler_gamma=args.scheduler_gamma,
        scheduler_step_size=args.scheduler_step_size,
        mode=args.mode,
        encoder_blocks_num=args.encoder_blocks_num,
        decoder_blocks_num=args.decoder_blocks_num,
        verbose=args.verbose,
        device=args.device,
    ).to(args.device)
    # When overfitting a single batch, force a tiny batch size of 2.
    datamodule = CityscapesDataModule(
        data_path=args.data_path,
        batch_size=args.batch_size if not args.one_batch_overfit else 2,
        num_workers=args.num_workers,
    )
    datamodule.setup(
        val_ratio=args.val_ratio,
        one_batch_overfit=args.one_batch_overfit,
    )
    #logger = NeptuneLogger(
    #    api_key=None,
    #    project_name=None,
    #)
    logger = None  # Neptune logging is currently disabled (see above).
    trainer = Trainer(
        logger=logger,
        max_epoch=args.max_epoch,
        verbose=args.verbose,
        version=args.version,
    )
    trainer.fit(
        model=model,
        datamodule=datamodule,
    )
if __name__ == "__main__":
    parser = ArgumentParser()
    # Merge all config dataclass defaults into one flat dict; later groups
    # overwrite earlier ones on duplicate field names.
    default_args_dict = {
        **vars(CommonArguments()),
        **vars(DataArguments()),
        **vars(TrainArguments()),
        **vars(SpecificArguments()),
    }
    # Auto-generate one CLI flag per config field, typed after its default.
    # NOTE(review): for bool defaults this yields type=bool, and
    # argparse's bool("False") is truthy -- confirm boolean flags behave
    # as intended from the command line.
    for arg, value in default_args_dict.items():
        parser.add_argument(
            f'--{arg}',
            type=type(value),
            default=value,
            help=f'<{arg}>, default: {value}',
        )
    args = parser.parse_args()
    main(args)
| 23 | 72 | 0.650304 |
acf70e3fc7e40a117bfbc98522ff1eead499e229 | 7,406 | py | Python | nonrad/ccd.py | warcard/nonrad | 702d54f449f6c6e6bf53abefe25845f2de0eab2e | [
"MIT"
] | 21 | 2020-11-15T02:58:24.000Z | 2022-03-30T05:39:56.000Z | nonrad/ccd.py | warcard/nonrad | 702d54f449f6c6e6bf53abefe25845f2de0eab2e | [
"MIT"
] | 2 | 2020-12-10T19:53:25.000Z | 2022-03-31T10:03:27.000Z | nonrad/ccd.py | warcard/nonrad | 702d54f449f6c6e6bf53abefe25845f2de0eab2e | [
"MIT"
] | 15 | 2020-11-22T04:23:43.000Z | 2022-03-19T03:31:20.000Z | # -*- coding: utf-8 -*-
# Copyright (c) Chris G. Van de Walle
# Distributed under the terms of the MIT License.
"""Convenience utilities for nonrad.
This module contains various convenience utilities for working with and
preparing input for nonrad.
"""
from typing import List, Optional, Tuple, Union
import numpy as np
from pymatgen.core import Structure
from pymatgen.io.vasp.outputs import Vasprun
from scipy.optimize import curve_fit
from nonrad.nonrad import AMU2KG, ANGS2M, EV2J, HBAR
def get_cc_structures(
    ground: Structure,
    excited: Structure,
    displacements: np.ndarray,
    remove_zero: bool = True
) -> Tuple[List, List]:
    """Generate displaced structures for a configuration-coordinate diagram.

    Parameters
    ----------
    ground : pymatgen.core.structure.Structure
        structure of the ground (final) state
    excited : pymatgen.core.structure.Structure
        structure of the excited (initial) state
    displacements : list(float)
        fractional displacements along the ground -> excited path; applied
        to both surfaces (the excited set is shifted by +1, i.e. centred
        on the excited geometry)
    remove_zero : bool
        drop the 0% displacement from the list (default True)

    Returns
    -------
    ground_structs : list(pymatgen.core.structure.Structure)
        displaced ground-state structures
    excited_structs : list(pymatgen.core.structure.Structure)
        displaced excited-state structures
    """
    disps = np.array(displacements)
    if remove_zero:
        disps = disps[disps != 0.]
    # Linear interpolation between the endpoints; the excited-state images
    # follow the same path shifted by one full ground->excited displacement.
    structs_gnd = ground.interpolate(excited, nimages=disps)
    structs_exc = ground.interpolate(excited, nimages=(disps + 1.))
    return structs_gnd, structs_exc
def get_dQ(ground: Structure, excited: Structure) -> float:
    """Calculate dQ from the initial and final structures.

    dQ is the mass-weighted displacement between the two geometries:
    sqrt( sum_i m_i * |r_i(ground) - r_i(excited)|^2 ).

    Parameters
    ----------
    ground : pymatgen.core.structure.Structure
        structure of the ground (final) state
    excited : pymatgen.core.structure.Structure
        structure of the excited (initial) state

    Returns
    -------
    float
        the dQ value (amu^{1/2} Angstrom)
    """
    weighted_sq = [
        gsite.distance(esite) ** 2 * gsite.specie.atomic_mass
        for gsite, esite in zip(ground, excited)
    ]
    return np.sqrt(np.sum(weighted_sq))
def get_Q_from_struct(
    ground: Structure,
    excited: Structure,
    struct: Union[Structure, str],
    tol: float = 1e-4,
    nround: int = 5,
) -> float:
    """Calculate the Q value for a given structure.

    Assumes ``struct`` lies on the linear interpolation path between the
    ground and excited structures; recovers its fractional position along
    that path and scales it by the total dQ.

    Parameters
    ----------
    ground : pymatgen.core.structure.Structure
        pymatgen structure corresponding to the ground (final) state
    excited : pymatgen.core.structure.Structure
        pymatgen structure corresponding to the excited (initial) state
    struct : pymatgen.core.structure.Structure or str
        structure to evaluate (may also be a path to a structure file)
    tol : float
        coordinates whose ground->excited displacement is <= tol are
        discarded (barely-moving sites would introduce numerical noise)
    nround : int
        number of decimal places to round to in determining the Q value

    Returns
    -------
    float
        the Q value (amu^{1/2} Angstrom) of the structure
    """
    if isinstance(struct, str):
        # accept a file path as well as an in-memory structure
        tstruct = Structure.from_file(struct)
    else:
        tstruct = struct
    dQ = get_dQ(ground, excited)
    excited_coords = excited.cart_coords
    ground_coords = ground.cart_coords
    struct_coords = tstruct.cart_coords
    # per-coordinate displacement of the endpoints; mask out coordinates
    # that barely move to avoid dividing by numerical noise
    dx = excited_coords - ground_coords
    ind = np.abs(dx) > tol
    # fractional position of struct along the path, one value per coordinate
    poss_x = np.round((struct_coords - ground_coords)[ind] / dx[ind], nround)
    # take the most common fraction (the mode) as the structure's position
    val, count = np.unique(poss_x, return_counts=True)
    return dQ * val[np.argmax(count)]
def get_PES_from_vaspruns(
    ground: Structure,
    excited: Structure,
    vasprun_paths: List[str],
    tol: float = 0.001
) -> Tuple[np.ndarray, np.ndarray]:
    """Extract the potential energy surface (PES) from vasprun.xml files.

    Reads each vasprun.xml to extract the final energy and the Q value of
    the final structure, returning both as arrays (energies shifted so the
    minimum is zero).

    Parameters
    ----------
    ground : pymatgen.core.structure.Structure
        pymatgen structure corresponding to the ground (final) state
    excited : pymatgen.core.structure.Structure
        pymatgen structure corresponding to the excited (initial) state
    vasprun_paths : list(strings)
        paths to the vasprun.xml files that make up the PES.  The minimum
        (0% displacement) should be included, and each path should end in
        'vasprun.xml' (e.g. /path/to/vasprun.xml)
    tol : float
        tolerance passed through to get_Q_from_struct

    Returns
    -------
    Q : np.array(float)
        Q values (amu^{1/2} Angstrom), one per vasprun
    energy : np.array(float)
        energies (eV), one per vasprun, relative to the minimum
    """
    num = len(vasprun_paths)
    Q, energy = (np.zeros(num), np.zeros(num))
    for i, vr_fname in enumerate(vasprun_paths):
        # skip DOS/eigenvalue parsing: only energies and structures needed
        vr = Vasprun(vr_fname, parse_dos=False, parse_eigen=False)
        Q[i] = get_Q_from_struct(ground, excited, vr.structures[-1], tol=tol)
        energy[i] = vr.final_energy
    # shift so the PES minimum sits at zero energy
    return Q, (energy - np.min(energy))
def get_omega_from_PES(
    Q: np.ndarray,
    energy: np.ndarray,
    Q0: Optional[float] = None,
    ax=None,
    q: Optional[np.ndarray] = None
) -> float:
    """Calculate the harmonic phonon frequency for the given PES.

    Fits the parabola ``0.5 * omega**2 * (Q - Q0)**2 + dE`` to the sampled
    PES and converts the fitted curvature into an energy quantum.

    Parameters
    ----------
    Q : np.array(float)
        array of Q values (amu^{1/2} Angstrom) corresponding to each vasprun
    energy : np.array(float)
        array of energies (eV) corresponding to each vasprun
    Q0 : float
        fix the minimum of the parabola (default is None)
    ax : matplotlib.axes.Axes
        optional axis object to plot the resulting fit (default is None)
    q : np.array(float)
        array of Q values to evaluate the fitting function at

    Returns
    -------
    float
        harmonic phonon frequency from the PES in eV
    """
    def f(Q, omega, Q0, dE):
        # harmonic potential centred at Q0, offset by dE
        return 0.5 * omega**2 * (Q - Q0)**2 + dE
    # set bounds to restrict Q0 to the given Q0 value; the 1e-10 window
    # keeps the lower bound strictly below the upper one as scipy requires
    bounds = (-np.inf, np.inf) if Q0 is None else \
        ([-np.inf, Q0 - 1e-10, -np.inf], [np.inf, Q0, np.inf])
    popt, _ = curve_fit(f, Q, energy, bounds=bounds)    # pylint: disable=W0632
    # optional plotting to check fit
    if ax is not None:
        q_L = np.max(Q) - np.min(Q)
        if q is None:
            q = np.linspace(np.min(Q) - 0.1 * q_L, np.max(Q) + 0.1 * q_L, 1000)
        ax.plot(q, f(q, *popt))
    # the fitted omega is in sqrt(eV / (amu * Angstrom^2)); convert the
    # curvature to an energy quantum (eV) via hbar and the SI unit factors
    return HBAR * popt[0] * np.sqrt(EV2J / (ANGS2M**2 * AMU2KG))
| 34.446512 | 79 | 0.661491 |
acf70f38e152ef36018b177df62f3bdcb603a2f3 | 4,776 | py | Python | everest/wordhash/word.py | rsbyrne/everest | 1ec06301cdeb7c2b7d85daf6075d996c5529247e | [
"MIT"
] | 2 | 2020-12-17T02:27:28.000Z | 2020-12-17T23:50:13.000Z | everest/wordhash/word.py | rsbyrne/everest | 1ec06301cdeb7c2b7d85daf6075d996c5529247e | [
"MIT"
] | 1 | 2020-12-07T10:14:45.000Z | 2020-12-07T10:14:45.000Z | everest/wordhash/word.py | rsbyrne/everest | 1ec06301cdeb7c2b7d85daf6075d996c5529247e | [
"MIT"
] | 1 | 2020-10-22T11:16:50.000Z | 2020-10-22T11:16:50.000Z | ###############################################################################
''''''
###############################################################################
import random
import os
import string
from . import Reseed, reseed
# Word lists ship as plain newline-separated text files in the package's
# ``_namesources`` directory, next to this module.
parentPath = os.path.abspath(os.path.dirname(__file__))
namesDir = os.path.join(parentPath, '_namesources')
# Absolute path of a word-list file given its basename.
pathFn = lambda n: os.path.join(namesDir, n)
with open(pathFn('cities.txt'), mode = 'r') as file:
    CITIES = file.read().split('\n')
with open(pathFn('english_words.txt'), mode = 'r') as file:
    ENGLISH = file.read().split('\n')
with open(pathFn('names.txt'), mode = 'r') as file:
    NAMES = file.read().split('\n')
# Proper nouns: cities and personal names, deduplicated and sorted.
PROPER = sorted(set([*CITIES, *NAMES]))
# Names of the letters of the Greek alphabet.
GREEK = [
    'alpha', 'beta', 'gamma', 'delta', 'epsilon',
    'zeta', 'eta', 'theta', 'iota', 'kappa',
    'lambda', 'mu', 'nu', 'xi', 'omicron',
    'pi', 'rho', 'sigma', 'tau', 'upsilon',
    'phi', 'chi', 'psi', 'omega',
    ]
# NATO phonetic alphabet.
PHONETIC = [
    'Alfa', 'Bravo', 'Charlie', 'Delta', 'Echo',
    'Foxtrot', 'Golf', 'Hotel', 'India', 'Juliett',
    'Kilo', 'Lima', 'Mike', 'November', 'Oscar',
    'Papa', 'Quebec', 'Romeo', 'Sierra', 'Tango',
    'Uniform', 'Victor', 'Whiskey', 'Xray', 'Yankee',
    'Zulu',
    ]
# Spelled-out decimal digits.
WORDNUMS = [
    'zero', 'one', 'two', 'three', 'four',
    'five', 'six', 'seven', 'eight', 'nine',
    ]
# Lowercased union of the phonetic, Greek and number-word lists.
CODEWORDS = sorted(set([
    *[n.lower() for n in PHONETIC],
    *[n.lower() for n in GREEK],
    *[n.lower() for n in WORDNUMS],
    ]))
def _make_syllables():
consonants = list("bcdfghjklmnpqrstvwxyz")
conclusters = [
'bl', 'br', 'dr', 'dw', 'fl',
'fr', 'gl', 'gr', 'kl', 'kr',
'kw', 'pl', 'pr', 'sf', 'sk',
'sl', 'sm', 'sn', 'sp', 'st',
'sw', 'tr', 'tw'
]
condigraphs = [
'sh', 'ch', 'th', 'ph', 'zh',
'ts', 'tz', 'ps', 'ng', 'sc',
'gh', 'rh', 'wr'
]
allcons = [*consonants, *conclusters, *condigraphs]
vowels = [*list("aeiou")]
voweldiphthongs = [
'aa', 'ae', 'ai', 'ao', 'au',
'ea', 'ee', 'ei', 'eo', 'eu',
'ia', 'ie', 'ii', 'io', 'iu',
'oa', 'oe', 'oi', 'oo', 'ou',
'ua', 'ue', 'ui', 'uo', 'uu'
]
allvowels = [*vowels, *voweldiphthongs]
cvs = [consonant + vowel for vowel in allvowels for consonant in allcons]
vcs = [vowel + consonant for consonant in allcons for vowel in allvowels]
vcvs = [vowel + cv for vowel in allvowels for cv in cvs]
cvcs = [consonant + vc for consonant in allcons for vc in vcs]
syllables = [*cvs, *vcs, *vcvs, *cvcs]
syllables = list(sorted(set(syllables)))
return syllables
SYLLABLES = _make_syllables()
# Each helper below is wrapped by ``reseed``: from the usage here the
# decorator injects a ``seed`` object exposing ``rchoice`` (seeded random
# choice).  NOTE(review): inferred from call sites — confirm against the
# Reseed implementation in this package.
@reseed
def get_random_syllable(seed = None):
    """Return one random pronounceable syllable."""
    syllable = seed.rchoice(SYLLABLES)
    return syllable
@reseed
def get_random_word(seed = None, length = 3):
    """Return a pronounceable word made of ``length`` random syllables."""
    outWord = ''
    for _ in range(length):
        outWord += get_random_syllable(seed = seed)
    return outWord
@reseed
def get_random_alphanumeric(seed = None, length = 6):
    """Return ``length`` random lowercase-alphanumeric characters."""
    characters = 'abcdefghijklmnopqrstuvwxyz0123456789'
    choices = [seed.rchoice(characters) for i in range(length)]
    return ''.join(choices)
@reseed
def get_random_phrase(seed = None, phraselength = 2, wordlength = 2):
    """Return ``phraselength`` random words joined by hyphens."""
    # 2 * 2 yields 64 bits of entropy
    phraseList = []
    for _ in range(phraselength):
        phraseList.append(
            get_random_word(length = wordlength, seed = seed)
        )
    phrase = "-".join(phraseList)
    return phrase
@reseed
def get_random_english(seed = None, n = 1):
    """Return ``n`` random English words joined by hyphens."""
    return '-'.join([seed.rchoice(ENGLISH) for i in range(n)])
@reseed
def get_random_numerical(seed = None, n = 1):
    """Return a string of ``n`` random decimal digits."""
    return ''.join([seed.rchoice(string.digits) for _ in range(n)])
@reseed
def get_random_greek(seed = None, n = 1):
    """Return ``n`` random Greek letter names joined by hyphens."""
    return '-'.join([seed.rchoice(GREEK) for i in range(n)])
@reseed
def get_random_city(seed = None, n = 1):
    """Return ``n`` random city names joined by hyphens."""
    return '-'.join([seed.rchoice(CITIES) for i in range(n)])
@reseed
def get_random_phonetic(seed = None, n = 1):
    """Return ``n`` random NATO phonetic-alphabet words joined by hyphens."""
    return '-'.join([seed.rchoice(PHONETIC) for i in range(n)])
@reseed
def get_random_codeword(seed = None, n = 1):
    """Return ``n`` random codewords (phonetic/greek/number words)."""
    return '-'.join([seed.rchoice(CODEWORDS) for i in range(n)])
@reseed
def get_random_wordnum(seed = None, n = 1):
    """Return ``n`` random spelled-out digits joined by hyphens."""
    return '-'.join([seed.rchoice(WORDNUMS) for i in range(n)])
@reseed
def get_random_name(seed = None, n = 1):
    """Return ``n`` random personal names joined by hyphens."""
    return '-'.join([seed.rchoice(NAMES) for i in range(n)])
@reseed
def get_random_proper(seed = None, n = 1):
    """Return ``n`` random proper nouns (cities + names) joined by hyphens."""
    return '-'.join([seed.rchoice(PROPER) for i in range(n)])
@reseed
def get_random_cityword(seed = None):
    """Return a random city and a random English word joined by a hyphen."""
    return '-'.join([seed.rchoice(s) for s in [CITIES, ENGLISH]])
###############################################################################
| 33.398601 | 79 | 0.552554 |
acf7122670ba77efcd3caad03e8fdfa54f762e91 | 3,374 | py | Python | tests/reports/pytesseract02/test_ambev20191T.py | lucasjoao/tcc | 508326a57af1283d2f00aacd2cfefc48afdec4eb | [
"MIT"
] | 1 | 2020-10-13T23:12:25.000Z | 2020-10-13T23:12:25.000Z | tests/reports/pytesseract02/test_ambev20191T.py | lucasjoao/tcc | 508326a57af1283d2f00aacd2cfefc48afdec4eb | [
"MIT"
] | null | null | null | tests/reports/pytesseract02/test_ambev20191T.py | lucasjoao/tcc | 508326a57af1283d2f00aacd2cfefc48afdec4eb | [
"MIT"
] | null | null | null | import unittest
from src import manager as m
from src.helper import result_helper as rh
from data import data as data
class TestsAmbev20191T(unittest.TestCase):
    """OCR extraction tests for the Ambev 2019 Q1 report (pytesseract, --psm 6).

    These tests pin the current, imperfect behaviour of the pipeline on this
    document: several expected values are asserted as *absent*
    (``assertNotIn``) / empty, documenting known extraction failures.
    """
    filename = None
    manager_pytesseract = None
    @classmethod
    def setUpClass(cls):
        # One shared manager per class: OCR is expensive, reuse across tests.
        cls.filename = 'ambev_2019_1T.pdf'
        cls.manager_pytesseract = m.manager([cls.filename], 'pytesseract', '--psm 6')
    @classmethod
    def tearDownClass(cls):
        cls.filename = None
        cls.manager_pytesseract = None
    def test_lucro_liquido_monetary(self):
        """Net income via R$ pattern: 4 matches, expected value not among them."""
        lucro_liquido_monetary_pytesseract = self.manager_pytesseract.run_lucro_liquido_monetary()
        result_pytesseract = lucro_liquido_monetary_pytesseract[self.filename]
        numbers_from_result_pytesseract = rh.result_helper.get_numbers_as_list(result_pytesseract)
        self.assertEqual(len(result_pytesseract), 4, 'lucro líquido (R$): tamanho resultado (pytesseract)')
        self.assertNotIn(data.LUCRO_LIQUIDO[self.filename], numbers_from_result_pytesseract,
                         'lucro líquido (R$): valor (pytesseract)')
    def test_lucro_liquido_number(self):
        """Net income via number-after-keyword: 1 match, expected value missing."""
        lucro_liquido_number_pytesseract = self.manager_pytesseract.run_lucro_liquido_number()
        result_pytesseract = lucro_liquido_number_pytesseract[self.filename]
        numbers_from_result = rh.result_helper.get_numbers_as_list(result_pytesseract)
        self.assertEqual(len(result_pytesseract), 1, 'lucro líquido (número após conjunto de busca): tamanho resultado')
        self.assertNotIn(data.LUCRO_LIQUIDO[self.filename], numbers_from_result,
                         'lucro líquido (número após conjunto de busca): valor')
    def test_patrimonio_liquido_monetary(self):
        """Shareholders' equity via R$ pattern: no matches at all."""
        result = self.manager_pytesseract.run_patrimonio_liquido_monetary()[self.filename]
        self.assertEqual(len(result), 0, 'patrimônio líquido (R$): tamanho resultado')
    def test_patrimonio_liquido_number(self):
        """Shareholders' equity via number-after-keyword: 2 matches, value missing."""
        patrimonio_liquido_number_pytesseract = self.manager_pytesseract.run_patrimonio_liquido_number()
        result_pytesseract = patrimonio_liquido_number_pytesseract[self.filename]
        number_from_result = rh.result_helper.get_numbers_as_list(result_pytesseract)
        self.assertEqual(len(result_pytesseract), 2,
                         'patrimônio líquido (número após conjunto de busca): tamanho resultado')
        self.assertNotIn(data.PATRIMONIO_LIQUIDO[self.filename], number_from_result,
                         'patrimônio líquido (número após conjunto de busca): valor')
    def test_roe_monetary(self):
        """ROE via R$ pattern: no matches."""
        result = self.manager_pytesseract.run_roe_monetary()[self.filename]
        self.assertEqual(len(result), 0, 'ROE (R$): tamanho resultado')
    def test_roe_number(self):
        """ROE via number-after-keyword: no matches."""
        result = self.manager_pytesseract.run_roe_number()[self.filename]
        self.assertEqual(len(result), 0, 'ROE (número após conjunto de busca): tamanho resultado')
    def test_roe_calculate(self):
        """Computed ROE: 10 candidates, expected value not among them."""
        calculate_roe_pytesseract = self.manager_pytesseract.run_calculate_roe()
        result_pytesseract = calculate_roe_pytesseract[self.filename]
        self.assertEqual(len(result_pytesseract), 10, 'ROE por cálculo: tamanho resultado (pytesseract)')
        self.assertNotIn(data.ROE[self.filename], result_pytesseract, 'ROE por cálculo: valor')
| 44.986667 | 120 | 0.732365 |
acf7122926f8e4265066dff0ee257b894ffe0f15 | 800 | py | Python | tests/programs/package_code/PackageInitCodeMain.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | tests/programs/package_code/PackageInitCodeMain.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | tests/programs/package_code/PackageInitCodeMain.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import some_package.SomeModule
| 42.105263 | 79 | 0.72375 |
acf7128e00bb3aaaf57aeca2e61ed34aa61cea44 | 405 | py | Python | form/migrations/0008_auto_20200117_2221.py | Akash375/Hackfest_Classplus | 0ae9da78b9bf29293b8325a7172a3b78b13cc984 | [
"Apache-2.0"
] | null | null | null | form/migrations/0008_auto_20200117_2221.py | Akash375/Hackfest_Classplus | 0ae9da78b9bf29293b8325a7172a3b78b13cc984 | [
"Apache-2.0"
] | null | null | null | form/migrations/0008_auto_20200117_2221.py | Akash375/Hackfest_Classplus | 0ae9da78b9bf29293b8325a7172a3b78b13cc984 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.1 on 2020-01-17 16:51
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``im`` and ``image`` fields from the ``eta`` model."""
    dependencies = [
        # must run after the migration that added eta.im
        ('form', '0007_eta_im'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='eta',
            name='im',
        ),
        migrations.RemoveField(
            model_name='eta',
            name='image',
        ),
    ]
| 18.409091 | 47 | 0.530864 |
acf7129384845d6df3608d249c90d3f525f0fccb | 1,006 | py | Python | logging_server.py | lexus2k/embedded-esp32-component-udp_logging | 561d694a199f844e57051f0ba3d19ee18c7beb95 | [
"Apache-2.0"
] | 37 | 2017-05-04T12:11:10.000Z | 2022-03-22T16:32:46.000Z | logging_server.py | lexus2k/embedded-esp32-component-udp_logging | 561d694a199f844e57051f0ba3d19ee18c7beb95 | [
"Apache-2.0"
] | 4 | 2018-11-05T17:18:41.000Z | 2022-01-22T14:05:25.000Z | logging_server.py | lexus2k/embedded-esp32-component-udp_logging | 561d694a199f844e57051f0ba3d19ee18c7beb95 | [
"Apache-2.0"
] | 12 | 2018-06-23T18:14:21.000Z | 2021-12-06T04:14:49.000Z | #!/usr/bin/env python3
# Copyright 2017 by Malte Janduda
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import datetime

UDP_IP = "0.0.0.0"
UDP_PORT = 1337


def main():
    """Receive UDP log datagrams and print each with a receive timestamp."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Allow quick restarts without waiting for the OS to release the port.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((UDP_IP, UDP_PORT))

    print("+============================+")
    print("|  ESP32 UDP Logging Server  |")
    print("+============================+")
    print("")

    try:
        while True:
            data, addr = sock.recvfrom(1024)
            # errors="replace" keeps the server alive on malformed UTF-8
            # (log lines arriving over UDP may be truncated mid-character).
            print(datetime.datetime.now(), data.decode(errors="replace"), end='')
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the server; exit quietly.
        pass
    finally:
        sock.close()


if __name__ == "__main__":
    main()
acf71411e7016e83fe5ffed4ba1b24d2dab9aa48 | 2,724 | py | Python | netbox/dcim/tests/test_forms.py | jeis2497052/netbox | 8741eb38c0b77e493a1194645e1076516c4de556 | [
"Apache-2.0"
] | 1 | 2019-01-15T21:06:23.000Z | 2019-01-15T21:06:23.000Z | netbox/dcim/tests/test_forms.py | jeis2497052/netbox | 8741eb38c0b77e493a1194645e1076516c4de556 | [
"Apache-2.0"
] | null | null | null | netbox/dcim/tests/test_forms.py | jeis2497052/netbox | 8741eb38c0b77e493a1194645e1076516c4de556 | [
"Apache-2.0"
] | 1 | 2021-08-08T12:56:30.000Z | 2021-08-08T12:56:30.000Z | from __future__ import unicode_literals
from django.test import TestCase
from dcim.forms import *
from dcim.models import *
def get_id(model, slug):
    """Return the primary key of the *model* instance with the given slug."""
    instance = model.objects.get(slug=slug)
    return instance.id
class DeviceTestCase(TestCase):
    """Form-validation tests for DeviceForm rack/face/position rules."""
    # Fixture data providing sites, racks, device types, roles, etc.
    fixtures = ['dcim', 'ipam']
    def test_racked_device(self):
        """A racked device with a free position and face validates and saves."""
        test = DeviceForm(data={
            'name': 'test',
            'device_role': get_id(DeviceRole, 'leaf-switch'),
            'tenant': None,
            'manufacturer': get_id(Manufacturer, 'juniper'),
            'device_type': get_id(DeviceType, 'qfx5100-48s'),
            'site': get_id(Site, 'test1'),
            'rack': '1',
            'face': RACK_FACE_FRONT,
            'position': 41,
            'platform': get_id(Platform, 'juniper-junos'),
            'status': STATUS_ACTIVE,
        })
        self.assertTrue(test.is_valid(), test.fields['position'].choices)
        self.assertTrue(test.save())
    def test_racked_device_occupied(self):
        """Placing a device in an already-occupied position fails validation."""
        test = DeviceForm(data={
            'name': 'test',
            'device_role': get_id(DeviceRole, 'leaf-switch'),
            'tenant': None,
            'manufacturer': get_id(Manufacturer, 'juniper'),
            'device_type': get_id(DeviceType, 'qfx5100-48s'),
            'site': get_id(Site, 'test1'),
            'rack': '1',
            'face': RACK_FACE_FRONT,
            'position': 1,
            'platform': get_id(Platform, 'juniper-junos'),
            'status': STATUS_ACTIVE,
        })
        self.assertFalse(test.is_valid())
    def test_non_racked_device(self):
        """A non-racked device (no face/position) validates and saves."""
        test = DeviceForm(data={
            'name': 'test',
            'device_role': get_id(DeviceRole, 'pdu'),
            'tenant': None,
            'manufacturer': get_id(Manufacturer, 'servertech'),
            'device_type': get_id(DeviceType, 'cwg-24vym415c9'),
            'site': get_id(Site, 'test1'),
            'rack': '1',
            'face': None,
            'position': None,
            'platform': None,
            'status': STATUS_ACTIVE,
        })
        self.assertTrue(test.is_valid())
        self.assertTrue(test.save())
    def test_non_racked_device_with_face(self):
        """A face without a position is accepted for non-racked device types."""
        test = DeviceForm(data={
            'name': 'test',
            'device_role': get_id(DeviceRole, 'pdu'),
            'tenant': None,
            'manufacturer': get_id(Manufacturer, 'servertech'),
            'device_type': get_id(DeviceType, 'cwg-24vym415c9'),
            'site': get_id(Site, 'test1'),
            'rack': '1',
            'face': RACK_FACE_REAR,
            'position': None,
            'platform': None,
            'status': STATUS_ACTIVE,
        })
        self.assertTrue(test.is_valid())
        self.assertTrue(test.save())
acf714495042e9fcb81554a5beb69b89a20b0967 | 3,394 | py | Python | nova/console/websocketproxy.py | bopopescu/nova_audit | 1cd2901802f82d39411adfa04cf2f432ff3bf280 | [
"Apache-2.0"
] | 1 | 2020-02-21T19:19:11.000Z | 2020-02-21T19:19:11.000Z | nova/console/websocketproxy.py | bopopescu/nova_audit | 1cd2901802f82d39411adfa04cf2f432ff3bf280 | [
"Apache-2.0"
] | null | null | null | nova/console/websocketproxy.py | bopopescu/nova_audit | 1cd2901802f82d39411adfa04cf2f432ff3bf280 | [
"Apache-2.0"
] | 1 | 2020-07-24T09:15:58.000Z | 2020-07-24T09:15:58.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Websocket proxy that is compatible with OpenStack Nova.
Leverages websockify.py by Joel Martin
'''
import Cookie
import socket
import websockify
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class NovaWebSocketProxy(websockify.WebSocketProxy):
    """WebSocket proxy that authenticates Nova console tokens.

    For each incoming browser connection, validates the console token from
    the cookie against the consoleauth service and proxies traffic to the
    host:port that service returns.
    """

    def __init__(self, *args, **kwargs):
        # The target is resolved per-connection in new_client(), so the
        # static target options of websockify are explicitly disabled.
        websockify.WebSocketProxy.__init__(self, unix_target=None,
                                           target_cfg=None,
                                           ssl_target=None, *args, **kwargs)

    def new_client(self):
        """
        Called after a new WebSocket connection has been established.
        """
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()
        # The console token arrives in the browser cookie; validate it with
        # the consoleauth service to obtain the target host/port.
        cookie = Cookie.SimpleCookie()
        cookie.load(self.headers.getheader('cookie'))
        token = cookie['token'].value
        ctxt = context.get_admin_context()
        rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        connect_info = rpcapi.check_token(ctxt, token=token)
        if not connect_info:
            LOG.audit("Invalid Token: %s", token)
            raise Exception(_("Invalid Token"))

        host = connect_info['host']
        port = int(connect_info['port'])

        # Connect to the target
        self.msg("connecting to: %s:%s" % (host, port))
        LOG.audit("connecting to: %s:%s" % (host, port))
        tsock = self.socket(host, port, connect=True)

        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
                       connect_info['internal_access_path'])
            while True:
                data = tsock.recv(4096, socket.MSG_PEEK)
                if data.find("\r\n\r\n") != -1:
                    # BUG FIX: this used to be
                    # `if not data.split("\r\n")[0].find("200")`, which only
                    # raised when "200" sat at index 0 of the status line and
                    # silently accepted responses *without* a 200 status
                    # (str.find returns -1, which is truthy).  Reject any
                    # response whose status line lacks "200".
                    if data.split("\r\n")[0].find("200") == -1:
                        LOG.audit("Invalid Connection Info %s", token)
                        raise Exception(_("Invalid Connection Info"))
                    # Consume the peeked handshake bytes before proxying.
                    tsock.recv(len(data))
                    break

        if self.verbose and not self.daemon:
            print(self.traffic_legend)

        # Start proxying
        try:
            self.do_proxy(tsock)
        except Exception:
            if tsock:
                tsock.shutdown(socket.SHUT_RDWR)
                tsock.close()
            self.vmsg("%s:%s: Target closed" % (host, port))
            LOG.audit("%s:%s: Target closed" % (host, port))
            raise
acf71535809f97f5994858e9647f742578b7b9ea | 2,202 | py | Python | tomodachi/invoker/decorator.py | xdmiodz/tomodachi | abe449b0d09683cfc4791e61bc951b0de796e80b | [
"MIT"
] | null | null | null | tomodachi/invoker/decorator.py | xdmiodz/tomodachi | abe449b0d09683cfc4791e61bc951b0de796e80b | [
"MIT"
] | 100 | 2021-04-21T10:00:09.000Z | 2022-03-24T23:13:32.000Z | tomodachi/invoker/decorator.py | justcallmelarry/tomodachi | ea5718e25b9e140e8e44a8803d6c8f915f26b392 | [
"MIT"
] | 1 | 2020-09-04T05:53:16.000Z | 2020-09-04T05:53:16.000Z | import inspect
import types
from functools import wraps
from typing import Any, Callable
class DecorationClass(object):
    """Function-mimicking wrapper produced by the ``decorator`` factory.

    Calling an instance first runs the decorator function; the wrapped
    function is invoked only when the decorator returns True or None,
    otherwise the decorator's return value is passed through.  Instances
    report ``__class__`` as ``types.FunctionType`` so function-type checks
    treat them as plain functions.
    """
    def __getattribute__(self, name: str) -> Any:
        # Masquerade as a plain function for __class__ lookups.
        if name == "__class__":
            return types.FunctionType
        return super(DecorationClass, self).__getattribute__(name)

    def __init__(self, fn: Any, decorator_fn: Any, include_function: bool) -> None:
        # Mirror the wrapped function's metadata so introspection
        # (signature, docs, qualname) keeps working on the wrapper.
        self.__closure__ = fn.__closure__
        self.__code__ = fn.__code__
        self.__doc__ = fn.__doc__
        self.__name__ = fn.__name__
        self.__qualname__ = fn.__qualname__
        self.__defaults__ = fn.__defaults__
        self.__annotations__ = fn.__annotations__
        self.__kwdefaults__ = fn.__kwdefaults__
        # Arguments of the most recent call, recorded for inspection.
        self.args: Any = None
        self.kwargs: Any = None
        self.function = fn
        self.decorator_function = decorator_fn
        # Whether the wrapped function is prepended to the decorator's args.
        self.include_function = include_function

    async def __call__(self, *args: Any, **kwargs: Any) -> Any:
        self.args = args
        self.kwargs = kwargs
        if not self.include_function:
            return_value = self.decorator_function(*args, **kwargs)
        else:
            return_value = self.decorator_function(self.function, *args, **kwargs)
        # The decorator function may be sync or async.
        return_value = (await return_value) if inspect.isawaitable(return_value) else return_value
        # True/None mean "proceed": call through to the wrapped function;
        # any other value short-circuits and is returned as-is.
        if return_value is True or return_value is None:
            routine = self.function(*args, **kwargs)
            return (await routine) if inspect.isawaitable(routine) else routine
        return return_value

    def __repr__(self) -> str:
        return "<function {} at {}>".format(self.__qualname__, hex(id(self)))
def decorator(include_function: Any = False) -> Callable:
    """Build a decorator that wraps handler functions in DecorationClass.

    Usable both bare (``@decorator`` applied directly to the decorator
    function) and parameterised (``@decorator(include_function=True)``);
    with ``include_function=True`` the wrapped function is prepended to the
    decorator function's arguments.
    """
    direct_fn = None
    if include_function and callable(include_function):
        # Bare usage: "include_function" is actually the decorator function.
        direct_fn = include_function
        include_function = False

    def _decorator(decorator_func: Callable) -> Callable:
        def _wrapper(func: Callable) -> Callable:
            wrapped = DecorationClass(func, decorator_func, include_function)
            wraps(func)(wrapped)
            return wrapped
        return _wrapper

    return _decorator(direct_fn) if direct_fn else _decorator
| 34.40625 | 98 | 0.660309 |
acf7156687e6938075251e96c7fa8c35dabd40fb | 77 | py | Python | Ago-Dic-2019/ERIK EDUARDO MONTOYA MARTINEZ/PRACTICA 1/CAPITULO 4/4.8Cubos.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ago-Dic-2019/ERIK EDUARDO MONTOYA MARTINEZ/PRACTICA 1/CAPITULO 4/4.8Cubos.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ago-Dic-2019/ERIK EDUARDO MONTOYA MARTINEZ/PRACTICA 1/CAPITULO 4/4.8Cubos.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | Cubos=[]
for numero in range(1,11):
Cubos.append(numero**3)
print(Cubos) | 19.25 | 27 | 0.675325 |
acf715fc15a7df7b482387e53afd194b3fbc1f62 | 2,439 | py | Python | venv/lib/python2.7/site-packages/plotnine/stats/stat_quantile.py | nuriale207/preprocesspack | cc06a9cb79c5e3b392371fcd8d1ccf7185e71821 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/plotnine/stats/stat_quantile.py | nuriale207/preprocesspack | cc06a9cb79c5e3b392371fcd8d1ccf7185e71821 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/plotnine/stats/stat_quantile.py | nuriale207/preprocesspack | cc06a9cb79c5e3b392371fcd8d1ccf7185e71821 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
from warnings import warn
import pandas as pd
import statsmodels.formula.api as smf
from ..doctools import document
from .stat import stat
# method_args are any of the keyword args (other than q) for
# statsmodels.regression.quantile_regression.QuantReg.fit
@document
class stat_quantile(stat):
    """
    Compute quantile regression lines
    {usage}
    Parameters
    ----------
    {common_parameters}
    quantiles : tuple, optional (default: (0.25, 0.5, 0.75))
        Quantiles of y to compute
    formula : str, optional (default: 'y ~ x')
        Formula relating y variables to x variables
    method_args : dict, optional
        Extra arguments passed on to the model fitting method,
        :meth:`statsmodels.regression.quantile_regression.QuantReg.fit`.
    See Also
    --------
    statsmodels.regression.quantile_regression.QuantReg
    plotnine.geoms.geom_quantile
    """
    _aesthetics_doc = """
    {aesthetics_table}
    .. rubric:: Options for computed aesthetics
    ::
        'quantile' # quantile
        'group' # group identifier
    Calculated aesthetics are accessed using the `calc` function.
    e.g. :py:`'stat(quantile)'`.
    """
    REQUIRED_AES = {'x', 'y'}
    DEFAULT_PARAMS = {'geom': 'quantile', 'position': 'identity',
                      'na_rm': False, 'quantiles': (0.25, 0.5, 0.75),
                      'formula': 'y ~ x', 'method_args': {}}
    CREATES = {'quantile', 'group'}

    def setup_params(self, data):
        """Normalise user parameters before computation."""
        params = self.params.copy()
        if params['formula'] is None:
            params['formula'] = 'y ~ x'
            # BUG FIX: the placeholder was never substituted — the warning
            # used to read literally "using '{}'".
            warn("Formula not specified, using '{}'".format(params['formula']))
        try:
            iter(params['quantiles'])
        except TypeError:
            # accept a scalar quantile by wrapping it in a 1-tuple
            params['quantiles'] = (params['quantiles'],)
        return params

    @classmethod
    def compute_group(cls, data, scales, **params):
        # one regression line per requested quantile, stacked row-wise
        res = [quant_pred(q, data, **params)
               for q in params['quantiles']]
        return pd.concat(res, axis=0, ignore_index=True)
def quant_pred(q, data, **params):
    """Fit one quantile regression and predict at the x-range endpoints."""
    model = smf.quantreg(params['formula'], data)
    fit = model.fit(q=q, **params['method_args'])
    x_lo, x_hi = data['x'].min(), data['x'].max()
    group_id = '{}-{}'.format(data['group'].iloc[0], q)
    out = pd.DataFrame({
        'x': [x_lo, x_hi],
        'quantile': q,
        'group': group_id})
    out['y'] = fit.predict(out)
    return out
acf7198b7d2d0582941d9838fbf6b30e2bbb0a77 | 8,956 | py | Python | azure/servicefabric/models/stateful_service_description.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure/servicefabric/models/stateful_service_description.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 2 | 2016-09-30T21:40:24.000Z | 2017-11-10T18:16:18.000Z | azure/servicefabric/models/stateful_service_description.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .service_description import ServiceDescription
class StatefulServiceDescription(ServiceDescription):
    """Describes a stateful service.

    :param application_name:
    :type application_name: str
    :param service_name:
    :type service_name: str
    :param service_type_name:
    :type service_type_name: str
    :param initialization_data:
    :type initialization_data: list of int
    :param partition_description:
    :type partition_description: :class:`PartitionSchemeDescription
     <azure.servicefabric.models.PartitionSchemeDescription>`
    :param placement_constraints: The placement constraints as a string.
     Placement constraints are boolean expressions on node properties and allow
     for restricting a service to particular nodes based on the service
     requirements. For example, to place a service on nodes where NodeType is
     blue specify the following: "NodeColor == blue)".
    :type placement_constraints: str
    :param correlation_scheme:
    :type correlation_scheme: list of :class:`ServiceCorrelationDescription
     <azure.servicefabric.models.ServiceCorrelationDescription>`
    :param service_load_metrics:
    :type service_load_metrics: list of :class:`ServiceLoadMetricDescription
     <azure.servicefabric.models.ServiceLoadMetricDescription>`
    :param service_placement_policies:
    :type service_placement_policies: list of
     :class:`ServicePlacementPolicyDescription
     <azure.servicefabric.models.ServicePlacementPolicyDescription>`
    :param default_move_cost: Possible values include: 'Zero', 'Low',
     'Medium', 'High'
    :type default_move_cost: str or :class:`enum
     <azure.servicefabric.models.enum>`
    :param is_default_move_cost_specified: Indicates if the DefaultMoveCost
     property is specified.
    :type is_default_move_cost_specified: bool
    :param service_package_activation_mode: Possible values include:
     'SharedProcess', 'ExclusiveProcess'
    :type service_package_activation_mode: str or :class:`enum
     <azure.servicefabric.models.enum>`
    :param service_dns_name: The DNS name of the service. It requires the DNS
     system service to be enabled in Service Fabric cluster.
    :type service_dns_name: str
    :param service_kind: Polymorphic Discriminator
    :type service_kind: str
    :param target_replica_set_size: The target replica set size as a number.
    :type target_replica_set_size: int
    :param min_replica_set_size: The minimum replica set size as a number.
    :type min_replica_set_size: int
    :param has_persisted_state: A flag indicating whether this is a persistent
     service which stores states on the local disk. If it is then the value of
     this property is true, if not it is false.
    :type has_persisted_state: bool
    :param flags: Flags indicating whether other properties are set. Each of
     the associated properties corresponds to a flag, specified below, which,
     if set, indicate that the property is specified.
     This property can be a combination of those flags obtained using bitwise
     'OR' operator.
     For example, if the provided value is 6 then the flags for
     QuorumLossWaitDuration (2) and StandByReplicaKeepDuration(4) are set.
     - None - Does not indicate any other properties are set. The value is
     zero.
     - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration
     property is set. The value is 1.
     - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property
     is set. The value is 2.
     - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration
     property is set. The value is 4.
    :type flags: int
    :param replica_restart_wait_duration_seconds: The duration, in seconds,
     between when a replica goes down and when a new replica is created.
    :type replica_restart_wait_duration_seconds: long
    :param quorum_loss_wait_duration_seconds: The maximum duration, in
     seconds, for which a partition is allowed to be in a state of quorum loss.
    :type quorum_loss_wait_duration_seconds: long
    :param stand_by_replica_keep_duration_seconds: The definition on how long
     StandBy replicas should be maintained before being removed.
    :type stand_by_replica_keep_duration_seconds: long
    """

    # NOTE: this class is generated by AutoRest (see the file header); edit
    # the swagger/generator rather than hand-modifying these tables.

    # Client-side validation constraints applied by msrest serialization.
    # The *_duration_seconds maxima correspond to the wire type's uint32 range.
    _validation = {
        'service_name': {'required': True},
        'service_type_name': {'required': True},
        'partition_description': {'required': True},
        'service_kind': {'required': True},
        'target_replica_set_size': {'required': True, 'minimum': 1},
        'min_replica_set_size': {'required': True, 'minimum': 1},
        'has_persisted_state': {'required': True},
        'replica_restart_wait_duration_seconds': {'maximum': 4294967295, 'minimum': 0},
        'quorum_loss_wait_duration_seconds': {'maximum': 4294967295, 'minimum': 0},
        'stand_by_replica_keep_duration_seconds': {'maximum': 4294967295, 'minimum': 0},
    }

    # Maps Python attribute names to the REST payload keys and wire types
    # used by msrest (de)serialization.
    _attribute_map = {
        'application_name': {'key': 'ApplicationName', 'type': 'str'},
        'service_name': {'key': 'ServiceName', 'type': 'str'},
        'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'},
        'initialization_data': {'key': 'InitializationData', 'type': '[int]'},
        'partition_description': {'key': 'PartitionDescription', 'type': 'PartitionSchemeDescription'},
        'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'},
        'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'},
        'service_load_metrics': {'key': 'ServiceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
        'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
        'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'},
        'is_default_move_cost_specified': {'key': 'IsDefaultMoveCostSpecified', 'type': 'bool'},
        'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'},
        'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'},
        'service_kind': {'key': 'ServiceKind', 'type': 'str'},
        'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'},
        'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'},
        'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'},
        'flags': {'key': 'Flags', 'type': 'int'},
        'replica_restart_wait_duration_seconds': {'key': 'ReplicaRestartWaitDurationSeconds', 'type': 'long'},
        'quorum_loss_wait_duration_seconds': {'key': 'QuorumLossWaitDurationSeconds', 'type': 'long'},
        'stand_by_replica_keep_duration_seconds': {'key': 'StandByReplicaKeepDurationSeconds', 'type': 'long'},
    }

    def __init__(self, service_name, service_type_name, partition_description, target_replica_set_size, min_replica_set_size, has_persisted_state, application_name=None, initialization_data=None, placement_constraints=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified=None, service_package_activation_mode=None, service_dns_name=None, flags=None, replica_restart_wait_duration_seconds=None, quorum_loss_wait_duration_seconds=None, stand_by_replica_keep_duration_seconds=None):
        # Shared (stateless + stateful) fields are handled by the base class.
        super(StatefulServiceDescription, self).__init__(application_name=application_name, service_name=service_name, service_type_name=service_type_name, initialization_data=initialization_data, partition_description=partition_description, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, service_load_metrics=service_load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, is_default_move_cost_specified=is_default_move_cost_specified, service_package_activation_mode=service_package_activation_mode, service_dns_name=service_dns_name)
        # Stateful-only fields.
        self.target_replica_set_size = target_replica_set_size
        self.min_replica_set_size = min_replica_set_size
        self.has_persisted_state = has_persisted_state
        self.flags = flags
        self.replica_restart_wait_duration_seconds = replica_restart_wait_duration_seconds
        self.quorum_loss_wait_duration_seconds = quorum_loss_wait_duration_seconds
        self.stand_by_replica_keep_duration_seconds = stand_by_replica_keep_duration_seconds
        # Polymorphic discriminator: marks this description as stateful.
        self.service_kind = 'Stateful'
acf71bc914fe5a2302bfb3d330b7fcd8c035ceea | 5,152 | py | Python | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca_dsz/network/cmd/redirect/type_Status.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | 9 | 2019-11-22T04:58:40.000Z | 2022-02-26T16:47:28.000Z | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca_dsz/network/cmd/redirect/type_Status.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | null | null | null | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca_dsz/network/cmd/redirect/type_Status.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | 8 | 2017-09-27T10:31:18.000Z | 2022-01-08T10:30:46.000Z | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Status.py
from types import *
import mcl.object.IpAddr
STATUS_CONNECTION_TYPE_LISTENING = 1
STATUS_CONNECTION_TYPE_NEW = 2
STATUS_CONNECTION_TYPE_REJECTED = 3
STATUS_CONNECTION_TYPE_CLOSED = 4
class StatusError:
    """Error record (module error code + OS error code) exchanged with the
    redirect command via MCL marshalled messages.

    Attribute access is restricted to the two known fields; any other name
    raises AttributeError, mirroring the wire format exactly.
    """

    # The only attributes this record exposes.
    _FIELDS = ('errorModule', 'errorOs')

    def __init__(self):
        # Bypass __setattr__ by writing to __dict__ directly.
        self.__dict__['errorModule'] = 0
        self.__dict__['errorOs'] = 0

    def __getattr__(self, name):
        if name in self._FIELDS:
            return self.__dict__[name]
        raise AttributeError("Attribute '%s' not found" % name)

    def __setattr__(self, name, value):
        if name not in self._FIELDS:
            raise AttributeError("Attribute '%s' not found" % name)
        self.__dict__[name] = value

    def Marshal(self, mmsg):
        """Append both error codes as a sub-message to *mmsg*."""
        from mcl.object.Message import MarshalMessage
        submsg = MarshalMessage()
        submsg.AddU32(MSG_KEY_STATUS_ERROR_ERROR_MODULE, self.__dict__['errorModule'])
        submsg.AddU32(MSG_KEY_STATUS_ERROR_ERROR_OS, self.__dict__['errorOs'])
        mmsg.AddMessage(MSG_KEY_STATUS_ERROR, submsg)

    def Demarshal(self, dmsg, instance=-1):
        """Populate both error codes from the sub-message found in *dmsg*."""
        import mcl.object.Message
        msgData = dmsg.FindData(MSG_KEY_STATUS_ERROR, mcl.object.Message.MSG_TYPE_MSG, instance)
        submsg = mcl.object.Message.DemarshalMessage(msgData)
        self.__dict__['errorModule'] = submsg.FindU32(MSG_KEY_STATUS_ERROR_ERROR_MODULE)
        self.__dict__['errorOs'] = submsg.FindU32(MSG_KEY_STATUS_ERROR_ERROR_OS)
class StatusConnection:
    """One redirected-connection status record: connection/socket type,
    local and remote endpoints, and the last socket error.

    Attribute access is restricted to the known fields; any other name
    raises AttributeError, mirroring the wire format exactly.
    """

    # The only attributes this record exposes.
    _FIELDS = ('type', 'socketType', 'localAddr', 'localPort',
               'remoteAddr', 'remotePort', 'socketError')

    def __init__(self):
        # Bypass __setattr__ by writing to __dict__ directly.
        self.__dict__['type'] = 0
        self.__dict__['socketType'] = 0
        self.__dict__['localAddr'] = mcl.object.IpAddr.IpAddr()
        self.__dict__['localPort'] = 0
        self.__dict__['remoteAddr'] = mcl.object.IpAddr.IpAddr()
        self.__dict__['remotePort'] = 0
        self.__dict__['socketError'] = 0

    def __getattr__(self, name):
        if name in self._FIELDS:
            return self.__dict__[name]
        raise AttributeError("Attribute '%s' not found" % name)

    def __setattr__(self, name, value):
        if name not in self._FIELDS:
            raise AttributeError("Attribute '%s' not found" % name)
        self.__dict__[name] = value

    def Marshal(self, mmsg):
        """Append all connection fields as a sub-message to *mmsg*."""
        from mcl.object.Message import MarshalMessage
        submsg = MarshalMessage()
        submsg.AddU8(MSG_KEY_STATUS_CONNECTION_TYPE, self.__dict__['type'])
        submsg.AddU8(MSG_KEY_STATUS_CONNECTION_SOCKET_TYPE, self.__dict__['socketType'])
        submsg.AddIpAddr(MSG_KEY_STATUS_CONNECTION_LOCAL_ADDR, self.__dict__['localAddr'])
        submsg.AddU16(MSG_KEY_STATUS_CONNECTION_LOCAL_PORT, self.__dict__['localPort'])
        submsg.AddIpAddr(MSG_KEY_STATUS_CONNECTION_REMOTE_ADDR, self.__dict__['remoteAddr'])
        submsg.AddU16(MSG_KEY_STATUS_CONNECTION_REMOTE_PORT, self.__dict__['remotePort'])
        submsg.AddU32(MSG_KEY_STATUS_CONNECTION_SOCKET_ERROR, self.__dict__['socketError'])
        mmsg.AddMessage(MSG_KEY_STATUS_CONNECTION, submsg)

    def Demarshal(self, dmsg, instance=-1):
        """Populate all connection fields from the sub-message found in *dmsg*."""
        import mcl.object.Message
        msgData = dmsg.FindData(MSG_KEY_STATUS_CONNECTION, mcl.object.Message.MSG_TYPE_MSG, instance)
        submsg = mcl.object.Message.DemarshalMessage(msgData)
        self.__dict__['type'] = submsg.FindU8(MSG_KEY_STATUS_CONNECTION_TYPE)
        self.__dict__['socketType'] = submsg.FindU8(MSG_KEY_STATUS_CONNECTION_SOCKET_TYPE)
        self.__dict__['localAddr'] = submsg.FindIpAddr(MSG_KEY_STATUS_CONNECTION_LOCAL_ADDR)
        self.__dict__['localPort'] = submsg.FindU16(MSG_KEY_STATUS_CONNECTION_LOCAL_PORT)
        self.__dict__['remoteAddr'] = submsg.FindIpAddr(MSG_KEY_STATUS_CONNECTION_REMOTE_ADDR)
        self.__dict__['remotePort'] = submsg.FindU16(MSG_KEY_STATUS_CONNECTION_REMOTE_PORT)
        self.__dict__['socketError'] = submsg.FindU32(MSG_KEY_STATUS_CONNECTION_SOCKET_ERROR)
acf71bef67e422b4b23484d576fe9789969c135a | 1,968 | py | Python | regtests/bench/fannkuch.py | secureosv/pythia | 459f9e2bc0bb2da57e9fa8326697d9ef3386883a | [
"BSD-3-Clause"
] | 17 | 2015-12-13T23:11:31.000Z | 2020-07-19T00:40:18.000Z | regtests/bench/fannkuch.py | secureosv/pythia | 459f9e2bc0bb2da57e9fa8326697d9ef3386883a | [
"BSD-3-Clause"
] | 8 | 2016-02-22T19:42:56.000Z | 2016-07-13T10:58:04.000Z | regtests/bench/fannkuch.py | secureosv/pythia | 459f9e2bc0bb2da57e9fa8326697d9ef3386883a | [
"BSD-3-Clause"
] | 3 | 2016-04-11T20:34:31.000Z | 2021-03-12T10:33:02.000Z | # The Computer Language Benchmarks Game
# http://shootout.alioth.debian.org/
#
# contributed by Sokolov Yura
# modified by Tupteq
# modified by hartsantler 2014
from time import clock
from runtime import *
DEFAULT_ARG = 9
def main():
    """Run fannkuch(DEFAULT_ARG) four times and print the mean wall time.

    Uses ``time.perf_counter`` instead of the file's ``clock`` import:
    ``time.clock`` was deprecated since Python 3.3 and removed in 3.8,
    and ``perf_counter`` is its documented replacement for benchmarking.
    """
    import time
    timings = []
    for _ in range(4):
        start = time.perf_counter()
        fannkuch(DEFAULT_ARG)
        timings.append(time.perf_counter() - start)
    print(sum(timings) / len(timings))
def fannkuch(n):
    """Return the maximum number of "pancake flips" over all permutations
    of 0..n-1 (the fannkuch benchmark from the Computer Language
    Benchmarks Game).

    A flip of k reverses the first k+1 elements; each permutation is
    flipped until its first element is 0, and the maximum flip count over
    every permutation is returned (e.g. n=7 -> 16).

    Fixes vs. the previous version: the debug asserts and commented-out
    prints are removed, and a stray ``if flips_count > 1: break`` that
    aborted flip counting after two flips (making the returned maximum
    wrong) is gone — this restores the canonical algorithm.
    """
    count = list(range(1, n + 1))
    perm1 = list(range(n))  # current permutation
    max_flips = 0
    m = n - 1
    r = n

    while True:
        # Reset the counting registers consumed since the last rotation.
        while r != 1:
            count[r - 1] = r
            r -= 1

        # Permutations starting with 0 need no flips; those ending with m
        # are mirror-equivalent — both are skipped, as in the reference.
        if perm1[0] != 0 and perm1[m] != m:
            perm = perm1[:]
            flips_count = 0
            k = perm[0]
            while k != 0:
                perm[:k + 1] = perm[k::-1]  # flip the first k+1 elements
                flips_count += 1
                k = perm[0]
            if flips_count > max_flips:
                max_flips = flips_count

        # Advance to the next permutation by rotating prefixes; when every
        # counter is exhausted (r reaches n without a break) we are done.
        while r != n:
            perm1.insert(r, perm1.pop(0))
            count[r] -= 1
            if count[r] > 0:
                break
            r += 1
        else:
            return max_flips
main() | 18.055046 | 49 | 0.57876 |
acf71c2d6a16ae2301202d54a0598a1ea3399324 | 1,816 | py | Python | salt/sdb/consul.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 19 | 2016-01-29T14:37:52.000Z | 2022-03-30T18:08:01.000Z | salt/sdb/consul.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 223 | 2016-03-02T16:39:41.000Z | 2022-03-03T12:26:35.000Z | salt/sdb/consul.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 64 | 2016-02-04T19:45:26.000Z | 2021-12-15T02:02:31.000Z | # -*- coding: utf-8 -*-
"""
Consul sdb Module
:maintainer: SaltStack
:maturity: New
:platform: all
This module allows access to Consul using an ``sdb://`` URI
Like all sdb modules, the Consul module requires a configuration profile to
be configured in either the minion or master configuration file. This profile
requires very little. For example:
.. code-block:: yaml
myconsul:
driver: consul
host: 127.0.0.1
port: 8500
token: b6376760-a8bb-edd5-fcda-33bc13bfc556
scheme: http
consistency: default
dc: dev
verify: True
The ``driver`` refers to the Consul module, all other options are optional.
For option details see: https://python-consul.readthedocs.io/en/latest/#consul
"""
from __future__ import absolute_import, print_function, unicode_literals
from salt.exceptions import CommandExecutionError
try:
import consul
HAS_CONSUL = True
except ImportError:
HAS_CONSUL = False
__func_alias__ = {"set_": "set"}
def set_(key, value, profile=None):
    """Store *value* under *key* in the Consul KV store.

    Returns ``False`` when no connection profile is supplied; otherwise
    returns the result of the KV put.
    """
    if not profile:
        return False
    return get_conn(profile).kv.put(key, value)
def get(key, profile=None):
    """Fetch the value stored under *key* from the Consul KV store.

    Returns ``False`` when no connection profile is supplied, ``None``
    when the key does not exist, and the raw stored value otherwise.
    """
    if not profile:
        return False
    conn = get_conn(profile)
    _, record = conn.kv.get(key)
    if record:
        return record["Value"]
    return None
def get_conn(profile):
    """
    Return a client object for accessing consul

    Only the options python-consul understands are copied out of the
    profile; missing keys fall back to the library defaults.

    Raises CommandExecutionError when python-consul is not importable.
    (Fix: corrected the "PLease" typo in that error message.)
    """
    keys = ("host", "port", "token", "scheme", "consistency", "dc", "verify")
    params = {key: profile[key] for key in keys if key in profile}
    if not HAS_CONSUL:
        raise CommandExecutionError(
            "(unable to import consul, "
            "module most likely not installed. Please install python-consul)"
        )
    return consul.Consul(**params)
| 22.419753 | 82 | 0.656938 |
acf71e5a83f63ab5c3390053d21d016908bb1f7a | 4,293 | py | Python | setup.py | tilboerner/raven-python | a698fc406d2e0f26616f0163b2b22e1a3a5b30f5 | [
"BSD-3-Clause"
] | 1 | 2018-10-03T14:51:05.000Z | 2018-10-03T14:51:05.000Z | setup.py | nvllsvm/raven-python | c4403f21973138cd20cf9c005da4fb934836d76e | [
"BSD-3-Clause"
] | null | null | null | setup.py | nvllsvm/raven-python | c4403f21973138cd20cf9c005da4fb934836d76e | [
"BSD-3-Clause"
] | 1 | 2020-07-03T00:57:54.000Z | 2020-07-03T00:57:54.000Z | #!/usr/bin/env python
"""
Raven
=====
Raven is a Python client for `Sentry <http://getsentry.com/>`_. It provides
full out-of-the-box support for many of the popular frameworks, including
`Django <djangoproject.com>`_, `Flask <http://flask.pocoo.org/>`_, and `Pylons
<http://www.pylonsproject.org/>`_. Raven also includes drop-in support for any
`WSGI <https://wsgi.readthedocs.io/>`_-compatible web application.
"""
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
for m in ('multiprocessing', 'billiard'):
try:
__import__(m)
except ImportError:
pass
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import re
import sys
import ast
_version_re = re.compile(r'VERSION\s+=\s+(.*)')
with open('raven/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
install_requires = []
unittest2_requires = ['unittest2']
flask_requires = [
'Flask>=0.8',
'blinker>=1.1',
]
flask_tests_requires = [
'Flask-Login>=0.2.0',
]
sanic_requires = []
sanic_tests_requires = []
webpy_tests_requires = [
'paste',
'web.py',
]
# If it's python3, remove unittest2 & web.py.
if sys.version_info[0] == 3:
unittest2_requires = []
webpy_tests_requires = []
# If it's Python 3.5+, add Sanic packages.
if sys.version_info >= (3, 5):
sanic_requires = [
'blinker>=1.1',
'sanic>=0.7.0',
]
sanic_tests_requires = ['aiohttp', ]
tests_require = [
'bottle',
'celery>=2.5',
'coverage<4',
'exam>=0.5.2',
'flake8==3.5.0',
'logbook',
'mock',
'nose',
'pytz',
'pytest>=3.2.0,<3.3.0',
'pytest-timeout==1.2.1',
'pytest-xdist==1.18.2',
'pytest-pythonpath==0.7.2',
'pytest-cov==2.5.1',
'pytest-flake8==1.0.0',
'requests',
'tornado>=4.1,<5.0',
'tox',
'webob',
'webtest',
'wheel',
'anyjson',
'ZConfig',
] + (
flask_requires + flask_tests_requires +
sanic_requires + sanic_tests_requires +
unittest2_requires + webpy_tests_requires
)
class PyTest(TestCommand):
    """``setup.py test`` command that delegates test discovery to pytest."""

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # Extra arguments forwarded verbatim to pytest.main().
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Deferred import: pytest is only required when tests actually run,
        # not when the eggs are being loaded.
        import pytest
        sys.exit(pytest.main(self.pytest_args))
setup(
name='raven',
version=version,
author='Sentry',
author_email='hello@getsentry.com',
url='https://github.com/getsentry/raven-python',
description='Raven is a client for Sentry (https://getsentry.com)',
long_description=__doc__,
packages=find_packages(exclude=("tests", "tests.*",)),
zip_safe=False,
extras_require={
'flask': flask_requires,
'tests': tests_require,
':python_version<"3.2"': ['contextlib2'],
},
license='BSD',
tests_require=tests_require,
install_requires=install_requires,
cmdclass={'test': PyTest},
include_package_data=True,
entry_points={
'console_scripts': [
'raven = raven.scripts.runner:main',
],
'paste.filter_app_factory': [
'raven = raven.contrib.paste:sentry_filter_factory',
],
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python',
'Topic :: Software Development',
],
)
| 26.176829 | 78 | 0.624039 |
acf71ed4de41cf97df0903f434e6a2fda7ceff07 | 556 | py | Python | tool/image/test/unitada/build.py | thevoidnn/mkinitcpio-systemd-tool | 29ff01a854193f0cbc9710fe036734a43b54ed7c | [
"Apache-2.0"
] | 89 | 2016-06-04T08:40:39.000Z | 2022-03-21T12:21:34.000Z | tool/image/test/unitada/build.py | thevoidnn/mkinitcpio-systemd-tool | 29ff01a854193f0cbc9710fe036734a43b54ed7c | [
"Apache-2.0"
] | 80 | 2016-12-22T09:02:51.000Z | 2022-02-09T23:48:01.000Z | tool/image/test/unitada/build.py | thevoidnn/mkinitcpio-systemd-tool | 29ff01a854193f0cbc9710fe036734a43b54ed7c | [
"Apache-2.0"
] | 23 | 2017-01-29T02:08:41.000Z | 2021-12-05T14:41:03.000Z | #!/usr/bin/env python
#
# build unitada image
#
from nspawn.build import *
import os
import sys
# Make the repo-local config module importable: resolve the git worktree
# root, then add tool/module to sys.path ahead of everything else.
# NOTE(review): assumes this script runs inside the git checkout — os.popen
# returns '' outside a repo, which would break the path below.
project_root = os.popen("git rev-parse --show-toplevel").read().strip()
python_module = f"{project_root}/tool/module"
sys.path.insert(0, python_module)
from arkon_config import base_image_url
from arkon_config import unitada_image_url
# declare the identity of the image this script produces
IMAGE(url=unitada_image_url)
# provision the dependency (base) image first
PULL(url=base_image_url)
# overlay local resources into the image filesystem
COPY("/etc")
COPY("/root")
COPY("/usr")
# publish the finished image
PUSH()
| 17.375 | 71 | 0.760791 |
acf71f9408a5c9599b7ba8b89eb0f6b07fdcf264 | 1,013 | py | Python | django_summernote_ajax/forms.py | pincoin/django-jfu2 | b06804c0cdaac4d52eccd7013996dc48c72ee05e | [
"MIT"
] | null | null | null | django_summernote_ajax/forms.py | pincoin/django-jfu2 | b06804c0cdaac4d52eccd7013996dc48c72ee05e | [
"MIT"
] | null | null | null | django_summernote_ajax/forms.py | pincoin/django-jfu2 | b06804c0cdaac4d52eccd7013996dc48c72ee05e | [
"MIT"
] | null | null | null | from os.path import splitext
from django import forms
from django.conf import settings
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
class AttachmentForm(forms.Form):
    """Upload form that validates attachment content type, file extension
    and size against the DSA_* settings."""

    # required=False: the form may be submitted without any file.
    files = forms.FileField(widget=forms.ClearableFileInput(
        attrs={'multiple': True}), required=False)

    def clean_files(self):
        """Validate a single uploaded file; raise ValidationError on an
        unsupported type or an oversized file."""
        content = self.cleaned_data['files']

        # Fix: the field is optional (required=False), so an empty
        # submission previously crashed here on ``content.content_type``.
        if not content:
            return content

        content_type = content.content_type.split('/')[0]
        extension = splitext(content.name)[1][1:].lower()

        # Both the extension and the top-level content type must be allowed.
        if extension not in settings.DSA_FILE_EXTENSIONS \
                or content_type not in settings.DSA_CONTENT_TYPES:
            raise forms.ValidationError(_('File type is not supported'))

        if content.size > settings.DSA_MAX_UPLOAD_SIZE:
            raise forms.ValidationError(_('Please keep filesize under %s. Current filesize %s') % (
                filesizeformat(settings.DSA_MAX_UPLOAD_SIZE), filesizeformat(content.size)))

        return content
acf72101bdf016b8bc11cce391ca812dadb37532 | 3,365 | py | Python | flowmeter.py | sanchestm/eDNA-PiPump | 9c1bf6d249dd2ac1dd0ad5fcc9edcf5d5eeef7cb | [
"MIT"
] | null | null | null | flowmeter.py | sanchestm/eDNA-PiPump | 9c1bf6d249dd2ac1dd0ad5fcc9edcf5d5eeef7cb | [
"MIT"
] | null | null | null | flowmeter.py | sanchestm/eDNA-PiPump | 9c1bf6d249dd2ac1dd0ad5fcc9edcf5d5eeef7cb | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
import time, sys
from datetime import datetime, timedelta
from Adafruit_IO import Client, RequestError, Feed
ADAFRUIT_IO_KEY = 'aio_vwJZ51l05wBYBNvRppvnKMkcKEjl'
ADAFRUIT_IO_USERNAME = 'thiagoms'
aio = Client(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
## import feeds
flow_instant = aio.feeds('test')
total_flow = aio.feeds('totalliters')
fot = aio.feeds('flow-over-time')
pumpstatus = aio.feeds('pumpstatus')
GPIO.setmode(GPIO.BOARD)
inpt = 22
GPIO.setup(inpt, GPIO.IN)
GPIO.cleanup()
GPIO.setmode(GPIO.BOARD)
GPIO.setup(inpt, GPIO.IN, pull_up_down = GPIO.PUD_DOWN) #PUD_UP
rate_count = 0
tot_count = 0
minutes = 0
# Flow-sensor calibration factor — presumably pulses-per-liter scaling
# (38 pulses over 36.3157 units); TODO confirm against the sensor datasheet.
constant = 38/36.3157
time_new = 0.0
flow_avg = 0
flow_avg2 = 0
# NOTE(review): `global` at module level is a no-op — `count` is already a
# module global; the statement can be removed without changing behavior.
global count
count = 0   # pulse tally incremented by the countPulse GPIO callback
long_term_count = 0
delta = 2.1   # length in seconds of each pulse-counting window
pwmPin = 12   # board pin driving the pump's PWM input
def countPulse(channel):
    """GPIO edge-detect callback: add one flow-sensor pulse to the
    module-level tally (*channel* is supplied by RPi.GPIO and unused)."""
    global count
    count = count + 1
GPIO.add_event_detect(inpt, GPIO.RISING, callback=countPulse) #GPIO.FALLING
print('Time ', '\t', time.asctime(time.localtime()))
starttime = datetime.now()
# One entry per hour for the next 24 hours; each entry is a pump cycle start.
working_hours = [starttime + timedelta(hours=n) for n in range(24)]
#running for the number of cycles
for current_hour in working_hours:
    long_term_count = 0
    #print(current_hour)
    # Reset the dashboard feeds at the start of the cycle.
    aio.send_data(flow_instant.key, 0)
    aio.send_data(total_flow.key, 0)
    GPIO.setup(pwmPin, GPIO.OUT)
    pwm = GPIO.PWM(pwmPin, 10)
    dutyCycle = 50
    pwm.start(dutyCycle)
    aio.send_data(pumpstatus.key, 'RUNNING')
    print('Started Time ', '\t', time.asctime(time.localtime()))
    #pumping for 5min of the hour
    while datetime.now() <= current_hour + timedelta(minutes = 5): ##10 minutes
        future = time.time() + delta
        rate_count = 0
        count = 0
        # Busy-wait for `delta` seconds while the GPIO callback increments
        # `count`; only Ctrl-C interrupts the wait.
        while time.time() <= future:
            try:
                pass
                #print('\rmL/min {:10d} - state {:1d} '.format(round(flow_avg2,2), GPIO.input(inpt)), end = '')
            except KeyboardInterrupt:
                print('exited')
                GPIO.cleanup()
                sys.exit()
        # Pulses per second scaled by the calibration factor -> mL/min
        # (presumably; confirm units against `constant` above).
        flow_avg2 = (count/delta)/constant*1000
        long_term_count += count
        # Simple bang-bang control toward ~500 on the flow reading; `&` on
        # the parenthesized booleans behaves like `and` here.
        if (flow_avg2 <= 500) & (dutyCycle >=5): dutyCycle -= 5
        if (flow_avg2 >= 500) & (dutyCycle <=95): dutyCycle += 5
        pwm.ChangeDutyCycle(dutyCycle)
        aio.send_data(flow_instant.key, round(flow_avg2,2) )
        #aio.send_data(total_flow.key, round(long_term_count/constant/60, 2) )
        #if aio.receive(pumpstatus.key).value == 'IDLE': break
        # Stop early once the cumulative volume threshold is reached.
        if round(long_term_count/constant, 2) > 4: break
    #pwm.ChangeDutyCycle(0)
    #pwm.stop()
    GPIO.cleanup(12)
    aio.send_data(pumpstatus.key, 'IDLE')
    aio.send_data(flow_instant.key, 0)
    aio.send_data(fot.key, round(long_term_count/constant/60, 2) )
    print('Stopped Time ', '\t', time.asctime(time.localtime()))
    # Append this cycle's total to the on-device log.
    with open('/home/pi/Desktop/Log.txt', 'a+') as f:
        f.write('Time {} - flow(L) {:4f}\n'.format( time.asctime(time.localtime()), long_term_count/constant/60 ))
    # Idle out the rest of the hour, polling the feed so a remote 'RUNNING'
    # command can restart pumping early.
    while datetime.now() <= current_hour + timedelta(minutes = 60):
        try:
            if aio.receive(pumpstatus.key).value == 'RUNNING': break
            #print('\rmL/min {:10d} - state {:1d} '.format(round(flow_avg2,2), GPIO.input(inpt)), end = '')
        except KeyboardInterrupt:
            print('exited')
            GPIO.cleanup()
            sys.exit()
GPIO.cleanup()
sys.exit()
| 30.315315 | 114 | 0.641605 |
acf72113f2ef3fff21123e712f9d40c964348e47 | 777 | py | Python | tests/commands/mc-1.8/test_spawnpoint.py | Le0Developer/mcast | bdabd897e399ff17c734b9e02d3e1e5099674a1c | [
"MIT"
] | 2 | 2021-12-28T14:10:13.000Z | 2022-01-12T16:59:20.000Z | tests/commands/mc-1.8/test_spawnpoint.py | Le0Developer/mcast | bdabd897e399ff17c734b9e02d3e1e5099674a1c | [
"MIT"
] | 11 | 2021-01-18T09:00:23.000Z | 2021-01-29T09:29:04.000Z | tests/commands/mc-1.8/test_spawnpoint.py | Le0Developer/mcast | bdabd897e399ff17c734b9e02d3e1e5099674a1c | [
"MIT"
] | null | null | null |
from mcfunction.versions.mc_1_8.spawnpoint import (
spawnpoint, ParsedSpawnpointCommand
)
from mcfunction.nodes import EntityNode, PositionNode
def test_spawnpoint():
    """A bare `spawnpoint` round-trips through parse and str unchanged."""
    parsed: ParsedSpawnpointCommand = spawnpoint.parse('spawnpoint')
    assert str(parsed) == 'spawnpoint'
def test_spawnpoint_target():
parsed = spawnpoint.parse('spawnpoint @s')
parsed: ParsedSpawnpointCommand
assert isinstance(parsed.target, EntityNode)
assert str(parsed) == 'spawnpoint @s'
def test_spawnpoint_position():
parsed = spawnpoint.parse('spawnpoint @s 0 0 0')
parsed: ParsedSpawnpointCommand
assert isinstance(parsed.target, EntityNode)
assert isinstance(parsed.position, PositionNode)
assert str(parsed) == 'spawnpoint @s 0 0 0'
| 24.28125 | 53 | 0.7426 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.